dct-a.asm
字號:
;=============================================================================
; 8x8 DCT / IDCT helpers (x86-32, MMX, NASM syntax).
; All functions use the cdecl convention: arguments on the stack; eax/ecx/edx
; freely clobbered.  mm0-mm7 are clobbered throughout and no emms is issued
; here -- the caller is responsible for restoring x87 state.
; FENC_STRIDE / FDEC_STRIDE and the MMX_ZERO / MMX_SUMSUB_BA / MMX_TRANSPOSE
; macros are defined elsewhere in this file.
;=============================================================================

; Load two 8-pixel rows, widen to int16 and subtract:
;   %1,%2 = dst lo/hi words, %3,%4 = tmp, %5,%6 = mem operands, %7 = zero reg.
; NOTE(review): the %macro header and the initial "movq %1, %5" were cut off
; by the page extraction this chunk came from; both are reconstructed here --
; confirm against the complete upstream dct-a.asm.
%macro MMX_LOAD_DIFF_8P 7
    movq        %1, %5
    movq        %2, %1
    punpcklbw   %1, %7          ; low  4 pixels of %5 -> words
    punpckhbw   %2, %7          ; high 4 pixels of %5 -> words
    movq        %3, %6
    movq        %4, %3
    punpcklbw   %3, %7
    punpckhbw   %4, %7
    psubw       %1, %3          ; word-wise difference, low half
    psubw       %2, %4          ; word-wise difference, high half
%endmacro

%macro MMX_LOADSUMSUB 4        ; returns %1=%3+%4, %2=%3-%4
    movq        %2, %3
    movq        %1, %4
    MMX_SUMSUB_BA %1, %2
%endmacro

cglobal x264_pixel_sub_8x8_mmx
cglobal x264_pixel_add_8x8_mmx
cglobal x264_transpose_8x8_mmx
cglobal x264_ydct8_mmx
cglobal x264_yidct8_mmx

ALIGN 16
;-----------------------------------------------------------------------------
; void __cdecl x264_pixel_sub_8x8_mmx( int16_t *diff, uint8_t *pix1, uint8_t *pix2 );
; diff[y][x] = pix1[y][x] - pix2[y][x] for an 8x8 block, widened to int16.
; pix1 rows are FENC_STRIDE apart, pix2 rows FDEC_STRIDE apart.
; Clobbers: eax, ecx, edx, mm0-mm3, mm7, flags.
;-----------------------------------------------------------------------------
x264_pixel_sub_8x8_mmx:
    mov     edx, [esp+ 4]       ; diff
    mov     eax, [esp+ 8]       ; pix1
    mov     ecx, [esp+12]       ; pix2
    MMX_ZERO mm7
%assign disp 0
%rep 8
    MMX_LOAD_DIFF_8P mm0, mm1, mm2, mm3, [eax], [ecx], mm7
    movq    [edx+disp],   mm0
    movq    [edx+disp+8], mm1
    add     eax, FENC_STRIDE
    add     ecx, FDEC_STRIDE
%assign disp disp+16
%endrep
    ret

ALIGN 16
;-----------------------------------------------------------------------------
; void __cdecl x264_ydct8_mmx( int16_t dest[8][8] );
; In-place vertical 8-point DCT of every column.
; Clobbers: eax, mm0-mm7, flags.
;-----------------------------------------------------------------------------
x264_ydct8_mmx:
    mov     eax, [esp+04]       ; dest
    ;-------------------------------------------------------------------------
    ; vertical dct ( compute 4 columns at a time -> 2 loops )
    ;-------------------------------------------------------------------------
%assign disp 0
%rep 2
    MMX_LOADSUMSUB mm2, mm3, [eax+disp+0*16], [eax+disp+7*16] ; mm2 = s07, mm3 = d07
    MMX_LOADSUMSUB mm1, mm5, [eax+disp+1*16], [eax+disp+6*16] ; mm1 = s16, mm5 = d16
    MMX_LOADSUMSUB mm0, mm6, [eax+disp+2*16], [eax+disp+5*16] ; mm0 = s25, mm6 = d25
    MMX_LOADSUMSUB mm4, mm7, [eax+disp+3*16], [eax+disp+4*16] ; mm4 = s34, mm7 = d34

    MMX_SUMSUB_BA  mm4, mm2     ; mm4 = a0, mm2 = a2
    MMX_SUMSUB_BA  mm0, mm1     ; mm0 = a1, mm1 = a3
    MMX_SUMSUB_BA  mm0, mm4     ; mm0 = dst0, mm4 = dst4
    movq    [eax+disp+0*16], mm0
    movq    [eax+disp+4*16], mm4

    movq    mm0, mm1            ; a3
    psraw   mm0, 1              ; a3>>1
    paddw   mm0, mm2            ; dst2 = a2 + (a3>>1)
    psraw   mm2, 1              ; a2>>1
    psubw   mm2, mm1            ; dst6 = (a2>>1) - a3
    movq    [eax+disp+2*16], mm0
    movq    [eax+disp+6*16], mm2

    movq    mm0, mm6
    psraw   mm0, 1
    paddw   mm0, mm6            ; d25 + (d25>>1)
    movq    mm1, mm3
    psubw   mm1, mm7
    psubw   mm1, mm0            ; a5 = d07 - d34 - (d25+(d25>>1))
    movq    mm0, mm5
    psraw   mm0, 1
    paddw   mm0, mm5            ; d16 + (d16>>1)
    movq    mm2, mm3
    paddw   mm2, mm7
    psubw   mm2, mm0            ; a6 = d07 + d34 - (d16+(d16>>1))
    movq    mm0, mm3
    psraw   mm0, 1
    paddw   mm0, mm3            ; d07 + (d07>>1)
    paddw   mm0, mm5
    paddw   mm0, mm6            ; a4 = d16 + d25 + (d07+(d07>>1))
    movq    mm3, mm7
    psraw   mm3, 1
    paddw   mm3, mm7            ; d34 + (d34>>1)
    paddw   mm3, mm5
    psubw   mm3, mm6            ; a7 = d16 - d25 + (d34+(d34>>1))

    movq    mm7, mm3
    psraw   mm7, 2
    paddw   mm7, mm0            ; dst1 = a4 + (a7>>2)
    movq    mm6, mm2
    psraw   mm6, 2
    paddw   mm6, mm1            ; dst3 = a5 + (a6>>2)
    psraw   mm0, 2
    psraw   mm1, 2
    psubw   mm0, mm3            ; dst7 = (a4>>2) - a7
    psubw   mm2, mm1            ; dst5 = a6 - (a5>>2)
    movq    [eax+disp+1*16], mm7
    movq    [eax+disp+3*16], mm6
    movq    [eax+disp+5*16], mm2
    movq    [eax+disp+7*16], mm0
%assign disp disp+8
%endrep
    ret

ALIGN 16
;-----------------------------------------------------------------------------
; void __cdecl x264_yidct8_mmx( int16_t dest[8][8] );
; In-place vertical 8-point inverse DCT of every column.
; Clobbers: eax, mm0-mm7, flags.
;-----------------------------------------------------------------------------
x264_yidct8_mmx:
    mov     eax, [esp+04]       ; dest
    ;-------------------------------------------------------------------------
    ; vertical idct ( compute 4 columns at a time -> 2 loops )
    ;-------------------------------------------------------------------------
%assign disp 0
%rep 2
    ; odd-coefficient half
    movq    mm1, [eax+disp+1*16] ; mm1 = d1
    movq    mm3, [eax+disp+3*16] ; mm3 = d3
    movq    mm5, [eax+disp+5*16] ; mm5 = d5
    movq    mm7, [eax+disp+7*16] ; mm7 = d7

    movq    mm4, mm7
    psraw   mm4, 1
    movq    mm0, mm5
    psubw   mm0, mm7
    psubw   mm0, mm4
    psubw   mm0, mm3            ; mm0 = e1 = d5 - d7 - (d7>>1) - d3
    movq    mm6, mm3
    psraw   mm6, 1
    movq    mm2, mm7
    psubw   mm2, mm6
    psubw   mm2, mm3
    paddw   mm2, mm1            ; mm2 = e3 = d7 - (d3>>1) - d3 + d1
    movq    mm4, mm5
    psraw   mm4, 1
    paddw   mm4, mm5
    paddw   mm4, mm7
    psubw   mm4, mm1            ; mm4 = e5 = d5 + (d5>>1) + d7 - d1
    movq    mm6, mm1
    psraw   mm6, 1
    paddw   mm6, mm1
    paddw   mm6, mm5
    paddw   mm6, mm3            ; mm6 = e7 = d1 + (d1>>1) + d5 + d3

    movq    mm1, mm0
    movq    mm3, mm4
    movq    mm5, mm2
    movq    mm7, mm6
    psraw   mm6, 2
    psraw   mm3, 2
    psraw   mm5, 2
    psraw   mm0, 2
    paddw   mm1, mm6            ; mm1 = f1 = e1 + (e7>>2)
    paddw   mm3, mm2            ; mm3 = f3 = e3 + (e5>>2)
    psubw   mm5, mm4            ; mm5 = f5 = (e3>>2) - e5
    psubw   mm7, mm0            ; mm7 = f7 = e7 - (e1>>2)

    ; even-coefficient half
    movq    mm2, [eax+disp+2*16] ; mm2 = d2
    movq    mm6, [eax+disp+6*16] ; mm6 = d6
    movq    mm4, mm2
    movq    mm0, mm6
    psraw   mm4, 1
    psraw   mm6, 1
    psubw   mm4, mm0            ; mm4 = a4 = (d2>>1) - d6
    paddw   mm6, mm2            ; mm6 = a6 = (d6>>1) + d2
    movq    mm2, [eax+disp+0*16] ; mm2 = d0
    movq    mm0, [eax+disp+4*16] ; mm0 = d4
    MMX_SUMSUB_BA mm0, mm2      ; mm0 = a0, mm2 = a2
    MMX_SUMSUB_BA mm6, mm0      ; mm6 = f0, mm0 = f6
    MMX_SUMSUB_BA mm4, mm2      ; mm4 = f2, mm2 = f4

    ; final butterfly between even and odd halves
    MMX_SUMSUB_BA mm7, mm6      ; mm7 = g0, mm6 = g7
    MMX_SUMSUB_BA mm5, mm4      ; mm5 = g1, mm4 = g6
    MMX_SUMSUB_BA mm3, mm2      ; mm3 = g2, mm2 = g5
    MMX_SUMSUB_BA mm1, mm0      ; mm1 = g3, mm0 = g4
    movq    [eax+disp+0*16], mm7
    movq    [eax+disp+1*16], mm5
    movq    [eax+disp+2*16], mm3
    movq    [eax+disp+3*16], mm1
    movq    [eax+disp+4*16], mm0
    movq    [eax+disp+5*16], mm2
    movq    [eax+disp+6*16], mm4
    movq    [eax+disp+7*16], mm6
%assign disp disp+8
%endrep
    ret

ALIGN 16
;-----------------------------------------------------------------------------
; void __cdecl x264_pixel_add_8x8_mmx( uint8_t *dst, int16_t src[8][8] );
; dst[i] = clip255( dst[i] + (src[i] >> 6) ) for an 8x8 block; dst rows are
; FDEC_STRIDE apart.  The packuswb saturates the sums to [0,255].
; Clobbers: eax, edx, mm0-mm3, mm7, flags.
;-----------------------------------------------------------------------------
x264_pixel_add_8x8_mmx:
    mov     eax, [esp+4]        ; dst
    mov     edx, [esp+8]        ; src
    MMX_ZERO mm7
%assign disp 0
%rep 8
    movq    mm0, [eax]
    movq    mm2, [edx+disp]
    movq    mm3, [edx+disp+8]
    movq    mm1, mm0
    psraw   mm2, 6              ; residual >> 6
    psraw   mm3, 6
    punpcklbw mm0, mm7          ; widen dst pixels to words
    punpckhbw mm1, mm7
    paddw   mm0, mm2
    paddw   mm1, mm3
    packuswb mm0, mm1           ; saturate back to unsigned bytes
    movq    [eax], mm0
    add     eax, FDEC_STRIDE
%assign disp disp+16
%endrep
    ret

ALIGN 16
;-----------------------------------------------------------------------------
; void __cdecl x264_transpose_8x8_mmx( int16_t src[8][8] );
; In-place transpose done as four 4x4 sub-blocks (row stride = 16 bytes):
; the two diagonal blocks are transposed in place, the two off-diagonal
; blocks are transposed and swapped with each other.
; Clobbers: eax, mm0-mm7, flags.
;-----------------------------------------------------------------------------
x264_transpose_8x8_mmx:
    mov     eax, [esp+4]
    ; top-left 4x4, in place
    movq    mm0, [eax    ]
    movq    mm1, [eax+ 16]
    movq    mm2, [eax+ 32]
    movq    mm3, [eax+ 48]
    MMX_TRANSPOSE mm0, mm1, mm2, mm3, mm4
    movq    [eax    ], mm0
    movq    [eax+ 16], mm3
    movq    [eax+ 32], mm4
    movq    [eax+ 48], mm2
    ; bottom-right 4x4, in place
    movq    mm0, [eax+ 72]
    movq    mm1, [eax+ 88]
    movq    mm2, [eax+104]
    movq    mm3, [eax+120]
    MMX_TRANSPOSE mm0, mm1, mm2, mm3, mm4
    movq    [eax+ 72], mm0
    movq    [eax+ 88], mm3
    movq    [eax+104], mm4
    movq    [eax+120], mm2
    ; top-right 4x4: transpose, then exchange with the bottom-left block
    movq    mm0, [eax+  8]
    movq    mm1, [eax+ 24]
    movq    mm2, [eax+ 40]
    movq    mm3, [eax+ 56]
    MMX_TRANSPOSE mm0, mm1, mm2, mm3, mm4
    movq    mm1, [eax+ 64]      ; load bottom-left before overwriting it
    movq    mm5, [eax+ 80]
    movq    mm6, [eax+ 96]
    movq    mm7, [eax+112]
    movq    [eax+ 64], mm0
    movq    [eax+ 80], mm3
    movq    [eax+ 96], mm4
    movq    [eax+112], mm2
    MMX_TRANSPOSE mm1, mm5, mm6, mm7, mm4
    movq    [eax+  8], mm1
    movq    [eax+ 24], mm7
    movq    [eax+ 40], mm4
    movq    [eax+ 56], mm6
    ret
快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -