skl_mpg4_rvop.cpp
/********************************************************
 * Some code. Copyright (C) 2003 by Pascal Massimino.   *
 * All Rights Reserved. (http://skal.planet-d.net)      *
 * For Educational/Academic use ONLY. See 'LICENSE.TXT'.*
 ********************************************************/
/*
 * skl_mpg4_rvop.cpp
 *
 * MPEG4 decoder. Reduced-Resolution related funcs.
 ********************************************************/

#include "./skl_mpg4i.h"

//////////////////////////////////////////////////////////
// Reduced-I/P-VOP
//////////////////////////////////////////////////////////

void SKL_MB::Next_Reduced()
{
  SKL_ASSERT(VOL->Reduced_VOP>0);
  Tops[0] += 2;  Tops[1] += 2;  Tops[2] += 2;
  Tops[3] += 2;  Tops[4] += 1;  Tops[5] += 1;
  Curs[0] += 2;  Curs[1] += 2;  Curs[2] += 2;
  Curs[3] += 2;  Curs[4] += 1;  Curs[5] += 1;
  MVs  += 2;
  MVs2 += 2;
  Y1 += 32;  Y2 += 32;
  U  += 16;  V  += 16;
  Limit_Mins[0]    -= 2*MB_Pels;
  Limit_Maxs[0]    -= 2*MB_Pels;
  Limit_Mins_UV[0] -= 2*16;
  Limit_Maxs_UV[0] -= 2*16;
  x   += 2;
  Pos += 2;
}

//////////////////////////////////////////////////////////
// section 7.6.10.1.5

static void Post_Filter(const SKL_MB * const MB)
{
  const int BpS = MB->VOL->BpS;
  if (MB->y>0 && MB->Map[MB->Pos-MB->VOL->MB_W].Type!=SKL_MAP_SKIPPED)
  {
    MB->VOL->MB_Ops.HFilter_31(MB->Y1-BpS, MB->Y1, 4);
    MB->VOL->MB_Ops.HFilter_31(MB->U-BpS,  MB->U,  2);
    MB->VOL->MB_Ops.HFilter_31(MB->V-BpS,  MB->V,  2);
  }
  MB->VOL->MB_Ops.HFilter_31(MB->Y1+16*BpS-BpS, MB->Y1+16*BpS, 4);
  if (MB->x>0 && MB->Map[MB->Pos-1].Type!=SKL_MAP_SKIPPED)
  {
    MB->VOL->MB_Ops.VFilter_31(MB->Y1-1, MB->Y1, BpS, 4);
    MB->VOL->MB_Ops.VFilter_31(MB->U-1,  MB->U,  BpS, 2);
    MB->VOL->MB_Ops.VFilter_31(MB->V-1,  MB->V,  BpS, 2);
  }
  MB->VOL->MB_Ops.VFilter_31(MB->Y1+15, MB->Y1+16, BpS, 4);
}
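
//////////////////////////////////////////////////////////
// Illustration (not part of the decoder): the HFilter_31 / VFilter_31
// hooks called by Post_Filter() above apply the block-boundary smoothing
// of section 7.6.10.1.5. Judging from the '_31' suffix they blend the two
// pixels straddling a block edge with 3:1 weights; the sketch below only
// shows that idea -- the rounding constant and the edge layout are
// assumptions, not taken from MB_Ops.
#if 0
// 'Top' points to the last pixel row above a horizontal block edge,
// 'Bot' to the first row below it; 'N' pixels run along the edge.
static void Edge_Filter_31_Sketch(unsigned char *Top, unsigned char *Bot, int N)
{
  for (int i = 0; i < N; ++i) {
    const int a = Top[i], b = Bot[i];
    Top[i] = (unsigned char)((3*a +   b + 2) >> 2);   // assumed rounding
    Bot[i] = (unsigned char)((  a + 3*b + 2) >> 2);
  }
}
#endif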
//////////////////////////////////////////////////////////
// Writing an 'upsampling' version of IDct_Put/IDct_Add
// is out of the question (my shrink said ;). So, let's
// split the work in two:

inline
void SKL_MB::Copy_16To8_Upsampled(SKL_INT16 In[6*64]) const
{
  VOL->Quant_Ops.IDct_Sparse(In+0*64);
  VOL->Quant_Ops.IDct_Sparse(In+1*64);
  VOL->Quant_Ops.IDct_Sparse(In+2*64);
  VOL->Quant_Ops.IDct_Sparse(In+3*64);
  VOL->Quant_Ops.IDct_Sparse(In+4*64);
  VOL->Quant_Ops.IDct_Sparse(In+5*64);

  VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1,    In+0*64, YBpS);
  VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1+16, In+1*64, YBpS);
  if (Field_DCT<=0)
  {
    VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1+2*BpS8,    In+2*64, YBpS);
    VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1+2*BpS8+16, In+3*64, YBpS);
  }
  else
  {
    VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1+BpS,    In+2*64, YBpS);
    VOL->MB_Ops.Copy_Upsampled_8x8_16To8(Y1+BpS+16, In+3*64, YBpS);
  }
  VOL->MB_Ops.Copy_Upsampled_8x8_16To8(U, In+4*64, BpS);
  VOL->MB_Ops.Copy_Upsampled_8x8_16To8(V, In+5*64, BpS);
}

inline
void SKL_MB::Add_16To8_Upsampled(SKL_INT16 In[6*64]) const
{
  if (Cbp&0x20) VOL->Quant_Ops.IDct_Sparse(In+0*64);
  if (Cbp&0x10) VOL->Quant_Ops.IDct_Sparse(In+1*64);
  if (Cbp&0x08) VOL->Quant_Ops.IDct_Sparse(In+2*64);
  if (Cbp&0x04) VOL->Quant_Ops.IDct_Sparse(In+3*64);
  if (Cbp&0x02) VOL->Quant_Ops.IDct_Sparse(In+4*64);
  if (Cbp&0x01) VOL->Quant_Ops.IDct_Sparse(In+5*64);

  if (Cbp&0x20) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1,    In+0*64, YBpS);
  if (Cbp&0x10) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1+16, In+1*64, YBpS);
  if (Field_DCT<=0)
  {
    if (Cbp&0x08) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1+2*BpS8,    In+2*64, YBpS);
    if (Cbp&0x04) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1+2*BpS8+16, In+3*64, YBpS);
  }
  else
  {
    if (Cbp&0x08) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1+BpS,    In+2*64, YBpS);
    if (Cbp&0x04) VOL->MB_Ops.Add_Upsampled_8x8_16To8(Y1+BpS+16, In+3*64, YBpS);
  }
  if (Cbp&0x02) VOL->MB_Ops.Add_Upsampled_8x8_16To8(U, In+4*64, BpS);
  if (Cbp&0x01) VOL->MB_Ops.Add_Upsampled_8x8_16To8(V, In+5*64, BpS);
}
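
//////////////////////////////////////////////////////////
// Illustration (not part of the decoder): Copy_Upsampled_8x8_16To8 /
// Add_Upsampled_8x8_16To8 turn one 8x8 IDCT output block into a 16x16
// pixel patch, which is what lets reduced-resolution texture be decoded
// at 8x8 and pasted at full size. A plausible reading is a 2x upsampling
// where each output pixel blends the four nearest source samples with
// 9:3:3:1 weights and borders are replicated; the rounding and the exact
// border rule below are assumptions, not the real MB_Ops code.
#if 0
static unsigned char Clamp_255_Sketch(int v)
{
  return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}
// Dst: 16x16 destination with stride BpS; Src: 8x8 block of 16-bit samples.
static void Copy_Upsampled_Sketch(unsigned char *Dst, const short Src[64], int BpS)
{
  for (int y = 0; y < 16; ++y) {
    const int j  = y >> 1;                                    // source row
    const int oy = (y & 1) ? (j < 7 ? 1 : 0) : (j > 0 ? -1 : 0);
    for (int x = 0; x < 16; ++x) {
      const int i  = x >> 1;                                  // source column
      const int ox = (x & 1) ? (i < 7 ? 1 : 0) : (i > 0 ? -1 : 0);
      const int v  = 9*Src[j*8+i]      + 3*Src[j*8+i+ox]
                   + 3*Src[(j+oy)*8+i] +   Src[(j+oy)*8+i+ox];
      Dst[y*BpS + x] = Clamp_255_Sketch((v + 8) >> 4);        // assumed rounding
    }
  }
}
#endif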
//////////////////////////////////////////////////////////
// This function expands the information from a 16x16
// block to a 32x32 one.

inline void SKL_MB::Expand_Reduced() const
{
  const int Type = MB_To_Map_Type[MB_Type];
  Map[Pos            ].Type = Type;
  Map[Pos          +1].Type = Type;
  Map[Pos+VOL->MB_W  ].Type = Type;
  Map[Pos+VOL->MB_W+1].Type = Type;
  Map[Pos          +1].dQ = 0;
  Map[Pos+VOL->MB_W  ].dQ = 0;
  Map[Pos+VOL->MB_W+1].dQ = 0;

  // expand the MVs. Normally, it should only be
  // useful if a BVOP follows (Low_Delay=0)
  SKL_MV * const MVs3 = MVs2 + MV_Stride;
  SKL_MV * const MVs4 = MVs3 + MV_Stride;
  if (Map[Pos].Type!=SKL_MAP_16x8)
  {
    // SKIPPED, INTRA, 8x8 or 16x16
    /*  12|        11|22  <-MVs []
        34|        11|22  <-MVs2[]
        --+-   =>  --+--
          |        33|44  <-MVs3[]
                   33|44  <-MVs4[]
    */
    /* expand lines #1/#3 */
    SKL_COPY_MV(MVs [2], MVs [1]);
    SKL_COPY_MV(MVs [3], MVs [1]);
    SKL_COPY_MV(MVs [1], MVs [0]);
    SKL_COPY_MV(MVs3[0], MVs2[0]);
    SKL_COPY_MV(MVs3[1], MVs2[0]);
    SKL_COPY_MV(MVs3[2], MVs2[1]);
    SKL_COPY_MV(MVs3[3], MVs2[1]);
    /* copy lines #1/#3 to #2/#4 */
    SKL_COPY_MV(MVs4[0], MVs3[0]);
    SKL_COPY_MV(MVs4[1], MVs3[1]);
    SKL_COPY_MV(MVs4[2], MVs3[2]);
    SKL_COPY_MV(MVs4[3], MVs3[3]);
    SKL_COPY_MV(MVs2[0], MVs [0]);
    SKL_COPY_MV(MVs2[1], MVs [1]);
    SKL_COPY_MV(MVs2[2], MVs [2]);
    SKL_COPY_MV(MVs2[3], MVs [3]);
  }
  else
  {
    /* ?!?! TODO: is it right ?!?! */
    /*  12|        12|12  <-MVs []
        33|        33|33  <-MVs2[]
        --+-   =>  --+--
          |        12|12  <-MVs3[]
                   33|33  <-MVs4[]
    */
    SKL_COPY_MV(MVs [2], MVs [0]);
    SKL_COPY_MV(MVs [3], MVs [1]);
    SKL_COPY_MV(MVs3[0], MVs [0]);
    SKL_COPY_MV(MVs3[1], MVs [1]);
    SKL_COPY_MV(MVs3[2], MVs [0]);
    SKL_COPY_MV(MVs3[3], MVs [1]);
    SKL_COPY_MV(MVs2[2], MVs2[0]);
    SKL_COPY_MV(MVs2[3], MVs2[0]);
    SKL_COPY_MV(MVs4[0], MVs2[0]);
    SKL_COPY_MV(MVs4[1], MVs2[0]);
    SKL_COPY_MV(MVs4[2], MVs2[0]);
    SKL_COPY_MV(MVs4[3], MVs2[0]);
  }
}

//////////////////////////////////////////////////////////

static void Read_Reduced_I_VOP(SKL_FBB * const Bits, const SKL_MP4_I * const VOP)
{
  SKL_ASSERT(VOP->Reduced_VOP>0);
  SKL_MB MB(VOP);
  SKL_INT16 Base[7*64+SKL_ALIGN];
  SKL_INT16 * const In = (SKL_INT16*)SKL_ALIGN_PTR(Base, SKL_ALIGN);

  while(MB.y<VOP->MB_H)
  {
    MB.Init_Scanline(VOP, 0);
    while(MB.x<VOP->MB_W)
    {
      MB.Decode_Intra_Infos(Bits);
      MB.Decode_Intra_Blocks(Bits, In, VOP);
      MB.Copy_16To8_Upsampled(In+1*64);
      MB.Store_Zero_MV();
      MB.Store_Map_Infos();
      Post_Filter(&MB);
      MB.Expand_Reduced();
      if (!MB.Resync || !MB.Resync_Marker(Bits)) MB.Next_Reduced();

      if (VOP->Debug_Level==5)
      {
        printf( "%c", ".-="[1+MB.Field_DCT] );
        if (MB.x==VOP->MB_W) printf( "\n" );
      }
    }
    if (VOP->Debug_Level==2) VOP->Dump_Line(0, &MB);
    if (MB.y>0 && VOP->Slicer)
      VOP->Slicer(VOP->Cur, (MB.y-2)*16, 32, VOP->Slicer_Data);
    MB.y += 2;
  }
  if (MB.y>0 && VOP->Slicer)
    VOP->Slicer(VOP->Cur, (MB.y-2)*16, 32, VOP->Slicer_Data);
}

//////////////////////////////////////////////////////////

#define SCALE_UP_MV(x) ( ((x)<<1) - ((x)>0) + ((x)<0) )
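// The macro doubles a reduced-resolution MV component and pulls it one unit
// back toward zero, e.g.:
//   SCALE_UP_MV(0)  ==  0
//   SCALE_UP_MV(3)  ==  5      // (3<<1) - 1
//   SCALE_UP_MV(-2) == -3      // (-2<<1) + 1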

void SKL_MB::Predict_Reduced_With_0MV() const
{
  const SKL_MB_FUNCS * const Ops = VOL->Copy_Ops;
  Ops->HP_16x8[0](Y1,                Y1                + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1          +BpS8, Y1          +BpS8 + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+16,             Y1+16             + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+16+BpS8,        Y1+16+BpS8        + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+2*BpS8,         Y1+2*BpS8         + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+2*BpS8   +BpS8, Y1+2*BpS8   +BpS8 + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+2*BpS8+16,      Y1+2*BpS8+16      + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](Y1+2*BpS8+16+BpS8, Y1+2*BpS8+16+BpS8 + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](U,      U      + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](U+BpS8, U+BpS8 + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](V,      V      + Fwd_CoLoc, BpS);
  Ops->HP_16x8[0](V+BpS8, V+BpS8 + Fwd_CoLoc, BpS);
}

void SKL_MB::Predict_Reduced_With_1MV(const SKL_MV MV) const
{
  const SKL_MB_FUNCS * const Ops = VOL->Copy_Ops;
  SKL_MV Tmp;
  Tmp[0] = SCALE_UP_MV(MV[0]);
  Tmp[1] = SCALE_UP_MV(MV[1]);
  Clip(Tmp, Tmp);   // TODO: dunno if it's correct.
                    // should we clip on 4x16x16 basis
                    // instead of a 32x32 one??
  if (!VOL->Quarter)
  {
    Predict_16x16(Y1,           Y1           +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16(Y1+16,        Y1+16        +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16(Y1+2*BpS8,    Y1+2*BpS8    +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16(Y1+2*BpS8+16, Y1+2*BpS8+16 +Fwd_CoLoc, Tmp, Ops);
  }
  else
  {
    // mirroring problem: should be 32x32 quarter prediction?
    Predict_16x16_QP(Y1,           Y1           +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16_QP(Y1+16,        Y1+16        +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16_QP(Y1+2*BpS8,    Y1+2*BpS8    +Fwd_CoLoc, Tmp, Ops);
    Predict_16x16_QP(Y1+2*BpS8+16, Y1+2*BpS8+16 +Fwd_CoLoc, Tmp, Ops);
  }
  SKL_MV uv_MV;
  Derive_uv_MV_From_1MV(uv_MV, Tmp);
  const int Halves = (uv_MV[0]&1) | ((uv_MV[1]&1)<<1);
  const int Off = Fwd_CoLoc + (uv_MV[1]>>1)*BpS + (uv_MV[0]>>1);
  Ops->HP_16x8[Halves](U,      U     +Off, BpS);
  Ops->HP_16x8[Halves](V,      V     +Off, BpS);
  Ops->HP_16x8[Halves](U+BpS8, U+BpS8+Off, BpS);
  Ops->HP_16x8[Halves](V+BpS8, V+BpS8+Off, BpS);
}

void SKL_MB::Predict_Reduced_Fields(const SKL_MV MV[2], const int Fld_Dirs) const
{
  const SKL_MB_FUNCS * const Ops = VOL->Copy_Ops;
  int Off, Halves;
  SKL_MV uv_MV;
  SKL_MV Tmp;

  // 1st field
  Tmp[0] = SCALE_UP_MV(MV[0][0]);
  Tmp[1] = SCALE_UP_MV(MV[0][1]);
  Clip_Field(Tmp, Tmp);   // TODO: dunno if it's correct.
  Off = Fwd_CoLoc;
  if (Fld_Dirs&2) Off += BpS;
  if (!VOL->Quarter)
  {
    Predict_16x8_Field(Y1,              Y1           +Off, Tmp, Ops);
    Predict_16x8_Field(Y1+16,           Y1+16        +Off, Tmp, Ops);
    Predict_16x8_Field(Y1   +2*BpS8,    Y1   +2*BpS8 +Off, Tmp, Ops);
    Predict_16x8_Field(Y1+16+2*BpS8,    Y1+16+2*BpS8 +Off, Tmp, Ops);
  }
  else
  {
    Predict_16x8_Field_QP(Y1,           Y1           +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1+16,        Y1+16        +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1   +2*BpS8, Y1   +2*BpS8 +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1+16+2*BpS8, Y1+16+2*BpS8 +Off, Tmp, Ops);
  }
  Derive_uv_MV_From_1MV(uv_MV, Tmp);
  Halves = (uv_MV[0]&1) | ((uv_MV[1]&1)<<1);
  Off += (uv_MV[0]>>1) + (uv_MV[1]&~1)*BpS;
  Ops->HP_16x8[Halves](U, U+Off, 2*BpS);
  Ops->HP_16x8[Halves](V, V+Off, 2*BpS);

  // 2nd field
  Tmp[0] = SCALE_UP_MV(MV[1][0]);
  Tmp[1] = SCALE_UP_MV(MV[1][1]);
  Clip_Field(Tmp, Tmp);   // TODO: (still) dunno if it's correct.
  Off = Fwd_CoLoc;
  if (Fld_Dirs&1) Off += BpS;
  if (!VOL->Quarter)
  {
    Predict_16x8_Field(Y1+BpS,              Y1           +Off, Tmp, Ops);
    Predict_16x8_Field(Y1+BpS+16,           Y1+16        +Off, Tmp, Ops);
    Predict_16x8_Field(Y1+BpS   +2*BpS8,    Y1   +2*BpS8 +Off, Tmp, Ops);
    Predict_16x8_Field(Y1+BpS+16+2*BpS8,    Y1+16+2*BpS8 +Off, Tmp, Ops);
  }
  else
  {
    Predict_16x8_Field_QP(Y1+BpS,           Y1           +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1+BpS+16,        Y1+16        +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1+BpS   +2*BpS8, Y1   +2*BpS8 +Off, Tmp, Ops);
    Predict_16x8_Field_QP(Y1+BpS+16+2*BpS8, Y1+16+2*BpS8 +Off, Tmp, Ops);
  }
  Derive_uv_MV_From_1MV(uv_MV, Tmp);
  Halves = (uv_MV[0]&1) | ((uv_MV[1]&1)<<1);
  Off += (uv_MV[0]>>1) + (uv_MV[1]&~1)*BpS;
  Ops->HP_16x8[Halves](U+BpS, U+Off, 2*BpS);
  Ops->HP_16x8[Halves](V+BpS, V+Off, 2*BpS);
}

void SKL_MB::Predict_Reduced_With_4MV(const SKL_MV MV1[2], const SKL_MV MV2[2]) const
{
  const SKL_MB_FUNCS * const Ops = VOL->Copy_Ops;
  SKL_MV Tmp[4];
  Tmp[0][0] = SCALE_UP_MV(MV1[0][0]);
  Tmp[0][1] = SCALE_UP_MV(MV1[0][1]);
  Tmp[1][0] = SCALE_UP_MV(MV1[1][0]);
  Tmp[1][1] = SCALE_UP_MV(MV1[1][1]);
  Tmp[2][0] = SCALE_UP_MV(MV2[0][0]);
  Tmp[2][1] = SCALE_UP_MV(MV2[0][1]);
  Tmp[3][0] = SCALE_UP_MV(MV2[1][0]);