/* cxarithm.cpp — OpenCV cxcore element-wise arithmetic kernels (excerpt).
   NOTE(review): the two lines originally here were web-page scrape
   artifacts ("?? cxarithm.cpp" and a Chinese font-size label), not code. */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* ////////////////////////////////////////////////////////////////////
//
// CvMat arithmetic operations: +, - ...
//
// */
#include "_cxcore.h"
/****************************************************************************************\
* Arithmetic operations (+, -) without mask *
\****************************************************************************************/
/* Inner row kernel for element-wise binary operations (no mask).
   Processes the row four elements at a time (manually unrolled, two
   independent temporaries t0/t1 to help instruction scheduling), then
   finishes the remaining 0-3 elements in a scalar tail loop.
   Parameters:
     __op__     - binary operation macro, e.g. CV_ADD(a,b) or CV_SUB_R(a,b)
     worktype   - intermediate type wide enough to hold the raw result
                  before saturation (e.g. int for 8u/16s inputs)
     cast_macro - cast from worktype back to the destination element type
     len        - row length in elements
   Expects pointers named `src1`, `src2` and `dst` to be in scope at the
   expansion site; `len` is read but not modified. */
#define ICV_DEF_BIN_ARI_OP_CASE( __op__, worktype, cast_macro, len )\
{ \
int i; \
\
for( i = 0; i <= (len) - 4; i += 4 ) \
{ \
worktype t0 = __op__((src1)[i], (src2)[i]); \
worktype t1 = __op__((src1)[i+1], (src2)[i+1]); \
\
(dst)[i] = cast_macro( t0 ); \
(dst)[i+1] = cast_macro( t1 ); \
\
t0 = __op__((src1)[i+2],(src2)[i+2]); \
t1 = __op__((src1)[i+3],(src2)[i+3]); \
\
(dst)[i+2] = cast_macro( t0 ); \
(dst)[i+3] = cast_macro( t1 ); \
} \
\
for( ; i < (len); i++ ) \
{ \
worktype t0 = __op__((src1)[i],(src2)[i]); \
(dst)[i] = cast_macro( t0 ); \
} \
}
/* Generates a complete 2D (row-by-row) binary-operation function through
   IPCVAPI_IMPL, so the implementation can be transparently substituted by
   an Intel IPP primitive of the same signature when IPP is available.
   Generated signature:
     CvStatus name( const type* src1, int step1, const type* src2,
                    int step2, type* dst, int step, CvSize size );
   The step arguments arrive in bytes and are converted to element counts
   on entry. A single-column image (size.width == 1) is special-cased so
   that each row is handled with one scalar operation instead of invoking
   the unrolled row kernel. Always returns CV_OK. */
#define ICV_DEF_BIN_ARI_OP_2D( __op__, name, type, worktype, cast_macro ) \
IPCVAPI_IMPL( CvStatus, name, \
( const type* src1, int step1, const type* src2, int step2, \
type* dst, int step, CvSize size ), \
(src1, step1, src2, step2, dst, step, size) ) \
{ \
step1/=sizeof(src1[0]); step2/=sizeof(src2[0]); step/=sizeof(dst[0]); \
\
if( size.width == 1 ) \
{ \
for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
{ \
worktype t0 = __op__((src1)[0],(src2)[0]); \
(dst)[0] = cast_macro( t0 ); \
} \
} \
else \
{ \
for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
{ \
ICV_DEF_BIN_ARI_OP_CASE( __op__, worktype, \
cast_macro, size.width ); \
} \
} \
\
return CV_OK; \
}
/* Same generator as ICV_DEF_BIN_ARI_OP_2D, but the emitted function takes
   a trailing `scalefactor` argument. The argument is deliberately ignored
   (note the commented-out parameter name and the constant 0 passed in the
   IPCVAPI_IMPL forwarding list): it exists only so the signature matches
   the corresponding IPP "...Sfs" primitives used for the integer depths
   (8u/16u/16s). The body is intentionally identical to the non-SFS
   variant — do not "deduplicate" it, as the two macros must remain
   independently substitutable by IPP. */
#define ICV_DEF_BIN_ARI_OP_2D_SFS(__op__, name, type, worktype, cast_macro) \
IPCVAPI_IMPL( CvStatus, name, \
( const type* src1, int step1, const type* src2, int step2, \
type* dst, int step, CvSize size, int /*scalefactor*/ ), \
(src1, step1, src2, step2, dst, step, size, 0) ) \
{ \
step1/=sizeof(src1[0]); step2/=sizeof(src2[0]); step/=sizeof(dst[0]); \
\
if( size.width == 1 ) \
{ \
for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
{ \
worktype t0 = __op__((src1)[0],(src2)[0]); \
(dst)[0] = cast_macro( t0 ); \
} \
} \
else \
{ \
for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
{ \
ICV_DEF_BIN_ARI_OP_CASE( __op__, worktype, \
cast_macro, size.width ); \
} \
} \
\
return CV_OK; \
}
/* Inner row kernel for "scalar op array" operations, unrolled by 12.
   The caller must supply a `scalar` buffer in which indices [0..11] are
   valid whenever len >= 12 (presumably the per-channel scalar values
   replicated to fill 12 slots, so that a fixed index pattern works for
   1-, 2-, 3- and 4-channel data — confirm against the cvSub/cvAddS
   callers outside this excerpt).
   WARNING: this macro is destructive on its arguments — the unrolled
   loop advances (src) and (dst) in place and decrements `len` by 12 per
   iteration; the tail loop first restores the remainder with
   `(len) += 12` and then handles the last 0-11 elements. Pass copies
   (see ICV_DEF_UN_ARI_OP_2D, which uses tsrc/tdst/width temporaries). */
#define ICV_DEF_UN_ARI_OP_CASE( __op__, worktype, cast_macro, \
src, scalar, dst, len ) \
{ \
int i; \
\
for( ; ((len) -= 12) >= 0; (dst) += 12, (src) += 12 ) \
{ \
worktype t0 = __op__((scalar)[0], (src)[0]); \
worktype t1 = __op__((scalar)[1], (src)[1]); \
\
(dst)[0] = cast_macro( t0 ); \
(dst)[1] = cast_macro( t1 ); \
\
t0 = __op__((scalar)[2], (src)[2]); \
t1 = __op__((scalar)[3], (src)[3]); \
\
(dst)[2] = cast_macro( t0 ); \
(dst)[3] = cast_macro( t1 ); \
\
t0 = __op__((scalar)[4], (src)[4]); \
t1 = __op__((scalar)[5], (src)[5]); \
\
(dst)[4] = cast_macro( t0 ); \
(dst)[5] = cast_macro( t1 ); \
\
t0 = __op__((scalar)[6], (src)[6]); \
t1 = __op__((scalar)[7], (src)[7]); \
\
(dst)[6] = cast_macro( t0 ); \
(dst)[7] = cast_macro( t1 ); \
\
t0 = __op__((scalar)[8], (src)[8]); \
t1 = __op__((scalar)[9], (src)[9]); \
\
(dst)[8] = cast_macro( t0 ); \
(dst)[9] = cast_macro( t1 ); \
\
t0 = __op__((scalar)[10], (src)[10]); \
t1 = __op__((scalar)[11], (src)[11]); \
\
(dst)[10] = cast_macro( t0 ); \
(dst)[11] = cast_macro( t1 ); \
} \
\
for( (len) += 12, i = 0; i < (len); i++ ) \
{ \
worktype t0 = __op__((scalar)[i],(src)[i]); \
(dst)[i] = cast_macro( t0 ); \
} \
}
/* Generates a static 2D function applying __op__ between a scalar buffer
   and every element of src, writing to dst:
     static CvStatus name( const type* src, int step1, type* dst,
                           int step, CvSize size, const worktype* scalar );
   Unlike the binary generators this one is plain `static CV_STDCALL`
   (no IPCVAPI_IMPL wrapper), i.e. never IPP-substituted.
   Steps are in bytes and converted to element units on entry.
   For width == 1 only scalar[0] is used; otherwise per-row temporaries
   (tsrc, tdst, width) are passed to ICV_DEF_UN_ARI_OP_CASE because that
   kernel modifies its pointer and length arguments in place.
   Always returns CV_OK. */
#define ICV_DEF_UN_ARI_OP_2D( __op__, name, type, worktype, cast_macro ) \
static CvStatus CV_STDCALL name \
( const type* src, int step1, type* dst, int step, \
CvSize size, const worktype* scalar ) \
{ \
step1 /= sizeof(src[0]); step /= sizeof(dst[0]); \
\
if( size.width == 1 ) \
{ \
for( ; size.height--; src += step1, dst += step ) \
{ \
worktype t0 = __op__(*(scalar),*(src)); \
*(dst) = cast_macro( t0 ); \
} \
} \
else \
{ \
for( ; size.height--; src += step1, dst += step ) \
{ \
const type *tsrc = src; \
type *tdst = dst; \
int width = size.width; \
\
ICV_DEF_UN_ARI_OP_CASE( __op__, worktype, cast_macro, \
tsrc, scalar, tdst, width ); \
} \
} \
\
return CV_OK; \
}
/* Instantiates the binary-operation function family icv<name>_<depth>_C1R
   for all supported depths. The integer depths 8u/16u/16s use the SFS
   form (extra ignored scalefactor argument, matching the IPP "...Sfs"
   signatures); 32s/32f/64f use the plain form. No 8s variant is
   generated (the dispatch table below stores 0 for CV_8S).
   cast_8u lets the caller choose the 8-bit saturation cast. */
#define ICV_DEF_BIN_ARI_ALL( __op__, name, cast_8u ) \
ICV_DEF_BIN_ARI_OP_2D_SFS( __op__, icv##name##_8u_C1R, uchar, int, cast_8u ) \
ICV_DEF_BIN_ARI_OP_2D_SFS( __op__, icv##name##_16u_C1R, ushort, int, CV_CAST_16U ) \
ICV_DEF_BIN_ARI_OP_2D_SFS( __op__, icv##name##_16s_C1R, short, int, CV_CAST_16S ) \
ICV_DEF_BIN_ARI_OP_2D( __op__, icv##name##_32s_C1R, int, int, CV_CAST_32S ) \
ICV_DEF_BIN_ARI_OP_2D( __op__, icv##name##_32f_C1R, float, float, CV_CAST_32F ) \
ICV_DEF_BIN_ARI_OP_2D( __op__, icv##name##_64f_C1R, double, double, CV_CAST_64F )
/* Instantiates the scalar-vs-array function family icv<name>_<depth>_C1R
   for all supported depths (again no 8s variant). These are the static,
   non-IPP functions produced by ICV_DEF_UN_ARI_OP_2D. */
#define ICV_DEF_UN_ARI_ALL( __op__, name ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_8u_C1R, uchar, int, CV_CAST_8U ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_16u_C1R, ushort, int, CV_CAST_16U ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_16s_C1R, short, int, CV_CAST_16S ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_32s_C1R, int, int, CV_CAST_32S ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_32f_C1R, float, float, CV_CAST_32F ) \
ICV_DEF_UN_ARI_OP_2D( __op__, icv##name##_64f_C1R, double, double, CV_CAST_64F )
/* CV_SUB_R(a,b) evaluates to (b) - (a), i.e. the operands reversed.
   The Sub family is generated with it so that the emitted kernels compute
   src2 - src1; cvSub() swaps its two source pointers before dispatching
   ("to comply with IPP" — see the CV_SWAP in cvSub below), so the net
   user-visible result is still src1 - src2.
   AddC uses CV_ADD (scalar + element); SubRC uses CV_SUB with the scalar
   as the first operand, giving scalar - element (reverse subtraction;
   CV_ADD/CV_SUB themselves are defined in _cxcore.h). */
#undef CV_SUB_R
#define CV_SUB_R(a,b) ((b) - (a))
ICV_DEF_BIN_ARI_ALL( CV_ADD, Add, CV_FAST_CAST_8U )
ICV_DEF_BIN_ARI_ALL( CV_SUB_R, Sub, CV_FAST_CAST_8U )
ICV_DEF_UN_ARI_ALL( CV_ADD, AddC )
ICV_DEF_UN_ARI_ALL( CV_SUB, SubRC )
/* Generates icvInit<FUNCNAME><FLAG>Table(), which fills a CvFuncTable
   with the depth-specific function pointers produced above, indexed by
   the CV_<depth> constants. CV_8S is explicitly 0: there is no signed
   8-bit kernel, and dispatchers must check for a null entry. Pointers
   are stored as void* and cast back to the proper function type at the
   call site. */
#define ICV_DEF_INIT_ARITHM_FUNC_TAB( FUNCNAME, FLAG ) \
static void icvInit##FUNCNAME##FLAG##Table( CvFuncTable* tab )\
{ \
tab->fn_2d[CV_8U] = (void*)icv##FUNCNAME##_8u_##FLAG; \
tab->fn_2d[CV_8S] = 0; \
tab->fn_2d[CV_16U] = (void*)icv##FUNCNAME##_16u_##FLAG; \
tab->fn_2d[CV_16S] = (void*)icv##FUNCNAME##_16s_##FLAG; \
tab->fn_2d[CV_32S] = (void*)icv##FUNCNAME##_32s_##FLAG; \
tab->fn_2d[CV_32F] = (void*)icv##FUNCNAME##_32f_##FLAG; \
tab->fn_2d[CV_64F] = (void*)icv##FUNCNAME##_64f_##FLAG; \
}
/* Emit the four single-channel (C1R) dispatch-table initializers used by
   cvSub/cvAdd and their scalar variants. */
ICV_DEF_INIT_ARITHM_FUNC_TAB( Sub, C1R )
ICV_DEF_INIT_ARITHM_FUNC_TAB( SubRC, C1R )
ICV_DEF_INIT_ARITHM_FUNC_TAB( Add, C1R )
ICV_DEF_INIT_ARITHM_FUNC_TAB( AddC, C1R )
/****************************************************************************************\
* External Functions for Arithmetic Operations *
\****************************************************************************************/
/*************************************** S U B ******************************************/
CV_IMPL void
cvSub( const void* srcarr1, const void* srcarr2,
void* dstarr, const void* maskarr )
{
static CvFuncTable sub_tab;
static int inittab = 0;
int local_alloc = 1;
uchar* buffer = 0;
CV_FUNCNAME( "cvSub" );
__BEGIN__;
const CvArr* tmp;
int y, dy, type, depth, cn, cont_flag = 0;
int src1_step, src2_step, dst_step, tdst_step, mask_step;
CvMat srcstub1, srcstub2, *src1, *src2;
CvMat dststub, *dst = (CvMat*)dstarr;
CvMat maskstub, *mask = (CvMat*)maskarr;
CvMat dstbuf, *tdst;
CvFunc2D_3A func;
CvFunc2D_3A1I func_sfs;
CvCopyMaskFunc copym_func;
CvSize size, tsize;
CV_SWAP( srcarr1, srcarr2, tmp ); // to comply with IPP
src1 = (CvMat*)srcarr1;
src2 = (CvMat*)srcarr2;
if( !CV_IS_MAT(src1) || !CV_IS_MAT(src2) || !CV_IS_MAT(dst))
{
if( CV_IS_MATND(src1) || CV_IS_MATND(src2) || CV_IS_MATND(dst))
{
CvArr* arrs[] = { src1, src2, dst };
CvMatND stubs[3];
CvNArrayIterator iterator;
if( maskarr )
CV_ERROR( CV_StsBadMask,
"This operation on multi-dimensional arrays does not support mask" );
CV_CALL( cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator ));
type = iterator.hdr[0]->type;
/* NOTE(review): the excerpt is truncated at this point — the remainder of
   cvSub() and the rest of the original file are missing. The lines that
   previously followed were web-page UI artifacts (a Chinese-language
   keyboard-shortcut help panel: copy, search, full-screen, theme, font
   size), not source code, and have been removed. */