// gf2x.cpp (extracted fragment; website capture residue removed)
hn = (n+1) >> 1;
sp += (hn << 2) + 3;
n = hn;
}
stk.SetLength(sp);
_ntl_ulong *stk_p = stk.elts();
if (sa > sb) {
{ long t; t = sa; sa = sb; sb = t; }
ap = b.xrep.elts();
bp = a.xrep.elts();
}
else {
ap = a.xrep.elts();
bp = b.xrep.elts();
}
vec.SetLength(2*sa);
_ntl_ulong *v = vec.elts();
long i, j;
for (i = 0; i < sc; i++)
cp[i] = 0;
do {
if (sa == 0) break;
if (sa == 1) {
AddMul1(cp, bp, sb, ap[0]);
break;
}
// general case
for (i = 0; i+sa <= sb; i += sa) {
KarMul(v, ap, bp + i, sa, stk_p);
for (j = 0; j < 2*sa; j++)
cp[i+j] ^= v[j];
}
{ const _ntl_ulong *t; t = ap; ap = bp + i; bp = t; }
{ long t; t = sa; sa = sb - i; sb = t; }
cp = cp + i;
} while (1);
if (in_mem)
c.xrep = mem;
c.normalize();
}
void mul(GF2X& c, const GF2X& a, long b)
// c = a * (b mod 2): an even scalar annihilates a, an odd one preserves it
{
   const long bit = b & 1;
   if (bit == 0)
      clear(c);
   else
      c = a;
}
void mul(GF2X& c, const GF2X& a, GF2 b)
// c = a * b for a GF(2) scalar b: multiplying by 1 is the identity,
// multiplying by 0 gives the zero polynomial
{
   if (!(b == 1)) {
      clear(c);
      return;
   }
   c = a;
}
void trunc(GF2X& x, const GF2X& a, long m)
{
if (m < 0) Error("trunc: bad args");
long n = a.xrep.length();
if (n == 0 || m == 0) {
clear(x);
return;
}
if (&x == &a) {
if (n*NTL_BITS_PER_LONG > m) {
long wm = (m-1)/NTL_BITS_PER_LONG;
long bm = m - NTL_BITS_PER_LONG*wm;
_ntl_ulong msk;
if (bm == NTL_BITS_PER_LONG)
msk = ~(0UL);
else
msk = ((1UL << bm) - 1UL);
x.xrep[wm] &= msk;
x.xrep.QuickSetLength(wm+1);
x.normalize();
}
}
else if (n*NTL_BITS_PER_LONG <= m)
x = a;
else {
long wm = (m-1)/NTL_BITS_PER_LONG;
long bm = m - NTL_BITS_PER_LONG*wm;
x.xrep.SetLength(wm+1);
_ntl_ulong *xp = &x.xrep[0];
const _ntl_ulong *ap = &a.xrep[0];
long i;
for (i = 0; i < wm; i++)
xp[i] = ap[i];
_ntl_ulong msk;
if (bm == NTL_BITS_PER_LONG)
msk = ~(0UL);
else
msk = ((1UL << bm) - 1UL);
xp[wm] = ap[wm] & msk;
x.normalize();
}
}
/****** implementation of vec_GF2X ******/

// Instantiate the vector-of-GF2X machinery: the container itself,
// its stream I/O operators, and its equality comparison.
NTL_vector_impl(GF2X,vec_GF2X)

NTL_io_vector_impl(GF2X,vec_GF2X)

NTL_eq_vector_impl(GF2X,vec_GF2X)
// x = a * X (shift every coefficient up one bit position).
// Handles x aliasing a; grows the representation by one word exactly when
// the top bit of the leading word would otherwise be shifted out.
void MulByX(GF2X& x, const GF2X& a)
{
   long n = a.xrep.length();

   if (n == 0) {
      clear(x);
      return;
   }

   // top bit of the leading word set: the shift carries into a new word
   if (a.xrep[n-1] & (1UL << (NTL_BITS_PER_LONG-1))) {
      x.xrep.SetLength(n+1);
      x.xrep[n] = 1;
   }
   else if (&x != &a)
      x.xrep.SetLength(n);

   _ntl_ulong *xp = &x.xrep[0];
   const _ntl_ulong *ap = &x == &a ? xp : &a.xrep[0];
   long i;

   // downward traversal keeps reads (ap[i-1]) ahead of writes (xp[i])
   // when x aliases a
   for (i = n-1; i > 0; i--)
      xp[i] = (ap[i] << 1) | (ap[i-1] >> (NTL_BITS_PER_LONG-1));

   xp[0] = ap[0] << 1;

   // no need to normalize
}
// sqrtab[b] spreads the 8 bits of b into the even bit positions of a
// 16-bit value (bit i of b moves to bit 2i; odd positions are zero).
// Over GF(2), squaring a polynomial does exactly this to its
// coefficient bits, so a word can be squared byte by byte via this table.
static _ntl_ulong sqrtab[256] = {
0UL, 1UL, 4UL, 5UL, 16UL, 17UL, 20UL, 21UL, 64UL,
65UL, 68UL, 69UL, 80UL, 81UL, 84UL, 85UL, 256UL,
257UL, 260UL, 261UL, 272UL, 273UL, 276UL, 277UL, 320UL,
321UL, 324UL, 325UL, 336UL, 337UL, 340UL, 341UL, 1024UL,
1025UL, 1028UL, 1029UL, 1040UL, 1041UL, 1044UL, 1045UL, 1088UL,
1089UL, 1092UL, 1093UL, 1104UL, 1105UL, 1108UL, 1109UL, 1280UL,
1281UL, 1284UL, 1285UL, 1296UL, 1297UL, 1300UL, 1301UL, 1344UL,
1345UL, 1348UL, 1349UL, 1360UL, 1361UL, 1364UL, 1365UL, 4096UL,
4097UL, 4100UL, 4101UL, 4112UL, 4113UL, 4116UL, 4117UL, 4160UL,
4161UL, 4164UL, 4165UL, 4176UL, 4177UL, 4180UL, 4181UL, 4352UL,
4353UL, 4356UL, 4357UL, 4368UL, 4369UL, 4372UL, 4373UL, 4416UL,
4417UL, 4420UL, 4421UL, 4432UL, 4433UL, 4436UL, 4437UL, 5120UL,
5121UL, 5124UL, 5125UL, 5136UL, 5137UL, 5140UL, 5141UL, 5184UL,
5185UL, 5188UL, 5189UL, 5200UL, 5201UL, 5204UL, 5205UL, 5376UL,
5377UL, 5380UL, 5381UL, 5392UL, 5393UL, 5396UL, 5397UL, 5440UL,
5441UL, 5444UL, 5445UL, 5456UL, 5457UL, 5460UL, 5461UL, 16384UL,
16385UL, 16388UL, 16389UL, 16400UL, 16401UL, 16404UL, 16405UL, 16448UL,
16449UL, 16452UL, 16453UL, 16464UL, 16465UL, 16468UL, 16469UL, 16640UL,
16641UL, 16644UL, 16645UL, 16656UL, 16657UL, 16660UL, 16661UL, 16704UL,
16705UL, 16708UL, 16709UL, 16720UL, 16721UL, 16724UL, 16725UL, 17408UL,
17409UL, 17412UL, 17413UL, 17424UL, 17425UL, 17428UL, 17429UL, 17472UL,
17473UL, 17476UL, 17477UL, 17488UL, 17489UL, 17492UL, 17493UL, 17664UL,
17665UL, 17668UL, 17669UL, 17680UL, 17681UL, 17684UL, 17685UL, 17728UL,
17729UL, 17732UL, 17733UL, 17744UL, 17745UL, 17748UL, 17749UL, 20480UL,
20481UL, 20484UL, 20485UL, 20496UL, 20497UL, 20500UL, 20501UL, 20544UL,
20545UL, 20548UL, 20549UL, 20560UL, 20561UL, 20564UL, 20565UL, 20736UL,
20737UL, 20740UL, 20741UL, 20752UL, 20753UL, 20756UL, 20757UL, 20800UL,
20801UL, 20804UL, 20805UL, 20816UL, 20817UL, 20820UL, 20821UL, 21504UL,
21505UL, 21508UL, 21509UL, 21520UL, 21521UL, 21524UL, 21525UL, 21568UL,
21569UL, 21572UL, 21573UL, 21584UL, 21585UL, 21588UL, 21589UL, 21760UL,
21761UL, 21764UL, 21765UL, 21776UL, 21777UL, 21780UL, 21781UL, 21824UL,
21825UL, 21828UL, 21829UL, 21840UL, 21841UL, 21844UL, 21845UL };
// Square the one-word polynomial a, writing the two-word product to
// c[0] (low word) and c[1] (high word).  NTL_BB_SQR_CODE is a
// platform-selected macro that expands a's bytes via sqrtab, leaving
// the result in the locals lo and hi.
static inline
void sqr1(_ntl_ulong *c, _ntl_ulong a)
{
   _ntl_ulong hi, lo;

   NTL_BB_SQR_CODE

   c[0] = lo;
   c[1] = hi;
}
void sqr(GF2X& c, const GF2X& a)
// c = a^2.  Over GF(2) squaring has no cross terms: each source word
// expands independently into two result words.
{
   long sa = a.xrep.length();
   if (sa <= 0) {
      clear(c);
      return;
   }

   c.xrep.SetLength(2*sa);
   _ntl_ulong *cp = c.xrep.elts();
   const _ntl_ulong *ap = a.xrep.elts();

   // walk downward so ap[i] is consumed before cp[2*i] overwrites it
   // when c aliases a
   for (long i = sa-1; i >= 0; i--)
      sqr1(cp + 2*i, ap[i]);

   c.normalize();
}
// c = a * X^n.  A negative n performs a right shift instead; an n so
// negative that negating it would overflow a long simply yields zero.
void LeftShift(GF2X& c, const GF2X& a, long n)
{
   if (IsZero(a)) {
      clear(c);
      return;
   }

   // common special case: multiplication by X
   if (n == 1) {
      MulByX(c, a);
      return;
   }

   if (n < 0) {
      // -n is not representable when n < -NTL_MAX_LONG; such a huge
      // right shift produces zero anyway
      if (n < -NTL_MAX_LONG)
         clear(c);
      else
         RightShift(c, a, -n);
      return;
   }

   if (NTL_OVERFLOW(n, 1, 0))
      Error("overflow in LeftShift");

   if (n == 0) {
      c = a;
      return;
   }

   long sa = a.xrep.length();

   long wn = n/NTL_BITS_PER_LONG;        // whole-word part of the shift
   long bn = n - wn*NTL_BITS_PER_LONG;   // remaining bit part

   long sc = sa + wn;   // output size in words
   if (bn) sc++;        // a sub-word shift may spill into one extra word

   c.xrep.SetLength(sc);

   _ntl_ulong *cp = c.xrep.elts();
   const _ntl_ulong *ap = a.xrep.elts();

   long i;

   if (bn == 0) {
      // word-aligned: copy upward by wn words, zero-fill the low words
      for (i = sa+wn-1; i >= wn; i--)
         cp[i] = ap[i-wn];

      for (i = wn-1; i >= 0; i--)
         cp[i] = 0;
   }
   else {
      // each output word combines two adjacent input words; downward
      // traversal keeps reads ahead of writes when c aliases a
      cp[sa+wn] = ap[sa-1] >> (NTL_BITS_PER_LONG-bn);

      for (i = sa+wn-1; i >= wn+1; i--)
         cp[i] = (ap[i-wn] << bn) | (ap[i-wn-1] >> (NTL_BITS_PER_LONG-bn));

      cp[wn] = ap[0] << bn;

      for (i = wn-1; i >= 0; i--)
         cp[i] = 0;
   }

   c.normalize();
}
void ShiftAdd(GF2X& c, const GF2X& a, long n)
// c = c + a*X^n  (XOR a, shifted up n bit positions, into c)
{
   if (n < 0) Error("ShiftAdd: negative argument");

   if (n == 0) {
      add(c, c, a);
      return;
   }

   if (NTL_OVERFLOW(n, 1, 0))
      Error("overflow in ShiftAdd");

   long sa = a.xrep.length();
   if (sa <= 0) {
      // a == 0: nothing to add
      return;
   }

   long sc = c.xrep.length();

   long wn = n/NTL_BITS_PER_LONG;        // whole-word part of the shift
   long bn = n - wn*NTL_BITS_PER_LONG;   // remaining bit part

   long ss = sa + wn;   // words needed to hold the shifted a
   if (bn) ss++;

   if (ss > sc)
      c.xrep.SetLength(ss);

   _ntl_ulong *cp = c.xrep.elts();
   const _ntl_ulong *ap = a.xrep.elts();

   long i;

   // zero-fill any words beyond c's old length before XOR-ing into them
   for (i = sc; i < ss; i++)
      cp[i] = 0;

   if (bn == 0) {
      // word-aligned: XOR word by word
      for (i = sa+wn-1; i >= wn; i--)
         cp[i] ^= ap[i-wn];
   }
   else {
      // each shifted word combines two adjacent input words
      cp[sa+wn] ^= ap[sa-1] >> (NTL_BITS_PER_LONG-bn);

      for (i = sa+wn-1; i >= wn+1; i--)
         cp[i] ^= (ap[i-wn] << bn) | (ap[i-wn-1] >> (NTL_BITS_PER_LONG-bn));

      cp[wn] ^= ap[0] << bn;
   }

   c.normalize();
}
void RightShift(GF2X& c, const GF2X& a, long n)
// c = a div X^n (coefficients below position n are discarded).
// A negative n left-shifts instead; n == LONG_MIN cannot be negated
// and is rejected.
{
   if (IsZero(a)) {
      clear(c);
      return;
   }

   if (n < 0) {
      if (n < -NTL_MAX_LONG) Error("overflow in RightShift");
      LeftShift(c, a, -n);
      return;
   }

   if (n == 0) {
      c = a;
      return;
   }

   long sa = a.xrep.length();
   long wn = n/NTL_BITS_PER_LONG;        // whole-word part of the shift
   long bn = n - wn*NTL_BITS_PER_LONG;   // remaining bit part

   // shifting past every stored word leaves nothing
   if (wn >= sa) {
      clear(c);
      return;
   }

   long sc = sa - wn;   // output size in words
   c.xrep.SetLength(sc);
   _ntl_ulong *cp = c.xrep.elts();
   const _ntl_ulong *ap = a.xrep.elts();

   if (bn == 0) {
      // word-aligned: straight copy downward by wn words
      for (long i = 0; i < sc; i++)
         cp[i] = ap[i+wn];
   }
   else {
      // each output word combines two adjacent input words; upward
      // traversal keeps reads ahead of writes when c aliases a
      for (long i = 0; i < sc-1; i++)
         cp[i] = (ap[i+wn] >> bn) | (ap[i+wn+1] << (NTL_BITS_PER_LONG - bn));
      cp[sc-1] = ap[sa-1] >> bn;
   }

   c.normalize();
}
// revtab[b] is the 8-bit value b with its bit order reversed
// (bit i moves to bit 7-i).  Used by rev1 to bit-reverse a full word
// one byte at a time.
static _ntl_ulong revtab[256] = {
0UL, 128UL, 64UL, 192UL, 32UL, 160UL, 96UL, 224UL, 16UL, 144UL,
80UL, 208UL, 48UL, 176UL, 112UL, 240UL, 8UL, 136UL, 72UL, 200UL,
40UL, 168UL, 104UL, 232UL, 24UL, 152UL, 88UL, 216UL, 56UL, 184UL,
120UL, 248UL, 4UL, 132UL, 68UL, 196UL, 36UL, 164UL, 100UL, 228UL,
20UL, 148UL, 84UL, 212UL, 52UL, 180UL, 116UL, 244UL, 12UL, 140UL,
76UL, 204UL, 44UL, 172UL, 108UL, 236UL, 28UL, 156UL, 92UL, 220UL,
60UL, 188UL, 124UL, 252UL, 2UL, 130UL, 66UL, 194UL, 34UL, 162UL,
98UL, 226UL, 18UL, 146UL, 82UL, 210UL, 50UL, 178UL, 114UL, 242UL,
10UL, 138UL, 74UL, 202UL, 42UL, 170UL, 106UL, 234UL, 26UL, 154UL,
90UL, 218UL, 58UL, 186UL, 122UL, 250UL, 6UL, 134UL, 70UL, 198UL,
38UL, 166UL, 102UL, 230UL, 22UL, 150UL, 86UL, 214UL, 54UL, 182UL,
118UL, 246UL, 14UL, 142UL, 78UL, 206UL, 46UL, 174UL, 110UL, 238UL,
30UL, 158UL, 94UL, 222UL, 62UL, 190UL, 126UL, 254UL, 1UL, 129UL,
65UL, 193UL, 33UL, 161UL, 97UL, 225UL, 17UL, 145UL, 81UL, 209UL,
49UL, 177UL, 113UL, 241UL, 9UL, 137UL, 73UL, 201UL, 41UL, 169UL,
105UL, 233UL, 25UL, 153UL, 89UL, 217UL, 57UL, 185UL, 121UL, 249UL,
5UL, 133UL, 69UL, 197UL, 37UL, 165UL, 101UL, 229UL, 21UL, 149UL,
85UL, 213UL, 53UL, 181UL, 117UL, 245UL, 13UL, 141UL, 77UL, 205UL,
45UL, 173UL, 109UL, 237UL, 29UL, 157UL, 93UL, 221UL, 61UL, 189UL,
125UL, 253UL, 3UL, 131UL, 67UL, 195UL, 35UL, 163UL, 99UL, 227UL,
19UL, 147UL, 83UL, 211UL, 51UL, 179UL, 115UL, 243UL, 11UL, 139UL,
75UL, 203UL, 43UL, 171UL, 107UL, 235UL, 27UL, 155UL, 91UL, 219UL,
59UL, 187UL, 123UL, 251UL, 7UL, 135UL, 71UL, 199UL, 39UL, 167UL,
103UL, 231UL, 23UL, 151UL, 87UL, 215UL, 55UL, 183UL, 119UL, 247UL,
15UL, 143UL, 79UL, 207UL, 47UL, 175UL, 111UL, 239UL, 31UL, 159UL,
95UL, 223UL, 63UL, 191UL, 127UL, 255UL };
// Reverse the bit order of a full word.  NTL_BB_REV_CODE is a
// platform-selected macro expression built on the revtab byte table.
static inline
_ntl_ulong rev1(_ntl_ulong a)
{
   return NTL_BB_REV_CODE;
}
void CopyReverse(GF2X& c, const GF2X& a, long hi)
// c[0..hi] = reverse(a[0..hi]), with zero fill as necessary
// input may alias output
{
   if (hi < 0) { clear(c); return; }

   if (NTL_OVERFLOW(hi, 1, 0))
      Error("overflow in CopyReverse");

   long n = hi+1;                // number of bit positions to reverse
   long sa = a.xrep.length();
   if (n <= 0 || sa <= 0) {
      clear(c);
      return;
   }

   long wn = n/NTL_BITS_PER_LONG;
   long bn = n - wn*NTL_BITS_PER_LONG;

   if (bn != 0) {
      wn++;
      // left shift needed to push bit hi to the top of the highest word
      bn = NTL_BITS_PER_LONG - bn;
   }

   c.xrep.SetLength(wn);

   _ntl_ulong *cp = c.xrep.elts();
   const _ntl_ulong *ap = a.xrep.elts();

   long mm = min(sa, wn);   // words of a actually available
   long i;

   // phase 1: copy a's words into c, zero-padding out to wn words
   for (i = 0; i < mm; i++)
      cp[i] = ap[i];

   for (i = mm; i < wn; i++)
      cp[i] = 0;

   // phase 2: top-align bit hi (downward traversal, in-place safe)
   if (bn != 0) {
      for (i = wn-1; i >= 1; i--)
         cp[i] = (cp[i] << bn) | (cp[i-1] >> (NTL_BITS_PER_LONG-bn));

      cp[0] = cp[0] << bn;
   }

   // phase 3: reverse the word order...
   for (i = 0; i < wn/2; i++) {
      _ntl_ulong t; t = cp[i]; cp[i] = cp[wn-1-i]; cp[wn-1-i] = t;
   }

   // ...then the bits within each word
   for (i = 0; i < wn; i++)
      cp[i] = rev1(cp[i]);

   c.normalize();
}
void div(GF2X& q, const GF2X& a, GF2 b)
// q = a / b for a GF(2) scalar b: the only invertible element is 1,
// so a successful division is just a copy
{
   if (b == 0) {
      Error("div: division by zero");
   }
   q = a;
}
void div(GF2X& q, const GF2X& a, long b)
// q = a / (b mod 2): an even b reduces to zero in GF(2) and cannot
// divide; an odd b reduces to 1, making the division a copy
{
   const long bit = b & 1;
   if (bit == 0) {
      Error("div: division by zero");
   }
   q = a;
}
// Build x from the n bytes at p, least-significant byte first:
// bit j of byte i becomes coefficient 8*i + j of x.  n <= 0 yields zero.
// Assumes NTL_BITS_PER_LONG is a multiple of 8.
void GF2XFromBytes(GF2X& x, const unsigned char *p, long n)
{
   if (n <= 0) {
      x = 0;
      return;
   }

   const long BytesPerLong = NTL_BITS_PER_LONG/8;

   long lw, r, i, j;

   // lw = words needed; r = bytes occupying the last word (1..BytesPerLong)
   lw = n/BytesPerLong;
   r = n - lw*BytesPerLong;

   if (r != 0)
      lw++;
   else
      r = BytesPerLong;

   x.xrep.SetLength(lw);
   unsigned long *xp = x.xrep.elts();

   // pack full words: each byte enters at the top and earlier bytes are
   // shifted down, so the first byte ends in the lowest bit positions
   for (i = 0; i < lw-1; i++) {
      unsigned long t = 0;
      for (j = 0; j < BytesPerLong; j++) {
         t >>= 8;
         t += (((unsigned long)(*p)) & 255UL) << ((BytesPerLong-1)*8);
         p++;
      }
      xp[i] = t;
   }

   // last (possibly partial) word: pack r bytes the same way, then
   // right-justify them
   unsigned long t = 0;
   for (j = 0; j < r; j++) {
      t >>= 8;
      t += (((unsigned long)(*p)) & 255UL) << ((BytesPerLong-1)*8);
      p++;
   }

   t >>= (BytesPerLong-r)*8;
   xp[lw-1] = t;

   x.normalize();
}
// Write a's coefficients to p as bytes, least-significant byte first,
// emitting exactly n bytes (zero-padded past a's significant bytes).
// Inverse of GF2XFromBytes when n is large enough.
void BytesFromGF2X(unsigned char *p, const GF2X& a, long n)
{
   if (n < 0) n = 0;

   const long BytesPerLong = NTL_BITS_PER_LONG/8;

   long lbits = deg(a) + 1;      // significant bits in a (0 when a == 0)
   long lbytes = (lbits+7)/8;    // bytes needed to hold them

   long min_bytes = min(lbytes, n);   // bytes actually taken from a

   // min_words = words covering min_bytes; r = bytes used of the last word
   long min_words = min_bytes/BytesPerLong;
   long r = min_bytes - min_words*BytesPerLong;
   if (r != 0)
      min_words++;
   else
      r = BytesPerLong;

   const unsigned long *ap = a.xrep.elts();

   long i, j;

   // emit full words, low byte first
   for (i = 0; i < min_words-1; i++) {
      unsigned long t = ap[i];
      for (j = 0; j < BytesPerLong; j++) {
         *p = t & 255UL;
         t >>= 8;
         p++;
      }
   }

   // emit the r used bytes of the last word (guard covers a == 0,
   // where min_words is 0 and ap may be empty)
   if (min_words > 0) {
      unsigned long t = ap[min_words-1];
      for (j = 0; j < r; j++) {
         *p = t & 255UL;
         t >>= 8;
         p++;
      }
   }

   // zero-fill the remainder of the n requested bytes
   for (j = min_bytes; j < n; j++) {
      *p = 0;
      p++;
   }
}
NTL_END_IMPL