secp256k1ScalarReduce512 static method
Implementation
/// Reduces a 512-bit value [l] (eight 64-bit little-endian limbs held as
/// BigInts) modulo the secp256k1 group order n, writing the 256-bit result
/// into [r].
///
/// NOTE(review): this mirrors `secp256k1_scalar_reduce_512` from
/// libsecp256k1's `scalar_4x64_impl.h` — confirm against upstream. The
/// reduction exploits n's complement N_C = 2^256 - n (limbs
/// `secp256k1NC0`/`secp256k1NC1`; N_C's higher limbs are 1 and 0, which is
/// why the code adds limbs like `n0` directly instead of multiplying):
///   pass 1: 512 -> 385 bits, pass 2: 385 -> 258 bits,
///   pass 3: 258 -> 256 bits, then a final conditional subtraction of n.
static void secp256k1ScalarReduce512(Secp256k1Scalar r, List<BigInt> l) {
// 128-bit accumulator, used only in the final 258 -> 256 bit pass.
Secp256k1Uint128 c128 = Secp256k1Uint128();
// (c2,c1,c0) form a 160-bit accumulator for the first two passes,
// emulating the C macro triple of 64-bit words (c2 only ever holds carries).
BigInt c, c0, c1, c2;
// n0..n3: the upper four input limbs (bits 256..511) to be folded down.
BigInt n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
// m0..m6: the 385-bit result of pass 1 (m6 is at most 1, so a plain int).
BigInt m0, m1, m2, m3, m4, m5;
int m6;
// p0..p4: the 258-bit result of pass 2 (p4 is at most 2, so a plain int).
BigInt p0, p1, p2, p3;
int p4;
/// Reduce 512 bits into 385.
/// m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C.
c0 = l[0];
c1 = BigInt.zero;
c2 = BigInt.zero;
// Pops the low 64 bits of the accumulator and shifts it right by one limb,
// assuming no third-word carry exists. `_cond` is presumably a debug
// assertion akin to libsecp256k1's VERIFY_CHECK — TODO confirm.
BigInt extractFast() {
final n = c0;
c0 = c1;
c1 = BigInt.zero;
_cond(c2 == BigInt.zero, "secp256k1ScalarReduce512");
return n;
}
// Adds a*b to the accumulator with full carry propagation into c2.
// `(x < y).toBigInt` after a wrapping `toUnsigned64` add is the Dart
// translation of the C unsigned-overflow test (result < operand => carry).
void muladd(BigInt a, BigInt b) {
BigInt tl, th = BigInt.zero;
Secp256k1Uint128 t = Secp256k1Uint128();
secp256k1U128Mul(t, a, b);
th = secp256k1U128HiU64(t);
tl = secp256k1U128ToU64(t);
c0 = (c0 + tl).toUnsigned64;
// Carry out of c0 folded into the high product word (cannot overflow th).
th = (th + (c0 < tl).toBigInt).toUnsigned64;
c1 = (c1 + th).toUnsigned64;
c2 = (c2 + (c1 < th).toBigInt).toUnsigned64;
_cond((c1 >= th) || (c2 != BigInt.zero), "secp256k1ScalarReduce512");
}
// Like muladd, but relies on the caller's knowledge that c1 cannot
// overflow, so no carry is propagated into c2.
void muladdFast(BigInt a, BigInt b) {
BigInt tl, th = BigInt.zero;
Secp256k1Uint128 t = Secp256k1Uint128();
secp256k1U128Mul(t, a, b);
th = secp256k1U128HiU64(t);
tl = secp256k1U128ToU64(t);
c0 = (c0 + tl).toUnsigned64;
th = (th + (c0 < tl).toBigInt).toUnsigned64;
c1 = (c1 + th).toUnsigned64;
}
// Pops the low 64 bits and shifts the full three-word accumulator down.
BigInt extract() {
final n = c0;
c0 = c1;
c1 = c2;
c2 = BigInt.zero;
return n;
}
// Adds a 64-bit value, asserting that the carry stops at c1 (c2 untouched).
void sumaddFast(BigInt a) {
c0 = (c0 + a).toUnsigned64;
c1 = (c1 + (c0 < a).toBigInt).toUnsigned64;
_cond((c1 != BigInt.zero) | (c0 >= (a)), "secp256k1ScalarReduce512");
_cond(c2 == BigInt.zero, "secp256k1ScalarReduce512");
}
// Adds a 64-bit value with full carry propagation into c2.
void sumadd(BigInt a) {
c0 = (c0 + a).toUnsigned64;
BigInt over = (c0 < a).toBigInt;
c1 = (c1 + over).toUnsigned64;
c2 = (c2 + (c1 < over).toBigInt).toUnsigned64;
}
// Pass 1: fold n0..n3 into l[0..3] one output limb at a time. The
// muladd/sumadd call order per limb matches the upstream C and must not
// be changed — the Fast variants encode proofs about which carries can occur.
muladdFast(n0, Secp256k1Const.secp256k1NC0);
m0 = extractFast();
sumaddFast(l[1]);
muladd(n1, Secp256k1Const.secp256k1NC0);
muladd(n0, Secp256k1Const.secp256k1NC1);
m1 = extract();
sumadd(l[2]);
muladd(n2, Secp256k1Const.secp256k1NC0);
muladd(n1, Secp256k1Const.secp256k1NC1);
// N_C's third limb is 1, so "n0 * limb2" is just an addition of n0.
sumadd(n0);
m2 = extract();
sumadd(l[3]);
muladd(n3, Secp256k1Const.secp256k1NC0);
muladd(n2, Secp256k1Const.secp256k1NC1);
sumadd(n1);
m3 = extract();
muladd(n3, Secp256k1Const.secp256k1NC1);
sumadd(n2);
m4 = extract();
sumaddFast(n3);
m5 = extractFast();
// The 385-bit intermediate's top limb is at most 1.
_cond(c0 <= BigInt.one, "secp256k1ScalarReduce512");
m6 = c0.toUnSignedInt32;
/// Reduce 385 bits into 258.
/// p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C.
c0 = m0;
c1 = BigInt.zero;
c2 = BigInt.zero;
muladdFast(m4, Secp256k1Const.secp256k1NC0);
p0 = extractFast();
sumaddFast(m1);
muladd(m5, Secp256k1Const.secp256k1NC0);
muladd(m4, Secp256k1Const.secp256k1NC1);
p1 = extract();
sumadd(m2);
muladd(m6.toBigInt, Secp256k1Const.secp256k1NC0);
muladd(m5, Secp256k1Const.secp256k1NC1);
sumadd(m4);
p2 = extract();
sumaddFast(m3);
muladdFast(m6.toBigInt, Secp256k1Const.secp256k1NC1);
sumaddFast(m5);
p3 = extractFast();
// Remaining top bits: at most 2, so p4 fits comfortably in an int.
p4 = c0.toUnSignedInt32 + m6;
_cond(p4 <= 2, "secp256k1ScalarReduce512");
/// Reduce 258 bits into 256.
/// r[0..3] = p[0..3] + p[4] * SECP256K1_N_C.
// Pass 3 uses a single running 128-bit accumulator: add each p limb plus
// the matching p4 * N_C limb, emit 64 bits, shift right, repeat.
secp256k1U128FromU64(c128, p0);
secp256k1U128AccumMul(c128, Secp256k1Const.secp256k1NC0, p4.toBigInt);
r[0] = secp256k1U128ToU64(c128);
secp256k1U128Rshift(c128, 64);
secp256k1U128AccumU64(c128, p1);
secp256k1U128AccumMul(c128, Secp256k1Const.secp256k1NC1, p4.toBigInt);
r[1] = secp256k1U128ToU64(c128);
secp256k1U128Rshift(c128, 64);
secp256k1U128AccumU64(c128, p2);
// Again N_C's third limb is 1: p4 * 1 is added directly.
secp256k1U128AccumU64(c128, p4.toBigInt);
r[2] = secp256k1U128ToU64(c128);
secp256k1U128Rshift(c128, 64);
secp256k1U128AccumU64(c128, p3);
r[3] = secp256k1U128ToU64(c128);
// Final carry out of limb 3, plus an overflow check on r, feeds one last
// single-limb reduction (conditional subtraction of n).
c = secp256k1U128HiU64(c128);
secp256k1ScalarReduce(
r, c.toUnSignedInt32 + secp256k1ScalarCheckOverflow(r));
}