Source file src/crypto/internal/fips140/edwards25519/field/fe_generic.go
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package field

import "math/bits"

// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
	lo, hi uint64
}

// mul returns a * b.
func mul(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// addMul returns v + a * b.
func addMul(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// mul19 returns v * 19.
func mul19(v uint64) uint64 {
	// Using this approach seems to yield better optimizations than *19.
	return v + (v+v<<3)<<1
}

// addMul19 returns v + 19 * a * b, where a and b are at most 52 bits.
func addMul19(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(mul19(a), b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// addMul38 returns v + 38 * a * b, where a and b are at most 52 bits.
func addMul38(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(mul19(a), b*2)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
func shiftRightBy51(a uint128) uint64 {
	return (a.hi << (64 - 51)) | (a.lo >> 51)
}

func feMulGeneric(v, a, b *Element) {
	a0 := a.l0
	a1 := a.l1
	a2 := a.l2
	a3 := a.l3
	a4 := a.l4

	b0 := b.l0
	b1 := b.l1
	b2 := b.l2
	b3 := b.l3
	b4 := b.l4

	// Limb multiplication works like pen-and-paper columnar multiplication, but
	// with 51-bit limbs instead of digits.
	//
	//                          a4   a3   a2   a1   a0  x
	//                          b4   b3   b2   b1   b0  =
	//                         ------------------------
	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
	//
	// Reduction can be carried out simultaneously to multiplication. For
	// example, we do not compute r5: whenever the result of a multiplication
	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
	//
	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0
	//
	// Finally we add up the columns into wide, overlapping limbs.

	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	r0 := mul(a0, b0)
	r0 = addMul19(r0, a1, b4)
	r0 = addMul19(r0, a2, b3)
	r0 = addMul19(r0, a3, b2)
	r0 = addMul19(r0, a4, b1)

	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
	r1 := mul(a0, b1)
	r1 = addMul(r1, a1, b0)
	r1 = addMul19(r1, a2, b4)
	r1 = addMul19(r1, a3, b3)
	r1 = addMul19(r1, a4, b2)

	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
	r2 := mul(a0, b2)
	r2 = addMul(r2, a1, b1)
	r2 = addMul(r2, a2, b0)
	r2 = addMul19(r2, a3, b4)
	r2 = addMul19(r2, a4, b3)

	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
	r3 := mul(a0, b3)
	r3 = addMul(r3, a1, b2)
	r3 = addMul(r3, a2, b1)
	r3 = addMul(r3, a3, b0)
	r3 = addMul19(r3, a4, b4)

	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	r4 := mul(a0, b4)
	r4 = addMul(r4, a1, b3)
	r4 = addMul(r4, a2, b2)
	r4 = addMul(r4, a3, b1)
	r4 = addMul(r4, a4, b0)

	// After the multiplication, we need to reduce (carry) the five coefficients
	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
	// to respect the Element invariant.
	//
	// Overall, the reduction works the same as carryPropagate, except with
	// wider inputs: we take the carry for each coefficient by shifting it right
	// by 51, and add it to the limb above it. The top carry is multiplied by 19
	// according to the reduction identity and added to the lowest limb.
	//
	// The largest coefficient (r0) will be at most 111 bits, which guarantees
	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
	//
	//     r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
	//     r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
	//     r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
	//     r0 < 2⁷ × 2⁵² × 2⁵²
	//     r0 < 2¹¹¹
	//
	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
	// allows us to easily apply the reduction identity.
	//
	//     r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
	//     r4 < 5 × 2⁵² × 2⁵²
	//     r4 < 2¹⁰⁷
	//

	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + mul19(c4)
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	// Now all coefficients fit into 64-bit registers but are still too large to
	// be passed around as an Element. We therefore do one last carry chain,
	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.

	v.l0 = rr0&maskLow51Bits + mul19(rr4>>51)
	v.l1 = rr1&maskLow51Bits + rr0>>51
	v.l2 = rr2&maskLow51Bits + rr1>>51
	v.l3 = rr3&maskLow51Bits + rr2>>51
	v.l4 = rr4&maskLow51Bits + rr3>>51
}

func feSquareGeneric(v, a *Element) {
	l0 := a.l0
	l1 := a.l1
	l2 := a.l2
	l3 := a.l3
	l4 := a.l4

	// Squaring works precisely like multiplication above, but thanks to its
	// symmetry we get to group a few terms together.
	//
	//                          l4   l3   l2   l1   l0  x
	//                          l4   l3   l2   l1   l0  =
	//                         ------------------------
	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
	//   ----------------------------------------------
	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
	//
	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
	//           --------------------------------------
	//              r4      r3      r2      r1      r0

	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
	r0 := mul(l0, l0)
	r0 = addMul38(r0, l1, l4)
	r0 = addMul38(r0, l2, l3)

	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
	r1 := mul(l0*2, l1)
	r1 = addMul38(r1, l2, l4)
	r1 = addMul19(r1, l3, l3)

	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
	r2 := mul(l0*2, l2)
	r2 = addMul(r2, l1, l1)
	r2 = addMul38(r2, l3, l4)

	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
	r3 := mul(l0*2, l3)
	r3 = addMul(r3, l1*2, l2)
	r3 = addMul19(r3, l4, l4)

	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
	r4 := mul(l0*2, l4)
	r4 = addMul(r4, l1*2, l3)
	r4 = addMul(r4, l2, l2)

	// The reduction is the same as in feMulGeneric: the grouped terms above
	// give identical coefficient bounds, so the carry analysis there applies
	// unchanged.
	c0 := shiftRightBy51(r0)
	c1 := shiftRightBy51(r1)
	c2 := shiftRightBy51(r2)
	c3 := shiftRightBy51(r3)
	c4 := shiftRightBy51(r4)

	rr0 := r0.lo&maskLow51Bits + mul19(c4)
	rr1 := r1.lo&maskLow51Bits + c0
	rr2 := r2.lo&maskLow51Bits + c1
	rr3 := r3.lo&maskLow51Bits + c2
	rr4 := r4.lo&maskLow51Bits + c3

	v.l0 = rr0&maskLow51Bits + mul19(rr4>>51)
	v.l1 = rr1&maskLow51Bits + rr0>>51
	v.l2 = rr2&maskLow51Bits + rr1>>51
	v.l3 = rr3&maskLow51Bits + rr2>>51
	v.l4 = rr4&maskLow51Bits + rr3>>51
}

// carryPropagate brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagate() *Element {
	// (l4>>51) is at most 64 - 51 = 13 bits, so (l4>>51)*19 is at most 18 bits, and
	// the final l0 will be at most 52 bits. Similarly for the rest.
	l0 := v.l0
	v.l0 = v.l0&maskLow51Bits + mul19(v.l4>>51)
	v.l4 = v.l4&maskLow51Bits + v.l3>>51
	v.l3 = v.l3&maskLow51Bits + v.l2>>51
	v.l2 = v.l2&maskLow51Bits + v.l1>>51
	v.l1 = v.l1&maskLow51Bits + l0>>51

	return v
}
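
// The function below is an editorial sketch, not part of the upstream file:
// it demonstrates the documented contracts of the helpers above on 52-bit
// limb values, mirroring a single column accumulation from feMulGeneric.
// The name exampleHelperUsage is hypothetical.
func exampleHelperUsage() {
	const maxLimb = 1<<52 - 1 // the helpers accept limbs of at most 52 bits

	// mul19 computes v*19 with shifts and adds: v + (v + v<<3)<<1 = v + 18v.
	if mul19(maxLimb) != maxLimb*19 {
		panic("mul19 disagrees with plain multiplication")
	}

	// Accumulate one column as feMulGeneric does: acc = a0×b0 + 19×a1×b4,
	// then take its carry. With 52-bit inputs, acc stays well below the
	// 115-bit limit documented on shiftRightBy51.
	acc := mul(maxLimb, maxLimb)
	acc = addMul19(acc, maxLimb, maxLimb)
	_ = shiftRightBy51(acc)
}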