airbender_crypto/secp256k1/scalars/mod.rs
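
//! Scalar arithmetic modulo the secp256k1 group order `n`, over a backend
//! representation selected at compile time.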

use core::ops::{AddAssign, MulAssign};

use crate::k256::FieldBytes;
use cfg_if::cfg_if;

mod invert;

#[cfg(all(target_pointer_width = "64", not(feature = "bigint_ops")))]
mod scalar64;

#[cfg(all(target_pointer_width = "32", not(feature = "bigint_ops")))]
mod scalar32;

#[cfg(any(
    all(target_arch = "riscv32", feature = "bigint_ops"),
    test,
    feature = "proving"
))]
pub(crate) mod scalar32_delegation;

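// Pick the `ScalarInner` backend: `bigint_ops` selects the delegation-based
// implementation; otherwise use the portable limb implementation matching the
// target pointer width.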
cfg_if! {
    if #[cfg(feature = "bigint_ops")] {
        use scalar32_delegation::ScalarInner;
    } else if #[cfg(target_pointer_width = "32")] {
        use scalar32::ScalarInner;
    } else if #[cfg(target_pointer_width = "64")] {
        use scalar64::ScalarInner;
    }
}

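/// The secp256k1 group order `n`, as big-endian hex.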
const ORDER_HEX: &str = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141";

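/// A scalar modulo the secp256k1 group order, wrapping the backend selected
/// above.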
#[derive(Debug, Clone, Copy)]
pub struct Scalar(pub(crate) ScalarInner);

impl Scalar {
    pub const ZERO: Self = Self(ScalarInner::ZERO);
    pub const ONE: Self = Self(ScalarInner::ONE);
    #[cfg(test)]
    pub(crate) const ORDER: Self = Self(ScalarInner::ORDER);
    #[cfg(test)]
    pub(crate) const MINUS_LAMBDA: Self = Self(ScalarInner::MINUS_LAMBDA);

    #[cfg(test)]
    pub(crate) const fn from_bytes_unchecked(bytes: &[u8; 32]) -> Self {
        Self(ScalarInner::from_be_bytes_unchecked(bytes))
    }

    #[cfg(test)]
    pub(crate) fn from_u128(n: u128) -> Self {
        Self(ScalarInner::from_u128(n))
    }

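    /// Splits an ECDSA signature into its `r` and `s` scalar components.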
    pub fn from_signature(signature: &crate::k256::ecdsa::Signature) -> (Self, Self) {
        let (r, s) = signature.split_scalars();
        (Self::from_k256_scalar(*r), Self::from_k256_scalar(*s))
    }

    pub fn to_repr(self) -> FieldBytes {
        self.0.to_be_bytes().into()
    }

    #[cfg(test)]
    pub(crate) fn from_repr(bytes: FieldBytes) -> Self {
        let bytes: [u8; 32] = bytes.into();
        Self(ScalarInner::from_be_bytes(&bytes))
    }

    #[inline(always)]
    pub fn from_k256_scalar(s: crate::k256::Scalar) -> Self {
        Self(ScalarInner::from_k256_scalar(s))
    }

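    // GLV decomposition: split the scalar into `(k1, k2)` with
    // `k = k1 + k2 * lambda (mod n)`, where `lambda` is the eigenvalue of
    // secp256k1's efficiently computable endomorphism; each half fits in
    // roughly 128 bits up to sign (see `test_decompose` below). This halves
    // the bit-length scanned during scalar multiplication.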
    pub fn decompose(self) -> (Self, Self) {
        let (k1, k2) = self.0.decompose();
        (Self(k1), Self(k2))
    }

    pub fn decompose_128(self) -> (Self, Self) {
        let (k1, k2) = self.0.decompose_128();
        (Self(k1), Self(k2))
    }

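    // Windowed bit access used by scalar-multiplication loops; the `_var`
    // suffix presumably marks the variable-time variant, following
    // libsecp256k1 naming.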
    pub fn bits(&self, offset: usize, count: usize) -> u32 {
        self.0.bits(offset, count)
    }

    pub fn bits_var(&self, offset: usize, count: usize) -> u32 {
        self.0.bits_var(offset, count)
    }

    pub fn is_zero(&self) -> bool {
        self.0.is_zero()
    }

    pub fn negate_in_place(&mut self) {
        self.0.negate_in_place();
    }
}

impl MulAssign for Scalar {
    fn mul_assign(&mut self, rhs: Self) {
        self.0.mul_in_place(&rhs.0);
    }
}

impl MulAssign<&Scalar> for Scalar {
    fn mul_assign(&mut self, rhs: &Scalar) {
        self.0.mul_in_place(&rhs.0);
    }
}

impl AddAssign for Scalar {
    fn add_assign(&mut self, rhs: Self) {
        self.0.add_in_place(&rhs.0);
    }
}

impl AddAssign<&Scalar> for Scalar {
    fn add_assign(&mut self, rhs: &Scalar) {
        self.0.add_in_place(&rhs.0);
    }
}

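// Test-only impls: `Arbitrary` drives the property tests below; scalar
// equality and ordering are exposed only under `cfg(test)`.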
#[cfg(test)]
impl proptest::arbitrary::Arbitrary for Scalar {
    type Parameters = ();

    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
        use proptest::prelude::{any, Strategy};

        #[allow(clippy::redundant_closure)]
        any::<ScalarInner>().prop_map(|inner| Self(inner))
    }

    type Strategy = proptest::arbitrary::Mapped<ScalarInner, Self>;
}

#[cfg(test)]
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

#[cfg(test)]
impl PartialOrd for Scalar {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.0.partial_cmp(&other.0)
    }
}

impl core::ops::Neg for Scalar {
    type Output = Self;

    fn neg(self) -> Self::Output {
        let mut x = self;
        x.0.negate_in_place();
        x
    }
}

#[cfg(test)]
impl core::ops::Mul for Scalar {
    type Output = Self;

    fn mul(self, rhs: Self) -> Self::Output {
        let mut lhs = self;
        lhs.0.mul_in_place(&rhs.0);
        lhs
    }
}

impl core::ops::Add for Scalar {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let mut lhs = self;
        lhs.0.add_in_place(&rhs.0);
        lhs
    }
}

impl core::ops::Sub for Scalar {
    type Output = Self;

    fn sub(self, rhs: Self) -> Self::Output {
        self + (-rhs)
    }
}

#[cfg(test)]
mod tests {
    use super::Scalar;
    use proptest::{prop_assert, prop_assert_eq, proptest};

    #[test]
    fn test_zero() {
        // The group order itself reduces to zero modulo `n`.
        assert_eq!(Scalar::ZERO, Scalar::ORDER);
        assert!(Scalar::ZERO.is_zero());
        assert!(Scalar::ORDER.is_zero());
    }

    #[test]
    fn test_mul() {
        proptest!(|(x: Scalar, y: Scalar, z: Scalar)| {
            prop_assert_eq!(x * y, y * x);
            prop_assert_eq!((x * y) * z, x * (y * z));
            prop_assert_eq!(x * Scalar::ONE, x);
            prop_assert_eq!(x * Scalar::ZERO, Scalar::ZERO);

            prop_assert_eq!(x * (y + z), (x * y) + (x * z));
        })
    }

    #[test]
    fn test_add() {
        proptest!(|(x: Scalar, y: Scalar, z: Scalar)| {
            prop_assert_eq!(x + y, y + x);
            prop_assert_eq!(x + Scalar::ZERO, x);
            prop_assert_eq!(x + (y + z), (x + y) + z);
            prop_assert_eq!(x - x, Scalar::ZERO);
        })
    }

    #[test]
    fn test_decompose() {
        proptest!(|(k: Scalar)| {
            // `r1`/`r2` must be mutable under `bigint_ops`, where they are
            // converted between representations in place below.
            #[allow(unused_mut)]
            let (mut r1, mut r2) = k.decompose();
            let lambda = -Scalar::MINUS_LAMBDA;

            // Under `bigint_ops`, move into the backend's arithmetic
            // representation before multiplying...
            #[cfg(feature = "bigint_ops")]
            {
                r1 = Scalar(r1.0.to_representation());
                r2 = Scalar(r2.0.to_representation());
            }

            prop_assert_eq!(r1 + r2 * lambda, k);

            // ...and back to integer form for the range comparisons.
            #[cfg(feature = "bigint_ops")]
            {
                r1 = Scalar(r1.0.to_integer());
                r2 = Scalar(r2.0.to_integer());
            }

            // 2^128: each half of the decomposition fits in 128 bits, up to sign.
            let bound = Scalar::from_bytes_unchecked(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
            prop_assert!(r1 < bound || -r1 < bound);
            prop_assert!(r2 < bound || -r2 < bound);
        })
    }
}