// ark_ff/fields/models/fp/mod.rs

use core::iter;

use ark_serialize::{
    buffer_byte_size, CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize,
    CanonicalSerializeWithFlags, Compress, EmptyFlags, Flags, SerializationError, Valid, Validate,
};
use ark_std::{
    cmp::{Ord, Ordering, PartialOrd},
    fmt::{Display, Formatter, Result as FmtResult},
    marker::PhantomData,
    ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
    str::FromStr,
    string::ToString,
    One, Zero,
};

#[macro_use]
mod montgomery_backend;
pub use montgomery_backend::*;

use crate::{BigInt, BigInteger, FftField, Field, LegendreSymbol, PrimeField, SqrtPrecomputation};

/// A trait that specifies the configuration of a prime field.
/// Also specifies how to perform arithmetic on field elements.
pub trait FpConfig<const N: usize>: Send + Sync + 'static + Sized {
    /// The modulus of the field.
    const MODULUS: crate::BigInt<N>;

    /// A multiplicative generator of the field.
    /// `Self::GENERATOR` is an element having multiplicative order
    /// `Self::MODULUS - 1`.
    const GENERATOR: Fp<Self, N>;

    /// Additive identity of the field, i.e. the element `e`
    /// such that, for all elements `f` of the field, `e + f = f`.
    const ZERO: Fp<Self, N>;

    /// Multiplicative identity of the field, i.e. the element `e`
    /// such that, for all elements `f` of the field, `e * f = f`.
    const ONE: Fp<Self, N>;

    /// Let `M = Self::MODULUS - 1` be the order of the multiplicative group
    /// of the field. Then `TWO_ADICITY` is the two-adicity of `M`, i.e. the
    /// integer `s` such that `M = 2^s * t` for some odd integer `t`.
    const TWO_ADICITY: u32;

    /// A `2^s`-th root of unity, computed as `GENERATOR^t`.
    const TWO_ADIC_ROOT_OF_UNITY: Fp<Self, N>;
48
49    /// An integer `b` such that there exists a multiplicative subgroup
50    /// of size `b^k` for some integer `k`.
51    const SMALL_SUBGROUP_BASE: Option<u32> = None;
52
53    /// The integer `k` such that there exists a multiplicative subgroup
54    /// of size `Self::SMALL_SUBGROUP_BASE^k`.
55    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = None;
56
57    /// GENERATOR^((MODULUS-1) / (2^s *
58    /// SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY)) Used for mixed-radix
59    /// FFT.
60    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Fp<Self, N>> = None;
61
62    /// Precomputed material for use when computing square roots.
63    /// Currently uses the generic Tonelli-Shanks,
64    /// which works for every modulus.
65    const SQRT_PRECOMP: Option<SqrtPrecomputation<Fp<Self, N>>>;
66
67    /// Set a += b.
68    fn add_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
69
70    /// Set a -= b.
71    fn sub_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
72
73    /// Set a = a + a.
74    fn double_in_place(a: &mut Fp<Self, N>);
75
76    /// Set a = -a;
77    fn neg_in_place(a: &mut Fp<Self, N>);
78
79    /// Set a *= b.
80    fn mul_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);
81
82    /// Compute the inner product `<a, b>`.
83    fn sum_of_products<const T: usize>(a: &[Fp<Self, N>; T], b: &[Fp<Self, N>; T]) -> Fp<Self, N>;
84
    /// Set a = a * a, i.e. square `a` in place.
    fn square_in_place(a: &mut Fp<Self, N>);

    /// Compute a^{-1} if `a` is not zero.
    fn inverse(a: &Fp<Self, N>) -> Option<Fp<Self, N>>;

    /// Construct a field element from an integer in the range
    /// `0..Self::MODULUS` (i.e. at most `Self::MODULUS - 1`). Returns `None`
    /// if the integer is outside this range.
    fn from_bigint(other: BigInt<N>) -> Option<Fp<Self, N>>;

    /// Convert a field element to its canonical integer representative in the
    /// range `0..Self::MODULUS`.
    fn into_bigint(other: Fp<Self, N>) -> BigInt<N>;
}
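
// A small generic sketch (not part of the upstream file) of how the constants
// and methods above fit together: `TWO_ADIC_ROOT_OF_UNITY` is intended to
// generate the subgroup of order `2^TWO_ADICITY`, so squaring it `TWO_ADICITY`
// times must land on `ONE`. Only items declared in `FpConfig` are assumed.
#[allow(dead_code)]
fn two_adic_root_has_expected_order<P: FpConfig<N>, const N: usize>() -> bool {
    let mut x = P::TWO_ADIC_ROOT_OF_UNITY;
    for _ in 0..P::TWO_ADICITY {
        // Repeated squaring: after `s` squarings, `x = root^(2^s)`.
        P::square_in_place(&mut x);
    }
    x == P::ONE
}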

/// Represents an element of the prime field F_p, where `p == P::MODULUS`.
/// This type can represent elements in any field of size at most N * 64 bits.
#[derive(Derivative)]
#[derivative(
    Default(bound = ""),
    Hash(bound = ""),
    Clone(bound = ""),
    Copy(bound = ""),
    PartialEq(bound = ""),
    Eq(bound = "")
)]
pub struct Fp<P: FpConfig<N>, const N: usize>(
    pub BigInt<N>,
    #[derivative(Debug = "ignore")]
    #[doc(hidden)]
    pub PhantomData<P>,
);

pub type Fp64<P> = Fp<P, 1>;
pub type Fp128<P> = Fp<P, 2>;
pub type Fp192<P> = Fp<P, 3>;
pub type Fp256<P> = Fp<P, 4>;
pub type Fp320<P> = Fp<P, 5>;
pub type Fp384<P> = Fp<P, 6>;
pub type Fp448<P> = Fp<P, 7>;
pub type Fp512<P> = Fp<P, 8>;
pub type Fp576<P> = Fp<P, 9>;
pub type Fp640<P> = Fp<P, 10>;
pub type Fp704<P> = Fp<P, 11>;
pub type Fp768<P> = Fp<P, 12>;
pub type Fp832<P> = Fp<P, 13>;

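// Usage sketch (not part of the upstream file): concrete fields are normally
// obtained by pairing one of the aliases above with the Montgomery backend
// re-exported from `montgomery_backend`. The names `FqConfig`/`Fq` and the
// small modulus below are illustrative assumptions, following the pattern in
// the ark-ff documentation:
//
//     #[derive(MontConfig)]
//     #[modulus = "17"]
//     #[generator = "3"]
//     pub struct FqConfig;
//     pub type Fq = Fp64<MontBackend<FqConfig, 1>>;
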
impl<P: FpConfig<N>, const N: usize> Fp<P, N> {
    #[doc(hidden)]
    #[inline]
    pub fn is_geq_modulus(&self) -> bool {
        self.0 >= P::MODULUS
    }

    #[inline]
    fn subtract_modulus(&mut self) {
        if self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    #[inline]
    fn subtract_modulus_with_carry(&mut self, carry: bool) {
        if carry || self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    fn num_bits_to_shave() -> usize {
        64 * N - (Self::MODULUS_BIT_SIZE as usize)
    }
}

impl<P: FpConfig<N>, const N: usize> ark_std::fmt::Debug for Fp<P, N> {
    fn fmt(&self, f: &mut Formatter<'_>) -> ark_std::fmt::Result {
        ark_std::fmt::Debug::fmt(&self.into_bigint(), f)
    }
}

impl<P: FpConfig<N>, const N: usize> Zero for Fp<P, N> {
    #[inline]
    fn zero() -> Self {
        P::ZERO
    }

    #[inline]
    fn is_zero(&self) -> bool {
        *self == P::ZERO
    }
}

impl<P: FpConfig<N>, const N: usize> One for Fp<P, N> {
    #[inline]
    fn one() -> Self {
        P::ONE
    }

    #[inline]
    fn is_one(&self) -> bool {
        *self == P::ONE
    }
}

impl<P: FpConfig<N>, const N: usize> Field for Fp<P, N> {
    type BasePrimeField = Self;
    type BasePrimeFieldIter = iter::Once<Self::BasePrimeField>;

    const SQRT_PRECOMP: Option<SqrtPrecomputation<Self>> = P::SQRT_PRECOMP;
    const ZERO: Self = P::ZERO;
    const ONE: Self = P::ONE;

    fn extension_degree() -> u64 {
        1
    }

    fn from_base_prime_field(elem: Self::BasePrimeField) -> Self {
        elem
    }

    fn to_base_prime_field_elements(&self) -> Self::BasePrimeFieldIter {
        iter::once(*self)
    }

    fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option<Self> {
        if elems.len() != (Self::extension_degree() as usize) {
            return None;
        }
        Some(elems[0])
    }

    #[inline]
    fn double(&self) -> Self {
        let mut temp = *self;
        temp.double_in_place();
        temp
    }

    #[inline]
    fn double_in_place(&mut self) -> &mut Self {
        P::double_in_place(self);
        self
    }

    #[inline]
    fn neg_in_place(&mut self) -> &mut Self {
        P::neg_in_place(self);
        self
    }

    #[inline]
    fn characteristic() -> &'static [u64] {
        P::MODULUS.as_ref()
    }

    #[inline]
    fn sum_of_products<const T: usize>(a: &[Self; T], b: &[Self; T]) -> Self {
        P::sum_of_products(a, b)
    }

    #[inline]
    fn from_random_bytes_with_flags<F: Flags>(bytes: &[u8]) -> Option<(Self, F)> {
        if F::BIT_SIZE > 8 {
            None
        } else {
            let shave_bits = Self::num_bits_to_shave();
            let mut result_bytes = crate::const_helpers::SerBuffer::<N>::zeroed();
            // Copy the input into a temporary buffer.
            result_bytes.copy_from_u8_slice(bytes);
            // This mask retains everything in the last limb
            // that is below `P::MODULUS_BIT_SIZE`.
            let last_limb_mask =
                (u64::MAX.checked_shr(shave_bits as u32).unwrap_or(0)).to_le_bytes();
            let mut last_bytes_mask = [0u8; 9];
            last_bytes_mask[..8].copy_from_slice(&last_limb_mask);

            // Length of the buffer containing the field element and the flag.
            let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);
            // Location of the flag is the last byte of the serialized
            // form of the field element.
            let flag_location = output_byte_size - 1;

            // At which byte is the flag located in the last limb?
            let flag_location_in_last_limb = flag_location.saturating_sub(8 * (N - 1));

            // Take the last 9 bytes, i.e. the highest limb plus the extra
            // (flag) byte.
            let last_bytes = result_bytes.last_n_plus_1_bytes_mut();

            // This mask only has the highest `F::BIT_SIZE` bits of a byte set.
            let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0);

            // Mask away the remaining bytes, and try to reconstruct the flag.
            let mut flags: u8 = 0;
            for (i, (b, m)) in last_bytes.zip(&last_bytes_mask).enumerate() {
                if i == flag_location_in_last_limb {
                    flags = *b & flags_mask
                }
                *b &= m;
            }
            Self::deserialize_compressed(&result_bytes.as_slice()[..(N * 8)])
                .ok()
                .and_then(|f| F::from_u8(flags).map(|flag| (f, flag)))
        }
    }

    #[inline]
    fn square(&self) -> Self {
        let mut temp = *self;
        temp.square_in_place();
        temp
    }

    fn square_in_place(&mut self) -> &mut Self {
        P::square_in_place(self);
        self
    }

    #[inline]
    fn inverse(&self) -> Option<Self> {
        P::inverse(self)
    }

    fn inverse_in_place(&mut self) -> Option<&mut Self> {
        if let Some(inverse) = self.inverse() {
            *self = inverse;
            Some(self)
        } else {
            None
        }
    }

    /// The Frobenius map has no effect in a prime field.
    #[inline]
    fn frobenius_map_in_place(&mut self, _: usize) {}

    #[inline]
    fn legendre(&self) -> LegendreSymbol {
        use crate::fields::LegendreSymbol::*;

        // s = self^((MODULUS - 1) // 2)
        let s = self.pow(Self::MODULUS_MINUS_ONE_DIV_TWO);
        if s.is_zero() {
            Zero
        } else if s.is_one() {
            QuadraticResidue
        } else {
            QuadraticNonResidue
        }
    }
}
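
// Illustrative sketch (not part of the upstream file): `legendre` above raises
// to `(MODULUS - 1) / 2`, so classifying the square of any nonzero element
// should report a quadratic residue. Only methods defined in this module and
// on `Field` are used.
#[allow(dead_code)]
fn squares_are_residues<P: FpConfig<N>, const N: usize>(a: Fp<P, N>) -> bool {
    // Zero is excluded because `legendre` reports `Zero` for it.
    a.is_zero() || matches!(a.square().legendre(), LegendreSymbol::QuadraticResidue)
}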

impl<P: FpConfig<N>, const N: usize> PrimeField for Fp<P, N> {
    type BigInt = BigInt<N>;
    const MODULUS: Self::BigInt = P::MODULUS;
    const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt = P::MODULUS.divide_by_2_round_down();
    const MODULUS_BIT_SIZE: u32 = P::MODULUS.const_num_bits();
    const TRACE: Self::BigInt = P::MODULUS.two_adic_coefficient();
    const TRACE_MINUS_ONE_DIV_TWO: Self::BigInt = Self::TRACE.divide_by_2_round_down();

    #[inline]
    fn from_bigint(r: BigInt<N>) -> Option<Self> {
        P::from_bigint(r)
    }

    fn into_bigint(self) -> BigInt<N> {
        P::into_bigint(self)
    }
}
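
// Roundtrip sketch (not part of the upstream file): `into_bigint` returns the
// canonical representative below the modulus, so feeding it back through
// `from_bigint` must succeed and reproduce the element.
#[allow(dead_code)]
fn bigint_roundtrip<P: FpConfig<N>, const N: usize>(a: Fp<P, N>) -> bool {
    Fp::<P, N>::from_bigint(a.into_bigint()) == Some(a)
}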

impl<P: FpConfig<N>, const N: usize> FftField for Fp<P, N> {
    const GENERATOR: Self = P::GENERATOR;
    const TWO_ADICITY: u32 = P::TWO_ADICITY;
    const TWO_ADIC_ROOT_OF_UNITY: Self = P::TWO_ADIC_ROOT_OF_UNITY;
    const SMALL_SUBGROUP_BASE: Option<u32> = P::SMALL_SUBGROUP_BASE;
    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = P::SMALL_SUBGROUP_BASE_ADICITY;
    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self> = P::LARGE_SUBGROUP_ROOT_OF_UNITY;
}

/// Note that this implementation of `Ord` compares field elements viewing
/// them as integers in the range 0, 1, ..., P::MODULUS - 1. However, other
/// implementations of `PrimeField` might choose a different ordering, and
/// as such, users should use this `Ord` for applications where
/// any ordering suffices (like in a BTreeMap), and not in applications
/// where a particular ordering is required.
impl<P: FpConfig<N>, const N: usize> Ord for Fp<P, N> {
    #[inline(always)]
    fn cmp(&self, other: &Self) -> Ordering {
        self.into_bigint().cmp(&other.into_bigint())
    }
}

/// Note that this implementation of `PartialOrd` compares field elements
/// viewing them as integers in the range 0, 1, ..., `P::MODULUS` - 1. However,
/// other implementations of `PrimeField` might choose a different ordering, and
/// as such, users should use this `PartialOrd` for applications where
/// any ordering suffices (like in a BTreeMap), and not in applications
/// where a particular ordering is required.
impl<P: FpConfig<N>, const N: usize> PartialOrd for Fp<P, N> {
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
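
// Per the notes above, the ordering is exactly the ordering of the canonical
// integer representatives. A minimal illustrative check (not part of the
// upstream file):
#[allow(dead_code)]
fn ord_matches_bigint_ord<P: FpConfig<N>, const N: usize>(a: Fp<P, N>, b: Fp<P, N>) -> bool {
    a.cmp(&b) == a.into_bigint().cmp(&b.into_bigint())
}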

impl<P: FpConfig<N>, const N: usize> From<u128> for Fp<P, N> {
    fn from(mut other: u128) -> Self {
        let mut result = BigInt::default();
        if N == 1 {
            result.0[0] = (other % u128::from(P::MODULUS.0[0])) as u64;
        } else if N == 2 || P::MODULUS.0[2..].iter().all(|&x| x == 0) {
            let mod_as_u128 = P::MODULUS.0[0] as u128 + ((P::MODULUS.0[1] as u128) << 64);
            other %= mod_as_u128;
            result.0[0] = ((other << 64) >> 64) as u64;
            result.0[1] = (other >> 64) as u64;
        } else {
            result.0[0] = ((other << 64) >> 64) as u64;
            result.0[1] = (other >> 64) as u64;
        }
        Self::from_bigint(result).unwrap()
    }
}
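
// Conversion sketch (not part of the upstream file): the limb-splitting and
// pre-reduction above should agree with the generic little-endian reduction
// provided by `PrimeField::from_le_bytes_mod_order`.
#[allow(dead_code)]
fn u128_matches_le_reduction<P: FpConfig<N>, const N: usize>(x: u128) -> bool {
    Fp::<P, N>::from(x) == Fp::<P, N>::from_le_bytes_mod_order(&x.to_le_bytes())
}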

impl<P: FpConfig<N>, const N: usize> From<i128> for Fp<P, N> {
    fn from(other: i128) -> Self {
        let abs = Self::from(other.unsigned_abs());
        if other.is_positive() {
            abs
        } else {
            -abs
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<bool> for Fp<P, N> {
    fn from(other: bool) -> Self {
        if N == 1 {
            Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
        } else {
            Self::from_bigint(BigInt::from(u64::from(other))).unwrap()
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<u64> for Fp<P, N> {
    fn from(other: u64) -> Self {
        if N == 1 {
            Self::from_bigint(BigInt::from(other % P::MODULUS.0[0])).unwrap()
        } else {
            Self::from_bigint(BigInt::from(other)).unwrap()
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<i64> for Fp<P, N> {
    fn from(other: i64) -> Self {
        let abs = Self::from(other.unsigned_abs());
        if other.is_positive() {
            abs
        } else {
            -abs
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<u32> for Fp<P, N> {
    fn from(other: u32) -> Self {
        if N == 1 {
            Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
        } else {
            Self::from_bigint(BigInt::from(other)).unwrap()
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<i32> for Fp<P, N> {
    fn from(other: i32) -> Self {
        let abs = Self::from(other.unsigned_abs());
        if other.is_positive() {
            abs
        } else {
            -abs
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<u16> for Fp<P, N> {
    fn from(other: u16) -> Self {
        if N == 1 {
            Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
        } else {
            Self::from_bigint(BigInt::from(other)).unwrap()
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<i16> for Fp<P, N> {
    fn from(other: i16) -> Self {
        let abs = Self::from(other.unsigned_abs());
        if other.is_positive() {
            abs
        } else {
            -abs
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<u8> for Fp<P, N> {
    fn from(other: u8) -> Self {
        if N == 1 {
            Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
        } else {
            Self::from_bigint(BigInt::from(other)).unwrap()
        }
    }
}

impl<P: FpConfig<N>, const N: usize> From<i8> for Fp<P, N> {
    fn from(other: i8) -> Self {
        let abs = Self::from(other.unsigned_abs());
        if other.is_positive() {
            abs
        } else {
            -abs
        }
    }
}
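
// Sign-handling sketch (not part of the upstream file): each signed `From`
// impl above converts the unsigned absolute value and then negates, so `-1`
// of any width maps to the additive inverse of one.
#[allow(dead_code)]
fn minus_one_maps_to_neg_one<P: FpConfig<N>, const N: usize>() -> bool {
    Fp::<P, N>::from(-1i8) == -Fp::<P, N>::one()
}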

impl<P: FpConfig<N>, const N: usize> ark_std::rand::distributions::Distribution<Fp<P, N>>
    for ark_std::rand::distributions::Standard
{
    #[inline]
    fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp<P, N> {
        loop {
            let mut tmp = Fp(
                rng.sample(ark_std::rand::distributions::Standard),
                PhantomData,
            );
            let shave_bits = Fp::<P, N>::num_bits_to_shave();
            // Mask away the unused high bits of the top limb.
            assert!(shave_bits <= 64);
            let mask = if shave_bits == 64 {
                0
            } else {
                u64::MAX >> shave_bits
            };

            if let Some(val) = tmp.0 .0.last_mut() {
                *val &= mask
            }

            if !tmp.is_geq_modulus() {
                return tmp;
            }
        }
    }
}
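
// Sampling sketch (not part of the upstream file): the rejection loop above
// masks the excess high bits and retries until the candidate is below the
// modulus, so every sampled element is already reduced.
#[allow(dead_code)]
fn sampled_is_reduced<P: FpConfig<N>, const N: usize, R: ark_std::rand::Rng>(rng: &mut R) -> bool {
    let x: Fp<P, N> = rng.sample(ark_std::rand::distributions::Standard);
    !x.is_geq_modulus()
}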

impl<P: FpConfig<N>, const N: usize> CanonicalSerializeWithFlags for Fp<P, N> {
    fn serialize_with_flags<W: ark_std::io::Write, F: Flags>(
        &self,
        writer: W,
        flags: F,
    ) -> Result<(), SerializationError> {
        // All reasonable `Flags` should be less than 8 bits in size
        // (256 values are enough for anyone!)
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }

        // Calculate the number of bytes required to represent a field element
        // serialized with `flags`. If `F::BIT_SIZE < 8`,
        // this is at most `N * 8 + 1`
        let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);

        // Write out `self` to a temporary buffer.
        // The size of the buffer is `N * 8 + 1` bytes because `F::BIT_SIZE`
        // is at most 8 bits.
        let mut bytes = crate::const_helpers::SerBuffer::zeroed();
        bytes.copy_from_u64_slice(&self.into_bigint().0);
        // Set the flag bits in the last byte of the output.
        bytes[output_byte_size - 1] |= flags.u8_bitmask();

        bytes.write_up_to(writer, output_byte_size)?;
        Ok(())
    }

    // Let `m = 8 * n` be the smallest multiple of 8 that is at least
    // `P::MODULUS_BIT_SIZE`.
    // If `(m - P::MODULUS_BIT_SIZE) >= F::BIT_SIZE`, then this method returns `n`;
    // otherwise, it returns `n + 1`.
    fn serialized_size_with_flags<F: Flags>(&self) -> usize {
        buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE)
    }
}

impl<P: FpConfig<N>, const N: usize> CanonicalSerialize for Fp<P, N> {
    #[inline]
    fn serialize_with_mode<W: ark_std::io::Write>(
        &self,
        writer: W,
        _compress: Compress,
    ) -> Result<(), SerializationError> {
        self.serialize_with_flags(writer, EmptyFlags)
    }

    #[inline]
    fn serialized_size(&self, _compress: Compress) -> usize {
        self.serialized_size_with_flags::<EmptyFlags>()
    }
}

impl<P: FpConfig<N>, const N: usize> CanonicalDeserializeWithFlags for Fp<P, N> {
    fn deserialize_with_flags<R: ark_std::io::Read, F: Flags>(
        reader: R,
    ) -> Result<(Self, F), SerializationError> {
        // All reasonable `Flags` should be less than 8 bits in size
        // (256 values are enough for anyone!)
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }
        // Calculate the number of bytes required to represent a field element
        // serialized with `flags`.
        let output_byte_size = Self::zero().serialized_size_with_flags::<F>();

        let mut masked_bytes = crate::const_helpers::SerBuffer::zeroed();
        masked_bytes.read_exact_up_to(reader, output_byte_size)?;
        let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1])
            .ok_or(SerializationError::UnexpectedFlags)?;

        let self_integer = masked_bytes.to_bigint();
        Self::from_bigint(self_integer)
            .map(|v| (v, flags))
            .ok_or(SerializationError::InvalidData)
    }
}
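
// Roundtrip sketch (not part of the upstream file): serializing with
// `EmptyFlags` and reading it back through `deserialize_with_flags` recovers
// the element; the buffer length matches `serialized_size_with_flags`.
#[allow(dead_code)]
fn serialization_roundtrip<P: FpConfig<N>, const N: usize>(
    a: Fp<P, N>,
) -> Result<bool, SerializationError> {
    let mut bytes = ark_std::vec::Vec::new();
    a.serialize_with_flags(&mut bytes, EmptyFlags)?;
    debug_assert_eq!(bytes.len(), a.serialized_size_with_flags::<EmptyFlags>());
    let (b, _flags) = Fp::<P, N>::deserialize_with_flags::<_, EmptyFlags>(&bytes[..])?;
    Ok(a == b)
}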

impl<P: FpConfig<N>, const N: usize> Valid for Fp<P, N> {
    fn check(&self) -> Result<(), SerializationError> {
        Ok(())
    }
}

impl<P: FpConfig<N>, const N: usize> CanonicalDeserialize for Fp<P, N> {
    fn deserialize_with_mode<R: ark_std::io::Read>(
        reader: R,
        _compress: Compress,
        _validate: Validate,
    ) -> Result<Self, SerializationError> {
        Self::deserialize_with_flags::<R, EmptyFlags>(reader).map(|(r, _)| r)
    }
}

impl<P: FpConfig<N>, const N: usize> FromStr for Fp<P, N> {
    type Err = ();

    /// Interpret a decimal string as a prime field element.
    /// Does not accept an empty string, unnecessary leading zeroes, non-digit
    /// characters, or a value that is not less than the modulus.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Err(());
        }

        if s == "0" {
            return Ok(Self::zero());
        }

        let mut res = Self::zero();

        let ten = Self::from(BigInt::from(10u8));

        let mut first_digit = true;

        for c in s.chars() {
            match c.to_digit(10) {
                Some(c) => {
                    if first_digit {
                        if c == 0 {
                            return Err(());
                        }

                        first_digit = false;
                    }

                    res.mul_assign(&ten);
                    let digit = Self::from(u64::from(c));
                    res.add_assign(&digit);
                },
                None => {
                    return Err(());
                },
            }
        }
        if res.is_geq_modulus() {
            Err(())
        } else {
            Ok(res)
        }
    }
}

/// Outputs a string containing the value of `self`,
/// represented as a decimal without leading zeroes.
impl<P: FpConfig<N>, const N: usize> Display for Fp<P, N> {
    #[inline]
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        let string = self.into_bigint().to_string();
        write!(f, "{}", string.trim_start_matches('0'))
    }
}
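
// Parsing sketch (not part of the upstream file): `FromStr` above accepts
// plain decimal strings (no leading zeroes) whose value is below the modulus,
// so "1" parses to the multiplicative identity for any configuration.
#[allow(dead_code)]
fn parse_one<P: FpConfig<N>, const N: usize>() -> bool {
    "1".parse::<Fp<P, N>>() == Ok(Fp::<P, N>::one())
}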

impl<P: FpConfig<N>, const N: usize> Neg for Fp<P, N> {
    type Output = Self;
    #[inline]
    #[must_use]
    fn neg(mut self) -> Self {
        P::neg_in_place(&mut self);
        self
    }
}

impl<'a, P: FpConfig<N>, const N: usize> Add<&'a Fp<P, N>> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn add(mut self, other: &Self) -> Self {
        self.add_assign(other);
        self
    }
}

impl<'a, P: FpConfig<N>, const N: usize> Sub<&'a Fp<P, N>> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn sub(mut self, other: &Self) -> Self {
        self.sub_assign(other);
        self
    }
}

impl<'a, P: FpConfig<N>, const N: usize> Mul<&'a Fp<P, N>> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn mul(mut self, other: &Self) -> Self {
        self.mul_assign(other);
        self
    }
}

impl<'a, P: FpConfig<N>, const N: usize> Div<&'a Fp<P, N>> for Fp<P, N> {
    type Output = Self;

    /// Returns `self * other.inverse()` if `other.inverse()` is `Some`, and
    /// panics otherwise.
    #[inline]
    fn div(mut self, other: &Self) -> Self {
        self.mul_assign(&other.inverse().unwrap());
        self
    }
}
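
// Division sketch (not part of the upstream file): since `div` panics on a
// zero divisor, callers that need a fallible version can go through
// `inverse()` explicitly.
#[allow(dead_code)]
fn checked_div<P: FpConfig<N>, const N: usize>(a: Fp<P, N>, b: Fp<P, N>) -> Option<Fp<P, N>> {
    b.inverse().map(|b_inv| a * b_inv)
}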

impl<'a, 'b, P: FpConfig<N>, const N: usize> Add<&'b Fp<P, N>> for &'a Fp<P, N> {
    type Output = Fp<P, N>;

    #[inline]
    fn add(self, other: &'b Fp<P, N>) -> Fp<P, N> {
        let mut result = *self;
        result.add_assign(other);
        result
    }
}

impl<'a, 'b, P: FpConfig<N>, const N: usize> Sub<&'b Fp<P, N>> for &'a Fp<P, N> {
    type Output = Fp<P, N>;

    #[inline]
    fn sub(self, other: &Fp<P, N>) -> Fp<P, N> {
        let mut result = *self;
        result.sub_assign(other);
        result
    }
}

impl<'a, 'b, P: FpConfig<N>, const N: usize> Mul<&'b Fp<P, N>> for &'a Fp<P, N> {
    type Output = Fp<P, N>;

    #[inline]
    fn mul(self, other: &Fp<P, N>) -> Fp<P, N> {
        let mut result = *self;
        result.mul_assign(other);
        result
    }
}

impl<'a, 'b, P: FpConfig<N>, const N: usize> Div<&'b Fp<P, N>> for &'a Fp<P, N> {
    type Output = Fp<P, N>;

    #[inline]
    fn div(self, other: &Fp<P, N>) -> Fp<P, N> {
        let mut result = *self;
        result.div_assign(other);
        result
    }
}

impl<'a, P: FpConfig<N>, const N: usize> AddAssign<&'a Self> for Fp<P, N> {
    #[inline]
    fn add_assign(&mut self, other: &Self) {
        P::add_assign(self, other)
    }
}

impl<'a, P: FpConfig<N>, const N: usize> SubAssign<&'a Self> for Fp<P, N> {
    #[inline]
    fn sub_assign(&mut self, other: &Self) {
        P::sub_assign(self, other);
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Add<Self> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn add(mut self, other: Self) -> Self {
        self.add_assign(&other);
        self
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Add<&'a mut Self> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn add(mut self, other: &'a mut Self) -> Self {
        self.add_assign(&*other);
        self
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Sub<Self> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn sub(mut self, other: Self) -> Self {
        self.sub_assign(&other);
        self
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Sub<&'a mut Self> for Fp<P, N> {
    type Output = Self;

    #[inline]
    fn sub(mut self, other: &'a mut Self) -> Self {
        self.sub_assign(&*other);
        self
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::iter::Sum<Self> for Fp<P, N> {
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), core::ops::Add::add)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::iter::Sum<&'a Self> for Fp<P, N> {
    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), core::ops::Add::add)
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::AddAssign<Self> for Fp<P, N> {
    #[inline(always)]
    fn add_assign(&mut self, other: Self) {
        self.add_assign(&other)
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::SubAssign<Self> for Fp<P, N> {
    #[inline(always)]
    fn sub_assign(&mut self, other: Self) {
        self.sub_assign(&other)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::AddAssign<&'a mut Self> for Fp<P, N> {
    #[inline(always)]
    fn add_assign(&mut self, other: &'a mut Self) {
        self.add_assign(&*other)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::SubAssign<&'a mut Self> for Fp<P, N> {
    #[inline(always)]
    fn sub_assign(&mut self, other: &'a mut Self) {
        self.sub_assign(&*other)
    }
}

impl<'a, P: FpConfig<N>, const N: usize> MulAssign<&'a Self> for Fp<P, N> {
    fn mul_assign(&mut self, other: &Self) {
        P::mul_assign(self, other)
    }
}

/// Computes `self *= other.inverse()` if `other.inverse()` is `Some`, and
/// panics otherwise.
impl<'a, P: FpConfig<N>, const N: usize> DivAssign<&'a Self> for Fp<P, N> {
    #[inline(always)]
    fn div_assign(&mut self, other: &Self) {
        self.mul_assign(&other.inverse().unwrap());
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Mul<Self> for Fp<P, N> {
    type Output = Self;

    #[inline(always)]
    fn mul(mut self, other: Self) -> Self {
        self.mul_assign(&other);
        self
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::Div<Self> for Fp<P, N> {
    type Output = Self;

    #[inline(always)]
    fn div(mut self, other: Self) -> Self {
        self.div_assign(&other);
        self
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Mul<&'a mut Self> for Fp<P, N> {
    type Output = Self;

    #[inline(always)]
    fn mul(mut self, other: &'a mut Self) -> Self {
        self.mul_assign(&*other);
        self
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::Div<&'a mut Self> for Fp<P, N> {
    type Output = Self;

    #[inline(always)]
    fn div(mut self, other: &'a mut Self) -> Self {
        self.div_assign(&*other);
        self
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::iter::Product<Self> for Fp<P, N> {
    fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(Self::one(), core::ops::Mul::mul)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::iter::Product<&'a Self> for Fp<P, N> {
    fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
        iter.fold(Self::one(), Mul::mul)
    }
}
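
// Iterator sketch (not part of the upstream file): `Sum` and `Product` fold
// with the additive and multiplicative identities, so empty iterators yield
// zero and one respectively.
#[allow(dead_code)]
fn empty_fold_yields_identities<P: FpConfig<N>, const N: usize>() -> bool {
    let empty: [Fp<P, N>; 0] = [];
    empty.iter().sum::<Fp<P, N>>().is_zero() && empty.iter().product::<Fp<P, N>>().is_one()
}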

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::MulAssign<Self> for Fp<P, N> {
    #[inline(always)]
    fn mul_assign(&mut self, other: Self) {
        self.mul_assign(&other)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::DivAssign<&'a mut Self> for Fp<P, N> {
    #[inline(always)]
    fn div_assign(&mut self, other: &'a mut Self) {
        self.div_assign(&*other)
    }
}

#[allow(unused_qualifications)]
impl<'a, P: FpConfig<N>, const N: usize> core::ops::MulAssign<&'a mut Self> for Fp<P, N> {
    #[inline(always)]
    fn mul_assign(&mut self, other: &'a mut Self) {
        self.mul_assign(&*other)
    }
}

#[allow(unused_qualifications)]
impl<P: FpConfig<N>, const N: usize> core::ops::DivAssign<Self> for Fp<P, N> {
    #[inline(always)]
    fn div_assign(&mut self, other: Self) {
        self.div_assign(&other)
    }
}

impl<P: FpConfig<N>, const N: usize> zeroize::Zeroize for Fp<P, N> {
    // The phantom data does not contain element-specific data
    // and thus does not need to be zeroized.
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}

impl<P: FpConfig<N>, const N: usize> From<num_bigint::BigUint> for Fp<P, N> {
    #[inline]
    fn from(val: num_bigint::BigUint) -> Fp<P, N> {
        Fp::<P, N>::from_le_bytes_mod_order(&val.to_bytes_le())
    }
}

impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for num_bigint::BigUint {
    #[inline(always)]
    fn from(other: Fp<P, N>) -> Self {
        other.into_bigint().into()
    }
}
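
// Conversion sketch (not part of the upstream file): the `BigUint` conversions
// above reduce modulo `P::MODULUS` on the way in and emit the canonical
// representative on the way out, so converting an element out and back is the
// identity.
#[allow(dead_code)]
fn biguint_roundtrip<P: FpConfig<N>, const N: usize>(a: Fp<P, N>) -> bool {
    Fp::<P, N>::from(num_bigint::BigUint::from(a)) == a
}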

impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for BigInt<N> {
    #[inline(always)]
    fn from(fp: Fp<P, N>) -> Self {
        fp.into_bigint()
    }
}

impl<P: FpConfig<N>, const N: usize> From<BigInt<N>> for Fp<P, N> {
    /// Converts a `BigInt<N>` into `Self`. Panics if the integer is not less
    /// than the modulus.
    #[inline(always)]
    fn from(int: BigInt<N>) -> Self {
        Self::from_bigint(int).unwrap()
    }
}