diff --git a/bellman/.gitignore b/bellman/.gitignore new file mode 100644 index 0000000..a9d37c5 --- /dev/null +++ b/bellman/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/bellman/COPYRIGHT b/bellman/COPYRIGHT new file mode 100644 index 0000000..8b5f8cf --- /dev/null +++ b/bellman/COPYRIGHT @@ -0,0 +1,14 @@ +Copyrights in the "bellman" library are retained by their contributors. No +copyright assignment is required to contribute to the "bellman" library. + +The "bellman" library is licensed under either of + + * Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT) + +at your option. + +Unless you explicitly state otherwise, any contribution intentionally +submitted for inclusion in the work by you, as defined in the Apache-2.0 +license, shall be dual licensed as above, without any additional terms or +conditions. diff --git a/bellman/Cargo.toml b/bellman/Cargo.toml new file mode 100644 index 0000000..3a9105f --- /dev/null +++ b/bellman/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors = ["Sean Bowe "] +description = "zk-SNARK library" +documentation = "https://github.com/ebfull/bellman" +homepage = "https://github.com/ebfull/bellman" +license = "MIT/Apache-2.0" +name = "bellman" +repository = "https://github.com/ebfull/bellman" +version = "0.1.0" + +[dependencies] +rand = "0.4" +bit-vec = "0.4.4" +futures = "0.1" +futures-cpupool = "0.1" +num_cpus = "1" +crossbeam = "0.3" +pairing = "0.14" +byteorder = "1" + +[features] +default = [] diff --git a/bellman/LICENSE-APACHE b/bellman/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/bellman/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/bellman/LICENSE-MIT b/bellman/LICENSE-MIT new file mode 100644 index 0000000..31aa793 --- /dev/null +++ b/bellman/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/bellman/README.md b/bellman/README.md new file mode 100644 index 0000000..659a81c --- /dev/null +++ b/bellman/README.md @@ -0,0 +1,19 @@ +# bellman [![Crates.io](https://img.shields.io/crates/v/bellman.svg)](https://crates.io/crates/bellman) # + +This is a research project being built for [Zcash](https://z.cash/). + +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally +submitted for inclusion in the work by you, as defined in the Apache-2.0 +license, shall be dual licensed as above, without any additional terms or +conditions. diff --git a/bellman/src/domain.rs b/bellman/src/domain.rs new file mode 100644 index 0000000..ff626e5 --- /dev/null +++ b/bellman/src/domain.rs @@ -0,0 +1,494 @@ +//! This module contains an `EvaluationDomain` abstraction for +//! performing various kinds of polynomial arithmetic on top of +//! the scalar field. +//! +//! In pairing-based SNARKs like Groth16, we need to calculate +//! a quotient polynomial over a target polynomial with roots +//! at distinct points associated with each constraint of the +//! constraint system. In order to be efficient, we choose these +//! roots to be the powers of a 2^n root of unity in the field. +//! This allows us to perform polynomial operations in O(n) +//! by performing an O(n log n) FFT over such a domain. + +use pairing::{ + Engine, + Field, + PrimeField, + CurveProjective +}; + +use super::{ + SynthesisError +}; + +use super::multicore::Worker; + +pub struct EvaluationDomain> { + coeffs: Vec, + exp: u32, + omega: E::Fr, + omegainv: E::Fr, + geninv: E::Fr, + minv: E::Fr +} + +impl> EvaluationDomain { + pub fn as_ref(&self) -> &[G] { + &self.coeffs + } + + pub fn as_mut(&mut self) -> &mut [G] { + &mut self.coeffs + } + + pub fn into_coeffs(self) -> Vec { + self.coeffs + } + + pub fn from_coeffs(mut coeffs: Vec) -> Result, SynthesisError> + { + // Compute the size of our evaluation domain + let mut m = 1; + let mut exp = 0; + while m < coeffs.len() { + m *= 2; + exp += 1; + + // The pairing-friendly curve may not be able to support + // large enough (radix2) evaluation domains. 
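+            // (`E::Fr::S` is the two-adicity of the scalar field; for
+            // BLS12-381's Fr it is 32, which bounds the supported domain size.)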
+ if exp >= E::Fr::S { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + } + + // Compute omega, the 2^exp primitive root of unity + let mut omega = E::Fr::root_of_unity(); + for _ in exp..E::Fr::S { + omega.square(); + } + + // Extend the coeffs vector with zeroes if necessary + coeffs.resize(m, G::group_zero()); + + Ok(EvaluationDomain { + coeffs: coeffs, + exp: exp, + omega: omega, + omegainv: omega.inverse().unwrap(), + geninv: E::Fr::multiplicative_generator().inverse().unwrap(), + minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap() + }) + } + + pub fn fft(&mut self, worker: &Worker) + { + best_fft(&mut self.coeffs, worker, &self.omega, self.exp); + } + + pub fn ifft(&mut self, worker: &Worker) + { + best_fft(&mut self.coeffs, worker, &self.omegainv, self.exp); + + worker.scope(self.coeffs.len(), |scope, chunk| { + let minv = self.minv; + + for v in self.coeffs.chunks_mut(chunk) { + scope.spawn(move || { + for v in v { + v.group_mul_assign(&minv); + } + }); + } + }); + } + + pub fn distribute_powers(&mut self, worker: &Worker, g: E::Fr) + { + worker.scope(self.coeffs.len(), |scope, chunk| { + for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() { + scope.spawn(move || { + let mut u = g.pow(&[(i * chunk) as u64]); + for v in v.iter_mut() { + v.group_mul_assign(&u); + u.mul_assign(&g); + } + }); + } + }); + } + + pub fn coset_fft(&mut self, worker: &Worker) + { + self.distribute_powers(worker, E::Fr::multiplicative_generator()); + self.fft(worker); + } + + pub fn icoset_fft(&mut self, worker: &Worker) + { + let geninv = self.geninv; + + self.ifft(worker); + self.distribute_powers(worker, geninv); + } + + /// This evaluates t(tau) for this domain, which is + /// tau^m - 1 for these radix-2 domains. + pub fn z(&self, tau: &E::Fr) -> E::Fr { + let mut tmp = tau.pow(&[self.coeffs.len() as u64]); + tmp.sub_assign(&E::Fr::one()); + + tmp + } + + /// The target polynomial is the zero polynomial in our + /// evaluation domain, so we must perform division over + /// a coset. + pub fn divide_by_z_on_coset(&mut self, worker: &Worker) + { + let i = self.z(&E::Fr::multiplicative_generator()).inverse().unwrap(); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for v in self.coeffs.chunks_mut(chunk) { + scope.spawn(move || { + for v in v { + v.group_mul_assign(&i); + } + }); + } + }); + } + + /// Perform O(n) multiplication of two polynomials in the domain. + pub fn mul_assign(&mut self, worker: &Worker, other: &EvaluationDomain>) { + assert_eq!(self.coeffs.len(), other.coeffs.len()); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) { + scope.spawn(move || { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.group_mul_assign(&b.0); + } + }); + } + }); + } + + /// Perform O(n) subtraction of one polynomial from another in the domain. 
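+    /// The two domains must have the same size; this is asserted at runtime.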
+ pub fn sub_assign(&mut self, worker: &Worker, other: &EvaluationDomain) { + assert_eq!(self.coeffs.len(), other.coeffs.len()); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) { + scope.spawn(move || { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.group_sub_assign(&b); + } + }); + } + }); + } +} + +pub trait Group: Sized + Copy + Clone + Send + Sync { + fn group_zero() -> Self; + fn group_mul_assign(&mut self, by: &E::Fr); + fn group_add_assign(&mut self, other: &Self); + fn group_sub_assign(&mut self, other: &Self); +} + +pub struct Point(pub G); + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.0 == other.0 + } +} + +impl Copy for Point { } + +impl Clone for Point { + fn clone(&self) -> Point { + *self + } +} + +impl Group for Point { + fn group_zero() -> Self { + Point(G::zero()) + } + fn group_mul_assign(&mut self, by: &G::Scalar) { + self.0.mul_assign(by.into_repr()); + } + fn group_add_assign(&mut self, other: &Self) { + self.0.add_assign(&other.0); + } + fn group_sub_assign(&mut self, other: &Self) { + self.0.sub_assign(&other.0); + } +} + +pub struct Scalar(pub E::Fr); + +impl PartialEq for Scalar { + fn eq(&self, other: &Scalar) -> bool { + self.0 == other.0 + } +} + +impl Copy for Scalar { } + +impl Clone for Scalar { + fn clone(&self) -> Scalar { + *self + } +} + +impl Group for Scalar { + fn group_zero() -> Self { + Scalar(E::Fr::zero()) + } + fn group_mul_assign(&mut self, by: &E::Fr) { + self.0.mul_assign(by); + } + fn group_add_assign(&mut self, other: &Self) { + self.0.add_assign(&other.0); + } + fn group_sub_assign(&mut self, other: &Self) { + self.0.sub_assign(&other.0); + } +} + +fn best_fft>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32) +{ + let log_cpus = worker.log_num_cpus(); + + if log_n <= log_cpus { + serial_fft(a, omega, log_n); + } else { + parallel_fft(a, worker, omega, log_n, log_cpus); + } +} + +fn serial_fft>(a: &mut [T], omega: &E::Fr, log_n: u32) +{ + fn bitreverse(mut n: u32, l: u32) -> u32 { + let mut r = 0; + for _ in 0..l { + r = (r << 1) | (n & 1); + n >>= 1; + } + r + } + + let n = a.len() as u32; + assert_eq!(n, 1 << log_n); + + for k in 0..n { + let rk = bitreverse(k, log_n); + if k < rk { + a.swap(rk as usize, k as usize); + } + } + + let mut m = 1; + for _ in 0..log_n { + let w_m = omega.pow(&[(n / (2*m)) as u64]); + + let mut k = 0; + while k < n { + let mut w = E::Fr::one(); + for j in 0..m { + let mut t = a[(k+j+m) as usize]; + t.group_mul_assign(&w); + let mut tmp = a[(k+j) as usize]; + tmp.group_sub_assign(&t); + a[(k+j+m) as usize] = tmp; + a[(k+j) as usize].group_add_assign(&t); + w.mul_assign(&w_m); + } + + k += 2*m; + } + + m *= 2; + } +} + +fn parallel_fft>( + a: &mut [T], + worker: &Worker, + omega: &E::Fr, + log_n: u32, + log_cpus: u32 +) +{ + assert!(log_n >= log_cpus); + + let num_cpus = 1 << log_cpus; + let log_new_n = log_n - log_cpus; + let mut tmp = vec![vec![T::group_zero(); 1 << log_new_n]; num_cpus]; + let new_omega = omega.pow(&[num_cpus as u64]); + + worker.scope(0, |scope, _| { + let a = &*a; + + for (j, tmp) in tmp.iter_mut().enumerate() { + scope.spawn(move || { + // Shuffle into a sub-FFT + let omega_j = omega.pow(&[j as u64]); + let omega_step = omega.pow(&[(j as u64) << log_new_n]); + + let mut elt = E::Fr::one(); + for i in 0..(1 << log_new_n) { + for s in 0..num_cpus { + let idx = (i + (s << log_new_n)) % (1 << log_n); + let mut t = a[idx]; + t.group_mul_assign(&elt); + 
tmp[i].group_add_assign(&t); + elt.mul_assign(&omega_step); + } + elt.mul_assign(&omega_j); + } + + // Perform sub-FFT + serial_fft(tmp, &new_omega, log_new_n); + }); + } + }); + + // TODO: does this hurt or help? + worker.scope(a.len(), |scope, chunk| { + let tmp = &tmp; + + for (idx, a) in a.chunks_mut(chunk).enumerate() { + scope.spawn(move || { + let mut idx = idx * chunk; + let mask = (1 << log_cpus) - 1; + for a in a { + *a = tmp[idx & mask][idx >> log_cpus]; + idx += 1; + } + }); + } + }); +} + +// Test multiplying various (low degree) polynomials together and +// comparing with naive evaluations. +#[test] +fn polynomial_arith() { + use pairing::bls12_381::Bls12; + use rand::{self, Rand}; + + fn test_mul(rng: &mut R) + { + let worker = Worker::new(); + + for coeffs_a in 0..70 { + for coeffs_b in 0..70 { + let mut a: Vec<_> = (0..coeffs_a).map(|_| Scalar::(E::Fr::rand(rng))).collect(); + let mut b: Vec<_> = (0..coeffs_b).map(|_| Scalar::(E::Fr::rand(rng))).collect(); + + // naive evaluation + let mut naive = vec![Scalar(E::Fr::zero()); coeffs_a + coeffs_b]; + for (i1, a) in a.iter().enumerate() { + for (i2, b) in b.iter().enumerate() { + let mut prod = *a; + prod.group_mul_assign(&b.0); + naive[i1 + i2].group_add_assign(&prod); + } + } + + a.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero())); + b.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero())); + + let mut a = EvaluationDomain::from_coeffs(a).unwrap(); + let mut b = EvaluationDomain::from_coeffs(b).unwrap(); + + a.fft(&worker); + b.fft(&worker); + a.mul_assign(&worker, &b); + a.ifft(&worker); + + for (naive, fft) in naive.iter().zip(a.coeffs.iter()) { + assert!(naive == fft); + } + } + } + } + + let rng = &mut rand::thread_rng(); + + test_mul::(rng); +} + +#[test] +fn fft_composition() { + use pairing::bls12_381::Bls12; + use rand; + + fn test_comp(rng: &mut R) + { + let worker = Worker::new(); + + for coeffs in 0..10 { + let coeffs = 1 << coeffs; + + let mut v = vec![]; + for _ in 0..coeffs { + v.push(Scalar::(rng.gen())); + } + + let mut domain = EvaluationDomain::from_coeffs(v.clone()).unwrap(); + domain.ifft(&worker); + domain.fft(&worker); + assert!(v == domain.coeffs); + domain.fft(&worker); + domain.ifft(&worker); + assert!(v == domain.coeffs); + domain.icoset_fft(&worker); + domain.coset_fft(&worker); + assert!(v == domain.coeffs); + domain.coset_fft(&worker); + domain.icoset_fft(&worker); + assert!(v == domain.coeffs); + } + } + + let rng = &mut rand::thread_rng(); + + test_comp::(rng); +} + +#[test] +fn parallel_fft_consistency() { + use pairing::bls12_381::Bls12; + use rand::{self, Rand}; + use std::cmp::min; + + fn test_consistency(rng: &mut R) + { + let worker = Worker::new(); + + for _ in 0..5 { + for log_d in 0..10 { + let d = 1 << log_d; + + let v1 = (0..d).map(|_| Scalar::(E::Fr::rand(rng))).collect::>(); + let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap(); + let mut v2 = EvaluationDomain::from_coeffs(v1.coeffs.clone()).unwrap(); + + for log_cpus in log_d..min(log_d+1, 3) { + parallel_fft(&mut v1.coeffs, &worker, &v1.omega, log_d, log_cpus); + serial_fft(&mut v2.coeffs, &v2.omega, log_d); + + assert!(v1.coeffs == v2.coeffs); + } + } + } + } + + let rng = &mut rand::thread_rng(); + + test_consistency::(rng); +} diff --git a/bellman/src/groth16/generator.rs b/bellman/src/groth16/generator.rs new file mode 100644 index 0000000..1eed62d --- /dev/null +++ b/bellman/src/groth16/generator.rs @@ -0,0 +1,482 @@ +use rand::Rng; + +use std::sync::Arc; + +use pairing::{ + Engine, + PrimeField, + Field, + Wnaf, + 
CurveProjective, + CurveAffine +}; + +use super::{ + Parameters, + VerifyingKey +}; + +use ::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use ::domain::{ + EvaluationDomain, + Scalar +}; + +use ::multicore::{ + Worker +}; + +/// Generates a random common reference string for +/// a circuit. +pub fn generate_random_parameters( + circuit: C, + rng: &mut R +) -> Result, SynthesisError> + where E: Engine, C: Circuit, R: Rng +{ + let g1 = rng.gen(); + let g2 = rng.gen(); + let alpha = rng.gen(); + let beta = rng.gen(); + let gamma = rng.gen(); + let delta = rng.gen(); + let tau = rng.gen(); + + generate_parameters::( + circuit, + g1, + g2, + alpha, + beta, + gamma, + delta, + tau + ) +} + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into a QAP. +struct KeypairAssembly { + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + at_inputs: Vec>, + bt_inputs: Vec>, + ct_inputs: Vec>, + at_aux: Vec>, + bt_aux: Vec>, + ct_aux: Vec> +} + +impl ConstraintSystem for KeypairAssembly { + type Root = Self; + + fn alloc( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_aux; + self.num_aux += 1; + + self.at_aux.push(vec![]); + self.bt_aux.push(vec![]); + self.ct_aux.push(vec![]); + + Ok(Variable(Index::Aux(index))) + } + + fn alloc_input( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_inputs; + self.num_inputs += 1; + + self.at_inputs.push(vec![]); + self.bt_inputs.push(vec![]); + self.ct_inputs.push(vec![]); + + Ok(Variable(Index::Input(index))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + fn eval( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize + ) + { + for (index, coeff) in l.0 { + match index { + Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)), + Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)) + } + } + } + + eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints); + eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints); + eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints); + + self.num_constraints += 1; + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +/// Create parameters for a circuit, given some toxic waste. 
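+/// The trapdoor values (`alpha`, `beta`, `gamma`, `delta`, `tau`) are the
+/// toxic waste and must be discarded after generation; anyone who learns
+/// them could forge proofs. `generate_random_parameters` samples them from
+/// an RNG for you.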
+pub fn generate_parameters( + circuit: C, + g1: E::G1, + g2: E::G2, + alpha: E::Fr, + beta: E::Fr, + gamma: E::Fr, + delta: E::Fr, + tau: E::Fr +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + at_inputs: vec![], + bt_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + bt_aux: vec![], + ct_aux: vec![] + }; + + // Allocate the "one" input variable + assembly.alloc_input(|| "", || Ok(E::Fr::one()))?; + + // Synthesize the circuit. + circuit.synthesize(&mut assembly)?; + + // Input constraints to ensure full density of IC query + // x * 0 = 0 + for i in 0..assembly.num_inputs { + assembly.enforce(|| "", + |lc| lc + Variable(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + // Create bases for blind evaluation of polynomials at tau + let powers_of_tau = vec![Scalar::(E::Fr::zero()); assembly.num_constraints]; + let mut powers_of_tau = EvaluationDomain::from_coeffs(powers_of_tau)?; + + // Compute G1 window table + let mut g1_wnaf = Wnaf::new(); + let g1_wnaf = g1_wnaf.base(g1, { + // H query + (powers_of_tau.as_ref().len() - 1) + // IC/L queries + + assembly.num_inputs + assembly.num_aux + // A query + + assembly.num_inputs + assembly.num_aux + // B query + + assembly.num_inputs + assembly.num_aux + }); + + // Compute G2 window table + let mut g2_wnaf = Wnaf::new(); + let g2_wnaf = g2_wnaf.base(g2, { + // B query + assembly.num_inputs + assembly.num_aux + }); + + let gamma_inverse = gamma.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + let delta_inverse = delta.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + + let worker = Worker::new(); + + let mut h = vec![E::G1::zero(); powers_of_tau.as_ref().len() - 1]; + { + // Compute powers of tau + { + let powers_of_tau = powers_of_tau.as_mut(); + worker.scope(powers_of_tau.len(), |scope, chunk| { + for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate() + { + scope.spawn(move || { + let mut current_tau_power = tau.pow(&[(i*chunk) as u64]); + + for p in powers_of_tau { + p.0 = current_tau_power; + current_tau_power.mul_assign(&tau); + } + }); + } + }); + } + + // coeff = t(x) / delta + let mut coeff = powers_of_tau.z(&tau); + coeff.mul_assign(&delta_inverse); + + // Compute the H query with multiple threads + worker.scope(h.len(), |scope, chunk| { + for (h, p) in h.chunks_mut(chunk).zip(powers_of_tau.as_ref().chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + + scope.spawn(move || { + // Set values of the H query to g1^{(tau^i * t(tau)) / delta} + for (h, p) in h.iter_mut().zip(p.iter()) + { + // Compute final exponent + let mut exp = p.0; + exp.mul_assign(&coeff); + + // Exponentiate + *h = g1_wnaf.scalar(exp.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(h); + }); + } + }); + } + + // Use inverse FFT to convert powers of tau to Lagrange coefficients + powers_of_tau.ifft(&worker); + let powers_of_tau = powers_of_tau.into_coeffs(); + + let mut a = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux]; + let mut ic = vec![E::G1::zero(); assembly.num_inputs]; + let mut l = vec![E::G1::zero(); assembly.num_aux]; + + fn eval( + // wNAF window tables + g1_wnaf: &Wnaf>, + g2_wnaf: &Wnaf>, + + // Lagrange coefficients for tau + powers_of_tau: &[Scalar], + + // QAP polynomials + at: &[Vec<(E::Fr, usize)>], + bt: &[Vec<(E::Fr, 
usize)>], + ct: &[Vec<(E::Fr, usize)>], + + // Resulting evaluated QAP polynomials + a: &mut [E::G1], + b_g1: &mut [E::G1], + b_g2: &mut [E::G2], + ext: &mut [E::G1], + + // Inverse coefficient for ext elements + inv: &E::Fr, + + // Trapdoors + alpha: &E::Fr, + beta: &E::Fr, + + // Worker + worker: &Worker + ) + { + // Sanity check + assert_eq!(a.len(), at.len()); + assert_eq!(a.len(), bt.len()); + assert_eq!(a.len(), ct.len()); + assert_eq!(a.len(), b_g1.len()); + assert_eq!(a.len(), b_g2.len()); + assert_eq!(a.len(), ext.len()); + + // Evaluate polynomials in multiple threads + worker.scope(a.len(), |scope, chunk| { + for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.chunks_mut(chunk) + .zip(b_g1.chunks_mut(chunk)) + .zip(b_g2.chunks_mut(chunk)) + .zip(ext.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(bt.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + let mut g2_wnaf = g2_wnaf.shared(); + + scope.spawn(move || { + for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.iter_mut() + .zip(b_g1.iter_mut()) + .zip(b_g2.iter_mut()) + .zip(ext.iter_mut()) + .zip(at.iter()) + .zip(bt.iter()) + .zip(ct.iter()) + { + fn eval_at_tau( + powers_of_tau: &[Scalar], + p: &[(E::Fr, usize)] + ) -> E::Fr + { + let mut acc = E::Fr::zero(); + + for &(ref coeff, index) in p { + let mut n = powers_of_tau[index].0; + n.mul_assign(coeff); + acc.add_assign(&n); + } + + acc + } + + // Evaluate QAP polynomials at tau + let mut at = eval_at_tau(powers_of_tau, at); + let mut bt = eval_at_tau(powers_of_tau, bt); + let ct = eval_at_tau(powers_of_tau, ct); + + // Compute A query (in G1) + if !at.is_zero() { + *a = g1_wnaf.scalar(at.into_repr()); + } + + // Compute B query (in G1/G2) + if !bt.is_zero() { + let bt_repr = bt.into_repr(); + *b_g1 = g1_wnaf.scalar(bt_repr); + *b_g2 = g2_wnaf.scalar(bt_repr); + } + + at.mul_assign(&beta); + bt.mul_assign(&alpha); + + let mut e = at; + e.add_assign(&bt); + e.add_assign(&ct); + e.mul_assign(inv); + + *ext = g1_wnaf.scalar(e.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(a); + E::G1::batch_normalization(b_g1); + E::G2::batch_normalization(b_g2); + E::G1::batch_normalization(ext); + }); + } + }); + } + + // Evaluate for inputs. + eval( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_inputs, + &assembly.bt_inputs, + &assembly.ct_inputs, + &mut a[0..assembly.num_inputs], + &mut b_g1[0..assembly.num_inputs], + &mut b_g2[0..assembly.num_inputs], + &mut ic, + &gamma_inverse, + &alpha, + &beta, + &worker + ); + + // Evaluate for auxillary variables. + eval( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_aux, + &assembly.bt_aux, + &assembly.ct_aux, + &mut a[assembly.num_inputs..], + &mut b_g1[assembly.num_inputs..], + &mut b_g2[assembly.num_inputs..], + &mut l, + &delta_inverse, + &alpha, + &beta, + &worker + ); + + // Don't allow any elements be unconstrained, so that + // the L query is always fully dense. 
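+    // (An unconstrained auxiliary variable evaluates to zero here, so its
+    // element would be the point at infinity.)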
+ for e in l.iter() { + if e.is_zero() { + return Err(SynthesisError::UnconstrainedVariable); + } + } + + let g1 = g1.into_affine(); + let g2 = g2.into_affine(); + + let vk = VerifyingKey:: { + alpha_g1: g1.mul(alpha).into_affine(), + beta_g1: g1.mul(beta).into_affine(), + beta_g2: g2.mul(beta).into_affine(), + gamma_g2: g2.mul(gamma).into_affine(), + delta_g1: g1.mul(delta).into_affine(), + delta_g2: g2.mul(delta).into_affine(), + ic: ic.into_iter().map(|e| e.into_affine()).collect() + }; + + Ok(Parameters { + vk: vk, + h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()), + l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), + + // Filter points at infinity away from A/B queries + a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + }) +} diff --git a/bellman/src/groth16/mod.rs b/bellman/src/groth16/mod.rs new file mode 100644 index 0000000..3b8d671 --- /dev/null +++ b/bellman/src/groth16/mod.rs @@ -0,0 +1,576 @@ +use pairing::{ + Engine, + CurveAffine, + EncodedPoint +}; + +use ::{ + SynthesisError +}; + +use multiexp::SourceBuilder; +use std::io::{self, Read, Write}; +use std::sync::Arc; +use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; + +#[cfg(test)] +mod tests; + +mod generator; +mod prover; +mod verifier; + +pub use self::generator::*; +pub use self::prover::*; +pub use self::verifier::*; + +#[derive(Clone)] +pub struct Proof { + pub a: E::G1Affine, + pub b: E::G2Affine, + pub c: E::G1Affine +} + +impl PartialEq for Proof { + fn eq(&self, other: &Self) -> bool { + self.a == other.a && + self.b == other.b && + self.c == other.c + } +} + +impl Proof { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.a.into_compressed().as_ref())?; + writer.write_all(self.b.into_compressed().as_ref())?; + writer.write_all(self.c.into_compressed().as_ref())?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Compressed::empty(); + let mut g2_repr = ::Compressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let a = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g2_repr.as_mut())?; + let b = g2_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g1_repr.as_mut())?; + let c = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + Ok(Proof { + a: a, + b: b, + c: c + }) + } +} + +#[derive(Clone)] +pub struct VerifyingKey { + // alpha in g1 for verifying and for creating A/C elements of + // proof. Never the point at infinity. + pub alpha_g1: E::G1Affine, + + // beta in g1 and g2 for verifying and for creating B/C elements + // of proof. Never the point at infinity. + pub beta_g1: E::G1Affine, + pub beta_g2: E::G2Affine, + + // gamma in g2 for verifying. Never the point at infinity. 
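+    // (During verification it is paired against the linear combination of
+    // the IC elements; it is not needed by the prover.)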
+ pub gamma_g2: E::G2Affine, + + // delta in g1/g2 for verifying and proving, essentially the magic + // trapdoor that forces the prover to evaluate the C element of the + // proof with only components from the CRS. Never the point at + // infinity. + pub delta_g1: E::G1Affine, + pub delta_g2: E::G2Affine, + + // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / gamma + // for all public inputs. Because all public inputs have a dummy constraint, + // this is the same size as the number of inputs, and never contains points + // at infinity. + pub ic: Vec +} + +impl PartialEq for VerifyingKey { + fn eq(&self, other: &Self) -> bool { + self.alpha_g1 == other.alpha_g1 && + self.beta_g1 == other.beta_g1 && + self.beta_g2 == other.beta_g2 && + self.gamma_g2 == other.gamma_g2 && + self.delta_g1 == other.delta_g1 && + self.delta_g2 == other.delta_g2 && + self.ic == other.ic + } +} + +impl VerifyingKey { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?; + writer.write_all(self.beta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.beta_g2.into_uncompressed().as_ref())?; + writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g2.into_uncompressed().as_ref())?; + writer.write_u32::(self.ic.len() as u32)?; + for ic in &self.ic { + writer.write_all(ic.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Uncompressed::empty(); + let mut g2_repr = ::Uncompressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let beta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let delta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let delta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let ic_len = reader.read_u32::()? as usize; + + let mut ic = vec![]; + + for _ in 0..ic_len { + reader.read_exact(g1_repr.as_mut())?; + let g1 = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + ic.push(g1); + } + + Ok(VerifyingKey { + alpha_g1: alpha_g1, + beta_g1: beta_g1, + beta_g2: beta_g2, + gamma_g2: gamma_g2, + delta_g1: delta_g1, + delta_g2: delta_g2, + ic: ic + }) + } +} + +#[derive(Clone)] +pub struct Parameters { + pub vk: VerifyingKey, + + // Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and + // m-2 inclusive. Never contains points at infinity. + pub h: Arc>, + + // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta + // for all auxillary inputs. Variables can never be unconstrained, so this + // never contains points at infinity. 
+ pub l: Arc>, + + // QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains + // points at infinity: polynomials that evaluate to zero are omitted from + // the CRS and the prover can deterministically skip their evaluation. + pub a: Arc>, + + // QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in + // G1 and G2 for C/B queries, respectively. Never contains points at + // infinity for the same reason as the "A" polynomials. + pub b_g1: Arc>, + pub b_g2: Arc> +} + +impl PartialEq for Parameters { + fn eq(&self, other: &Self) -> bool { + self.vk == other.vk && + self.h == other.h && + self.l == other.l && + self.a == other.a && + self.b_g1 == other.b_g1 && + self.b_g2 == other.b_g2 + } +} + +impl Parameters { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + self.vk.write(&mut writer)?; + + writer.write_u32::(self.h.len() as u32)?; + for g in &self.h[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.l.len() as u32)?; + for g in &self.l[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.a.len() as u32)?; + for g in &self.a[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.b_g1.len() as u32)?; + for g in &self.b_g1[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.b_g2.len() as u32)?; + for g in &self.b_g2[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + let read_g1 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let read_g2 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let vk = VerifyingKey::::read(&mut reader)?; + + let mut h = vec![]; + let mut l = vec![]; + let mut a = vec![]; + let mut b_g1 = vec![]; + let mut b_g2 = vec![]; + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + h.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + l.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + a.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + b_g1.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + b_g2.push(read_g2(&mut reader)?); + } + } + + Ok(Parameters { + vk: vk, + h: Arc::new(h), + l: Arc::new(l), + a: Arc::new(a), + b_g1: Arc::new(b_g1), + b_g2: Arc::new(b_g2) + }) + } +} + +pub struct PreparedVerifyingKey { + /// Pairing result of alpha*beta + alpha_g1_beta_g2: E::Fqk, + /// -gamma in G2 + neg_gamma_g2: ::Prepared, + /// -delta in G2 + neg_delta_g2: ::Prepared, + /// Copy of IC from `VerifiyingKey`. 
+ ic: Vec +} + +pub trait ParameterSource { + type G1Builder: SourceBuilder; + type G2Builder: SourceBuilder; + + fn get_vk( + &mut self, + num_ic: usize + ) -> Result, SynthesisError>; + fn get_h( + &mut self, + num_h: usize + ) -> Result; + fn get_l( + &mut self, + num_l: usize + ) -> Result; + fn get_a( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; + fn get_b_g1( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; + fn get_b_g2( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>; +} + +impl<'a, E: Engine> ParameterSource for &'a Parameters { + type G1Builder = (Arc>, usize); + type G2Builder = (Arc>, usize); + + fn get_vk( + &mut self, + _: usize + ) -> Result, SynthesisError> + { + Ok(self.vk.clone()) + } + + fn get_h( + &mut self, + _: usize + ) -> Result + { + Ok((self.h.clone(), 0)) + } + + fn get_l( + &mut self, + _: usize + ) -> Result + { + Ok((self.l.clone(), 0)) + } + + fn get_a( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> + { + Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs))) + } + + fn get_b_g1( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> + { + Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs))) + } + + fn get_b_g2( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> + { + Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs))) + } +} + +#[cfg(test)] +mod test_with_bls12_381 { + use super::*; + use {Circuit, SynthesisError, ConstraintSystem}; + + use rand::{Rand, thread_rng}; + use pairing::{Field}; + use pairing::bls12_381::{Bls12, Fr}; + + #[test] + fn serialization() { + struct MySillyCircuit { + a: Option, + b: Option + } + + impl Circuit for MySillyCircuit { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.alloc_input(|| "c", || { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + + cs.enforce( + || "a*b=c", + |lc| lc + a, + |lc| lc + b, + |lc| lc + c + ); + + Ok(()) + } + } + + let rng = &mut thread_rng(); + + let params = generate_random_parameters::( + MySillyCircuit { a: None, b: None }, + rng + ).unwrap(); + + { + let mut v = vec![]; + + params.write(&mut v).unwrap(); + assert_eq!(v.len(), 2136); + + let de_params = Parameters::read(&v[..], true).unwrap(); + assert!(params == de_params); + + let de_params = Parameters::read(&v[..], false).unwrap(); + assert!(params == de_params); + } + + let pvk = prepare_verifying_key::(¶ms.vk); + + for _ in 0..100 { + let a = Fr::rand(rng); + let b = Fr::rand(rng); + let mut c = a; + c.mul_assign(&b); + + let proof = create_random_proof( + MySillyCircuit { + a: Some(a), + b: Some(b) + }, + ¶ms, + rng + ).unwrap(); + + let mut v = vec![]; + proof.write(&mut v).unwrap(); + + assert_eq!(v.len(), 192); + + let de_proof = Proof::read(&v[..]).unwrap(); + assert!(proof == de_proof); + + assert!(verify_proof(&pvk, &proof, &[c]).unwrap()); + assert!(!verify_proof(&pvk, &proof, &[a]).unwrap()); + } + } +} diff --git 
a/bellman/src/groth16/prover.rs b/bellman/src/groth16/prover.rs new file mode 100644 index 0000000..f21fcce --- /dev/null +++ b/bellman/src/groth16/prover.rs @@ -0,0 +1,334 @@ +use rand::Rng; + +use std::sync::Arc; + +use futures::Future; + +use pairing::{ + Engine, + PrimeField, + Field, + CurveProjective, + CurveAffine +}; + +use super::{ + ParameterSource, + Proof +}; + +use ::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use ::domain::{ + EvaluationDomain, + Scalar +}; + +use ::multiexp::{ + DensityTracker, + FullDensity, + multiexp +}; + +use ::multicore::{ + Worker +}; + +fn eval( + lc: &LinearCombination, + mut input_density: Option<&mut DensityTracker>, + mut aux_density: Option<&mut DensityTracker>, + input_assignment: &[E::Fr], + aux_assignment: &[E::Fr] +) -> E::Fr +{ + let mut acc = E::Fr::zero(); + + for &(index, coeff) in lc.0.iter() { + let mut tmp; + + match index { + Variable(Index::Input(i)) => { + tmp = input_assignment[i]; + if let Some(ref mut v) = input_density { + v.inc(i); + } + }, + Variable(Index::Aux(i)) => { + tmp = aux_assignment[i]; + if let Some(ref mut v) = aux_density { + v.inc(i); + } + } + } + + if coeff == E::Fr::one() { + acc.add_assign(&tmp); + } else { + tmp.mul_assign(&coeff); + acc.add_assign(&tmp); + } + } + + acc +} + +struct ProvingAssignment { + // Density of queries + a_aux_density: DensityTracker, + b_input_density: DensityTracker, + b_aux_density: DensityTracker, + + // Evaluations of A, B, C polynomials + a: Vec>, + b: Vec>, + c: Vec>, + + // Assignments of variables + input_assignment: Vec, + aux_assignment: Vec +} + +impl ConstraintSystem for ProvingAssignment { + type Root = Self; + + fn alloc( + &mut self, + _: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.aux_assignment.push(f()?); + self.a_aux_density.add_element(); + self.b_aux_density.add_element(); + + Ok(Variable(Index::Aux(self.aux_assignment.len() - 1))) + } + + fn alloc_input( + &mut self, + _: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.input_assignment.push(f()?); + self.b_input_density.add_element(); + + Ok(Variable(Index::Input(self.input_assignment.len() - 1))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.a.push(Scalar(eval( + &a, + // Inputs have full density in the A query + // because there are constraints of the + // form x * 0 = 0 for each input. + None, + Some(&mut self.a_aux_density), + &self.input_assignment, + &self.aux_assignment + ))); + self.b.push(Scalar(eval( + &b, + Some(&mut self.b_input_density), + Some(&mut self.b_aux_density), + &self.input_assignment, + &self.aux_assignment + ))); + self.c.push(Scalar(eval( + &c, + // There is no C polynomial query, + // though there is an (beta)A + (alpha)B + C + // query for all aux variables. + // However, that query has full density. + None, + None, + &self.input_assignment, + &self.aux_assignment + ))); + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. 
+ } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +pub fn create_random_proof>( + circuit: C, + params: P, + rng: &mut R +) -> Result, SynthesisError> + where E: Engine, C: Circuit, R: Rng +{ + let r = rng.gen(); + let s = rng.gen(); + + create_proof::(circuit, params, r, s) +} + +pub fn create_proof>( + circuit: C, + mut params: P, + r: E::Fr, + s: E::Fr +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let mut prover = ProvingAssignment { + a_aux_density: DensityTracker::new(), + b_input_density: DensityTracker::new(), + b_aux_density: DensityTracker::new(), + a: vec![], + b: vec![], + c: vec![], + input_assignment: vec![], + aux_assignment: vec![] + }; + + prover.alloc_input(|| "", || Ok(E::Fr::one()))?; + + circuit.synthesize(&mut prover)?; + + for i in 0..prover.input_assignment.len() { + prover.enforce(|| "", + |lc| lc + Variable(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + let worker = Worker::new(); + + let vk = params.get_vk(prover.input_assignment.len())?; + + let h = { + let mut a = EvaluationDomain::from_coeffs(prover.a)?; + let mut b = EvaluationDomain::from_coeffs(prover.b)?; + let mut c = EvaluationDomain::from_coeffs(prover.c)?; + a.ifft(&worker); + a.coset_fft(&worker); + b.ifft(&worker); + b.coset_fft(&worker); + c.ifft(&worker); + c.coset_fft(&worker); + + a.mul_assign(&worker, &b); + drop(b); + a.sub_assign(&worker, &c); + drop(c); + a.divide_by_z_on_coset(&worker); + a.icoset_fft(&worker); + let mut a = a.into_coeffs(); + let a_len = a.len() - 1; + a.truncate(a_len); + // TODO: parallelize if it's even helpful + let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::>()); + + multiexp(&worker, params.get_h(a.len())?, FullDensity, a) + }; + + // TODO: parallelize if it's even helpful + let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::>()); + let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::>()); + + let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone()); + + let a_aux_density_total = prover.a_aux_density.get_total_density(); + + let (a_inputs_source, a_aux_source) = params.get_a(input_assignment.len(), a_aux_density_total)?; + + let a_inputs = multiexp(&worker, a_inputs_source, FullDensity, input_assignment.clone()); + let a_aux = multiexp(&worker, a_aux_source, Arc::new(prover.a_aux_density), aux_assignment.clone()); + + let b_input_density = Arc::new(prover.b_input_density); + let b_input_density_total = b_input_density.get_total_density(); + let b_aux_density = Arc::new(prover.b_aux_density); + let b_aux_density_total = b_aux_density.get_total_density(); + + let (b_g1_inputs_source, b_g1_aux_source) = params.get_b_g1(b_input_density_total, b_aux_density_total)?; + + let b_g1_inputs = multiexp(&worker, b_g1_inputs_source, b_input_density.clone(), input_assignment.clone()); + let b_g1_aux = multiexp(&worker, b_g1_aux_source, b_aux_density.clone(), aux_assignment.clone()); + + let (b_g2_inputs_source, b_g2_aux_source) = params.get_b_g2(b_input_density_total, b_aux_density_total)?; + + let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment); + let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment); + + if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() { + // If this element is zero, someone is trying to perform a + // 
subversion-CRS attack. + return Err(SynthesisError::UnexpectedIdentity); + } + + let mut g_a = vk.delta_g1.mul(r); + g_a.add_assign_mixed(&vk.alpha_g1); + let mut g_b = vk.delta_g2.mul(s); + g_b.add_assign_mixed(&vk.beta_g2); + let mut g_c; + { + let mut rs = r; + rs.mul_assign(&s); + + g_c = vk.delta_g1.mul(rs); + g_c.add_assign(&vk.alpha_g1.mul(s)); + g_c.add_assign(&vk.beta_g1.mul(r)); + } + let mut a_answer = a_inputs.wait()?; + a_answer.add_assign(&a_aux.wait()?); + g_a.add_assign(&a_answer); + a_answer.mul_assign(s); + g_c.add_assign(&a_answer); + + let mut b1_answer = b_g1_inputs.wait()?; + b1_answer.add_assign(&b_g1_aux.wait()?); + let mut b2_answer = b_g2_inputs.wait()?; + b2_answer.add_assign(&b_g2_aux.wait()?); + + g_b.add_assign(&b2_answer); + b1_answer.mul_assign(r); + g_c.add_assign(&b1_answer); + g_c.add_assign(&h.wait()?); + g_c.add_assign(&l.wait()?); + + Ok(Proof { + a: g_a.into_affine(), + b: g_b.into_affine(), + c: g_c.into_affine() + }) +} diff --git a/bellman/src/groth16/tests/dummy_engine.rs b/bellman/src/groth16/tests/dummy_engine.rs new file mode 100644 index 0000000..26c8996 --- /dev/null +++ b/bellman/src/groth16/tests/dummy_engine.rs @@ -0,0 +1,451 @@ +use pairing::{ + Engine, + PrimeField, + PrimeFieldRepr, + Field, + SqrtField, + LegendreSymbol, + CurveProjective, + CurveAffine, + PrimeFieldDecodingError, + GroupDecodingError, + EncodedPoint +}; + +use std::cmp::Ordering; +use std::fmt; +use rand::{Rand, Rng}; +use std::num::Wrapping; + +const MODULUS_R: Wrapping = Wrapping(64513); + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Fr(Wrapping); + +impl fmt::Display for Fr { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", (self.0).0) + } +} + +impl Rand for Fr { + fn rand(rng: &mut R) -> Self { + Fr(Wrapping(rng.gen()) % MODULUS_R) + } +} + +impl Field for Fr { + fn zero() -> Self { + Fr(Wrapping(0)) + } + + fn one() -> Self { + Fr(Wrapping(1)) + } + + fn is_zero(&self) -> bool { + (self.0).0 == 0 + } + + fn square(&mut self) { + self.0 = (self.0 * self.0) % MODULUS_R; + } + + fn double(&mut self) { + self.0 = (self.0 << 1) % MODULUS_R; + } + + fn negate(&mut self) { + if !::is_zero(self) { + self.0 = MODULUS_R - self.0; + } + } + + fn add_assign(&mut self, other: &Self) { + self.0 = (self.0 + other.0) % MODULUS_R; + } + + fn sub_assign(&mut self, other: &Self) { + self.0 = ((MODULUS_R + self.0) - other.0) % MODULUS_R; + } + + fn mul_assign(&mut self, other: &Self) { + self.0 = (self.0 * other.0) % MODULUS_R; + } + + fn inverse(&self) -> Option { + if ::is_zero(self) { + None + } else { + Some(self.pow(&[(MODULUS_R.0 as u64) - 2])) + } + } + + fn frobenius_map(&mut self, _: usize) { + // identity + } +} + +impl SqrtField for Fr { + fn legendre(&self) -> LegendreSymbol { + // s = self^((r - 1) // 2) + let s = self.pow([32256]); + if s == ::zero() { LegendreSymbol::Zero } + else if s == ::one() { LegendreSymbol::QuadraticResidue } + else { LegendreSymbol::QuadraticNonResidue } + } + + fn sqrt(&self) -> Option { + // Tonelli-Shank's algorithm for q mod 16 = 1 + // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) + match self.legendre() { + LegendreSymbol::Zero => Some(*self), + LegendreSymbol::QuadraticNonResidue => None, + LegendreSymbol::QuadraticResidue => { + let mut c = Fr::root_of_unity(); + // r = self^((t + 1) // 2) + let mut r = self.pow([32]); + // t = self^t + let mut t = self.pow([63]); + let mut m = Fr::S; + + while t != ::one() { + let mut i = 1; + { + let mut t2i = t; + t2i.square(); + loop 
{ + if t2i == ::one() { + break; + } + t2i.square(); + i += 1; + } + } + + for _ in 0..(m - i - 1) { + c.square(); + } + ::mul_assign(&mut r, &c); + c.square(); + ::mul_assign(&mut t, &c); + m = i; + } + + Some(r) + } + } + } +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct FrRepr([u64; 1]); + +impl Ord for FrRepr { + fn cmp(&self, other: &FrRepr) -> Ordering { + (self.0)[0].cmp(&(other.0)[0]) + } +} + +impl PartialOrd for FrRepr { + fn partial_cmp(&self, other: &FrRepr) -> Option { + Some(self.cmp(other)) + } +} + +impl Rand for FrRepr { + fn rand(rng: &mut R) -> Self { + FrRepr([rng.gen()]) + } +} + +impl fmt::Display for FrRepr { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", (self.0)[0]) + } +} + +impl From for FrRepr { + fn from(v: u64) -> FrRepr { + FrRepr([v]) + } +} + +impl From for FrRepr { + fn from(v: Fr) -> FrRepr { + FrRepr([(v.0).0 as u64]) + } +} + +impl AsMut<[u64]> for FrRepr { + fn as_mut(&mut self) -> &mut [u64] { + &mut self.0[..] + } +} + +impl AsRef<[u64]> for FrRepr { + fn as_ref(&self) -> &[u64] { + &self.0[..] + } +} + +impl Default for FrRepr { + fn default() -> FrRepr { + FrRepr::from(0u64) + } +} + +impl PrimeFieldRepr for FrRepr { + fn sub_noborrow(&mut self, other: &Self) { + self.0[0] = self.0[0].wrapping_sub(other.0[0]); + } + fn add_nocarry(&mut self, other: &Self) { + self.0[0] = self.0[0].wrapping_add(other.0[0]); + } + fn num_bits(&self) -> u32 { + 64 - self.0[0].leading_zeros() + } + fn is_zero(&self) -> bool { + self.0[0] == 0 + } + fn is_odd(&self) -> bool { + !self.is_even() + } + fn is_even(&self) -> bool { + self.0[0] % 2 == 0 + } + fn div2(&mut self) { + self.shr(1) + } + fn shr(&mut self, amt: u32) { + self.0[0] >>= amt; + } + fn mul2(&mut self) { + self.shl(1) + } + fn shl(&mut self, amt: u32) { + self.0[0] <<= amt; + } +} + +impl PrimeField for Fr { + type Repr = FrRepr; + + const NUM_BITS: u32 = 16; + const CAPACITY: u32 = 15; + const S: u32 = 10; + + fn from_repr(repr: FrRepr) -> Result { + if repr.0[0] >= (MODULUS_R.0 as u64) { + Err(PrimeFieldDecodingError::NotInField(format!("{}", repr))) + } else { + Ok(Fr(Wrapping(repr.0[0] as u32))) + } + } + + fn into_repr(&self) -> FrRepr { + FrRepr::from(*self) + } + + fn char() -> FrRepr { + Fr(MODULUS_R).into() + } + + fn multiplicative_generator() -> Fr { + Fr(Wrapping(5)) + } + + fn root_of_unity() -> Fr { + Fr(Wrapping(57751)) + } +} + +#[derive(Clone)] +pub struct DummyEngine; + +impl Engine for DummyEngine { + type Fr = Fr; + type G1 = Fr; + type G1Affine = Fr; + type G2 = Fr; + type G2Affine = Fr; + type Fq = Fr; + type Fqe = Fr; + + // TODO: This should be F_645131 or something. Doesn't matter for now. + type Fqk = Fr; + + fn miller_loop<'a, I>(i: I) -> Self::Fqk + where I: IntoIterator::Prepared, + &'a ::Prepared + )> + { + let mut acc = ::zero(); + + for &(a, b) in i { + let mut tmp = *a; + ::mul_assign(&mut tmp, b); + ::add_assign(&mut acc, &tmp); + } + + acc + } + + /// Perform final exponentiation of the result of a miller loop. 
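The dummy engine works in the prime field F_r with r = 64513 = 63 * 2^10 + 1, which is what makes `NUM_BITS = 16`, `S = 10` and the Tonelli-Shanks exponents above consistent. A minimal, dependency-free sketch that sanity-checks those constants (the `pow_mod` helper is ours, not part of the crate):

```rust
/// Square-and-multiply exponentiation modulo a small prime (fits in u64).
fn pow_mod(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    let mut acc = 1u64;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    acc
}

fn main() {
    const R: u64 = 64513;
    // r - 1 = 63 * 2^10, hence the 2-adicity S = 10 and odd part t = 63.
    assert_eq!(R - 1, 63 << 10);
    // The claimed 2^10-th root of unity really has 2-power order...
    let omega = 57751u64;
    assert_eq!(pow_mod(omega, 1u64 << 10, R), 1);
    assert_ne!(pow_mod(omega, 1u64 << 9, R), 1);
    // ...and equals generator^t, as Tonelli-Shanks expects.
    assert_eq!(pow_mod(5, 63, R), omega);
    println!("dummy field constants are consistent");
}
```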
+ fn final_exponentiation(this: &Self::Fqk) -> Option + { + Some(*this) + } +} + +impl CurveProjective for Fr { + type Affine = Fr; + type Base = Fr; + type Scalar = Fr; + type Engine = DummyEngine; + + fn zero() -> Self { + ::zero() + } + + fn one() -> Self { + ::one() + } + + fn is_zero(&self) -> bool { + ::is_zero(self) + } + + fn batch_normalization(_: &mut [Self]) { + + } + + fn is_normalized(&self) -> bool { + true + } + + fn double(&mut self) { + ::double(self); + } + + fn add_assign(&mut self, other: &Self) { + ::add_assign(self, other); + } + + fn add_assign_mixed(&mut self, other: &Self) { + ::add_assign(self, other); + } + + fn negate(&mut self) { + ::negate(self); + } + + fn mul_assign::Repr>>(&mut self, other: S) + { + let tmp = Fr::from_repr(other.into()).unwrap(); + + ::mul_assign(self, &tmp); + } + + fn into_affine(&self) -> Fr { + *self + } + + fn recommended_wnaf_for_scalar(_: ::Repr) -> usize { + 3 + } + + fn recommended_wnaf_for_num_scalars(_: usize) -> usize { + 3 + } +} + +#[derive(Copy, Clone)] +pub struct FakePoint; + +impl AsMut<[u8]> for FakePoint { + fn as_mut(&mut self) -> &mut [u8] { + unimplemented!() + } +} + +impl AsRef<[u8]> for FakePoint { + fn as_ref(&self) -> &[u8] { + unimplemented!() + } +} + +impl EncodedPoint for FakePoint { + type Affine = Fr; + + fn empty() -> Self { + unimplemented!() + } + + fn size() -> usize { + unimplemented!() + } + + fn into_affine(&self) -> Result { + unimplemented!() + } + + fn into_affine_unchecked(&self) -> Result { + unimplemented!() + } + + fn from_affine(_: Self::Affine) -> Self { + unimplemented!() + } +} + +impl CurveAffine for Fr { + type Pair = Fr; + type PairingResult = Fr; + type Compressed = FakePoint; + type Uncompressed = FakePoint; + type Prepared = Fr; + type Projective = Fr; + type Base = Fr; + type Scalar = Fr; + type Engine = DummyEngine; + + fn zero() -> Self { + ::zero() + } + + fn one() -> Self { + ::one() + } + + fn is_zero(&self) -> bool { + ::is_zero(self) + } + + fn negate(&mut self) { + ::negate(self); + } + + fn mul::Repr>>(&self, other: S) -> Self::Projective + { + let mut res = *self; + let tmp = Fr::from_repr(other.into()).unwrap(); + + ::mul_assign(&mut res, &tmp); + + res + } + + fn prepare(&self) -> Self::Prepared { + *self + } + + fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { + self.mul(*other) + } + + fn into_projective(&self) -> Self::Projective { + *self + } +} diff --git a/bellman/src/groth16/tests/mod.rs b/bellman/src/groth16/tests/mod.rs new file mode 100644 index 0000000..a8e2914 --- /dev/null +++ b/bellman/src/groth16/tests/mod.rs @@ -0,0 +1,400 @@ +use pairing::{ + Engine, + Field, + PrimeField +}; + +mod dummy_engine; +use self::dummy_engine::*; + +use std::marker::PhantomData; + +use ::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +use super::{ + generate_parameters, + prepare_verifying_key, + create_proof, + verify_proof +}; + +struct XORDemo { + a: Option, + b: Option, + _marker: PhantomData +} + +impl Circuit for XORDemo { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + let a_var = cs.alloc(|| "a", || { + if self.a.is_some() { + if self.a.unwrap() { + Ok(E::Fr::one()) + } else { + Ok(E::Fr::zero()) + } + } else { + Err(SynthesisError::AssignmentMissing) + } + })?; + + cs.enforce( + || "a_boolean_constraint", + |lc| lc + CS::one() - a_var, + |lc| lc + a_var, + |lc| lc + ); + + let b_var = cs.alloc(|| "b", || { + if self.b.is_some() { + if self.b.unwrap() { + Ok(E::Fr::one()) + } else { + Ok(E::Fr::zero()) + } + } 
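The `XORDemo` circuit whose synthesis starts above reduces XOR to three rank-1 constraints (see the `enforce` calls that follow): two booleanity checks and one product constraint that, for boolean a and b, pins c to a XOR b:

```latex
(1 - a)\cdot a = 0,
\qquad
(1 - b)\cdot b = 0,
\qquad
2a \cdot b = a + b - c
\;\Longleftrightarrow\;
c = a + b - 2ab = a \oplus b .
```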
else { + Err(SynthesisError::AssignmentMissing) + } + })?; + + cs.enforce( + || "b_boolean_constraint", + |lc| lc + CS::one() - b_var, + |lc| lc + b_var, + |lc| lc + ); + + let c_var = cs.alloc_input(|| "c", || { + if self.a.is_some() && self.b.is_some() { + if self.a.unwrap() ^ self.b.unwrap() { + Ok(E::Fr::one()) + } else { + Ok(E::Fr::zero()) + } + } else { + Err(SynthesisError::AssignmentMissing) + } + })?; + + cs.enforce( + || "c_xor_constraint", + |lc| lc + a_var + a_var, + |lc| lc + b_var, + |lc| lc + a_var + b_var - c_var + ); + + Ok(()) + } +} + +#[test] +fn test_xordemo() { + let g1 = Fr::one(); + let g2 = Fr::one(); + let alpha = Fr::from_str("48577").unwrap(); + let beta = Fr::from_str("22580").unwrap(); + let gamma = Fr::from_str("53332").unwrap(); + let delta = Fr::from_str("5481").unwrap(); + let tau = Fr::from_str("3673").unwrap(); + + let params = { + let c = XORDemo:: { + a: None, + b: None, + _marker: PhantomData + }; + + generate_parameters( + c, + g1, + g2, + alpha, + beta, + gamma, + delta, + tau + ).unwrap() + }; + + // This will synthesize the constraint system: + // + // public inputs: a_0 = 1, a_1 = c + // aux inputs: a_2 = a, a_3 = b + // constraints: + // (a_0 - a_2) * (a_2) = 0 + // (a_0 - a_3) * (a_3) = 0 + // (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1) + // (a_0) * 0 = 0 + // (a_1) * 0 = 0 + + // The evaluation domain is 8. The H query should + // have 7 elements (it's a quotient polynomial) + assert_eq!(7, params.h.len()); + + let mut root_of_unity = Fr::root_of_unity(); + + // We expect this to be a 2^10 root of unity + assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 10])); + + // Let's turn it into a 2^3 root of unity. + root_of_unity = root_of_unity.pow(&[1 << 7]); + assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 3])); + assert_eq!(Fr::from_str("20201").unwrap(), root_of_unity); + + // Let's compute all the points in our evaluation domain. + let mut points = Vec::with_capacity(8); + for i in 0..8 { + points.push(root_of_unity.pow(&[i])); + } + + // Let's compute t(tau) = (tau - p_0)(tau - p_1)... + // = tau^8 - 1 + let mut t_at_tau = tau.pow(&[8]); + t_at_tau.sub_assign(&Fr::one()); + { + let mut tmp = Fr::one(); + for p in &points { + let mut term = tau; + term.sub_assign(p); + tmp.mul_assign(&term); + } + assert_eq!(tmp, t_at_tau); + } + + // We expect our H query to be 7 elements of the form... + // {tau^i t(tau) / delta} + let delta_inverse = delta.inverse().unwrap(); + let gamma_inverse = gamma.inverse().unwrap(); + { + let mut coeff = delta_inverse; + coeff.mul_assign(&t_at_tau); + + let mut cur = Fr::one(); + for h in params.h.iter() { + let mut tmp = cur; + tmp.mul_assign(&coeff); + + assert_eq!(*h, tmp); + + cur.mul_assign(&tau); + } + } + + // The density of the IC query is 2 (2 inputs) + assert_eq!(2, params.vk.ic.len()); + + // The density of the L query is 2 (2 aux variables) + assert_eq!(2, params.l.len()); + + // The density of the A query is 4 (each variable is in at least one A term) + assert_eq!(4, params.a.len()); + + // The density of the B query is 2 (two variables are in at least one B term) + assert_eq!(2, params.b_g1.len()); + assert_eq!(2, params.b_g2.len()); + + /* + Lagrange interpolation polynomials in our evaluation domain: + + ,-------------------------------. ,-------------------------------. ,-------------------------------. + | A TERM | | B TERM | | C TERM | + `-------------------------------. 
`-------------------------------' `-------------------------------' + | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | + | 1 | 0 | 64512 | 0 | | 0 | 0 | 1 | 0 | | 0 | 0 | 0 | 0 | + | 1 | 0 | 0 | 64512 | | 0 | 0 | 0 | 1 | | 0 | 0 | 0 | 0 | + | 0 | 0 | 2 | 0 | | 0 | 0 | 0 | 1 | | 0 | 64512 | 1 | 1 | + | 1 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + | 0 | 1 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------' + + Example for u_0: + + sage: r = 64513 + sage: Fr = GF(r) + sage: omega = (Fr(5)^63)^(2^7) + sage: tau = Fr(3673) + sage: R. = PolynomialRing(Fr, 'x') + sage: def eval(tau, c0, c1, c2, c3, c4): + ....: p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)]) + ....: return p.substitute(tau) + sage: eval(tau, 1, 1, 0, 1, 0) + 59158 + */ + + let u_i = [59158, 48317, 21767, 10402].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + let v_i = [0, 0, 60619, 30791].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + let w_i = [0, 23320, 41193, 41193].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + + for (u, a) in u_i.iter() + .zip(¶ms.a[..]) + { + assert_eq!(u, a); + } + + for (v, b) in v_i.iter() + .filter(|&&e| e != Fr::zero()) + .zip(¶ms.b_g1[..]) + { + assert_eq!(v, b); + } + + for (v, b) in v_i.iter() + .filter(|&&e| e != Fr::zero()) + .zip(¶ms.b_g2[..]) + { + assert_eq!(v, b); + } + + for i in 0..4 { + let mut tmp1 = beta; + tmp1.mul_assign(&u_i[i]); + + let mut tmp2 = alpha; + tmp2.mul_assign(&v_i[i]); + + tmp1.add_assign(&tmp2); + tmp1.add_assign(&w_i[i]); + + if i < 2 { + // Check the correctness of the IC query elements + tmp1.mul_assign(&gamma_inverse); + + assert_eq!(tmp1, params.vk.ic[i]); + } else { + // Check the correctness of the L query elements + tmp1.mul_assign(&delta_inverse); + + assert_eq!(tmp1, params.l[i - 2]); + } + } + + // Check consistency of the other elements + assert_eq!(alpha, params.vk.alpha_g1); + assert_eq!(beta, params.vk.beta_g1); + assert_eq!(beta, params.vk.beta_g2); + assert_eq!(gamma, params.vk.gamma_g2); + assert_eq!(delta, params.vk.delta_g1); + assert_eq!(delta, params.vk.delta_g2); + + let pvk = prepare_verifying_key(¶ms.vk); + + let r = Fr::from_str("27134").unwrap(); + let s = Fr::from_str("17146").unwrap(); + + let proof = { + let c = XORDemo { + a: Some(true), + b: Some(false), + _marker: PhantomData + }; + + create_proof( + c, + ¶ms, + r, + s + ).unwrap() + }; + + // A(x) = + // a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) + + // a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) + + // a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) + + // a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) + + { + // proof A = alpha + A(tau) + delta * r + let mut expected_a = delta; + expected_a.mul_assign(&r); + expected_a.add_assign(&alpha); + expected_a.add_assign(&u_i[0]); // a_0 = 1 + expected_a.add_assign(&u_i[1]); // a_1 = 1 + expected_a.add_assign(&u_i[2]); // a_2 = 1 + // a_3 = 0 + assert_eq!(proof.a, expected_a); + } + + // B(x) = + // a_0 * (0) + + // a_1 * (0) + + // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) + 
+ // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385) + { + // proof B = beta + B(tau) + delta * s + let mut expected_b = delta; + expected_b.mul_assign(&s); + expected_b.add_assign(&beta); + expected_b.add_assign(&v_i[0]); // a_0 = 1 + expected_b.add_assign(&v_i[1]); // a_1 = 1 + expected_b.add_assign(&v_i[2]); // a_2 = 1 + // a_3 = 0 + assert_eq!(proof.b, expected_b); + } + + // C(x) = + // a_0 * (0) + + // a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) + + // a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + + // a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + // + // If A * B = C at each point in the domain, then the following polynomial... + // P(x) = A(x) * B(x) - C(x) + // = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481 + // + // ... should be divisible by t(x), producing the quotient polynomial: + // h(x) = P(x) / t(x) + // = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032 + { + let mut expected_c = Fr::zero(); + + // A * s + let mut tmp = proof.a; + tmp.mul_assign(&s); + expected_c.add_assign(&tmp); + + // B * r + let mut tmp = proof.b; + tmp.mul_assign(&r); + expected_c.add_assign(&tmp); + + // delta * r * s + let mut tmp = delta; + tmp.mul_assign(&r); + tmp.mul_assign(&s); + expected_c.sub_assign(&tmp); + + // L query answer + // a_2 = 1, a_3 = 0 + expected_c.add_assign(¶ms.l[0]); + + // H query answer + for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() { + let coeff = Fr::from_str(&format!("{}", coeff)).unwrap(); + + let mut tmp = params.h[i]; + tmp.mul_assign(&coeff); + expected_c.add_assign(&tmp); + } + + assert_eq!(expected_c, proof.c); + } + + assert!(verify_proof( + &pvk, + &proof, + &[Fr::one()] + ).unwrap()); +} diff --git a/bellman/src/groth16/verifier.rs b/bellman/src/groth16/verifier.rs new file mode 100644 index 0000000..083e1d0 --- /dev/null +++ b/bellman/src/groth16/verifier.rs @@ -0,0 +1,66 @@ +use pairing::{ + Engine, + CurveProjective, + CurveAffine, + PrimeField +}; + +use super::{ + Proof, + VerifyingKey, + PreparedVerifyingKey +}; + +use ::{ + SynthesisError +}; + +pub fn prepare_verifying_key( + vk: &VerifyingKey +) -> PreparedVerifyingKey +{ + let mut gamma = vk.gamma_g2; + gamma.negate(); + let mut delta = vk.delta_g2; + delta.negate(); + + PreparedVerifyingKey { + alpha_g1_beta_g2: E::pairing(vk.alpha_g1, vk.beta_g2), + neg_gamma_g2: gamma.prepare(), + neg_delta_g2: delta.prepare(), + ic: vk.ic.clone() + } +} + +pub fn verify_proof<'a, E: Engine>( + pvk: &'a PreparedVerifyingKey, + proof: &Proof, + public_inputs: &[E::Fr] +) -> Result +{ + if (public_inputs.len() + 1) != pvk.ic.len() { + return Err(SynthesisError::MalformedVerifyingKey); + } + + let mut acc = pvk.ic[0].into_projective(); + + for (i, b) in public_inputs.iter().zip(pvk.ic.iter().skip(1)) { + acc.add_assign(&b.mul(i.into_repr())); + } + + // The original verification equation is: + // A * B = alpha * beta + inputs * gamma + C * delta + // ... however, we rearrange it so that it is: + // A * B - inputs * gamma - C * delta = alpha * beta + // or equivalently: + // A * B + inputs * (-gamma) + C * (-delta) = alpha * beta + // which allows us to do a single final exponentiation. 
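Written with pairings, the rearrangement described in the comment above is the following, where IC_i = [(beta*u_i(tau) + alpha*v_i(tau) + w_i(tau)) / gamma]_1 are the verifying key's input commitments and a_0 = 1:

```latex
e(A, B) = e(\alpha, \beta)\cdot
          e\!\Bigl(\sum_i a_i\,\mathrm{IC}_i,\ \gamma\Bigr)\cdot
          e(C, \delta)
\quad\Longleftrightarrow\quad
e(A, B)\cdot
e\!\Bigl(\sum_i a_i\,\mathrm{IC}_i,\ -\gamma\Bigr)\cdot
e(C, -\delta)
= e(\alpha, \beta).
```

This is why `prepare_verifying_key` stores -gamma, -delta and the precomputed e(alpha, beta): the check below then needs only one Miller loop over three pairs and a single final exponentiation.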
+ + Ok(E::final_exponentiation( + &E::miller_loop([ + (&proof.a.prepare(), &proof.b.prepare()), + (&acc.into_affine().prepare(), &pvk.neg_gamma_g2), + (&proof.c.prepare(), &pvk.neg_delta_g2) + ].into_iter()) + ).unwrap() == pvk.alpha_g1_beta_g2) +} diff --git a/bellman/src/lib.rs b/bellman/src/lib.rs new file mode 100644 index 0000000..fb8d043 --- /dev/null +++ b/bellman/src/lib.rs @@ -0,0 +1,424 @@ +extern crate pairing; +extern crate rand; +extern crate num_cpus; +extern crate futures; +extern crate futures_cpupool; +extern crate bit_vec; +extern crate crossbeam; +extern crate byteorder; + +pub mod multicore; +mod multiexp; +pub mod domain; +pub mod groth16; + +use pairing::{Engine, Field}; + +use std::ops::{Add, Sub}; +use std::fmt; +use std::error::Error; +use std::io; +use std::marker::PhantomData; + +/// Computations are expressed in terms of arithmetic circuits, in particular +/// rank-1 quadratic constraint systems. The `Circuit` trait represents a +/// circuit that can be synthesized. The `synthesize` method is called during +/// CRS generation and during proving. +pub trait Circuit { + /// Synthesize the circuit into a rank-1 quadratic constraint system + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError>; +} + +/// Represents a variable in our constraint system. +#[derive(Copy, Clone, Debug)] +pub struct Variable(Index); + +impl Variable { + /// This constructs a variable with an arbitrary index. + /// Circuit implementations are not recommended to use this. + pub fn new_unchecked(idx: Index) -> Variable { + Variable(idx) + } + + /// This returns the index underlying the variable. + /// Circuit implementations are not recommended to use this. + pub fn get_unchecked(&self) -> Index { + self.0 + } +} + +/// Represents the index of either an input variable or +/// auxillary variable. +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum Index { + Input(usize), + Aux(usize) +} + +/// This represents a linear combination of some variables, with coefficients +/// in the scalar field of a pairing-friendly elliptic curve group. 
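For reference, every constraint in such a rank-1 quadratic constraint system is an equation over the assignment vector z = (1, public inputs, auxiliary variables):

```latex
\Bigl(\sum_i A_{j,i}\,z_i\Bigr)\cdot\Bigl(\sum_i B_{j,i}\,z_i\Bigr) = \sum_i C_{j,i}\,z_i ,
```

where each of the three factors is expressed as a `LinearCombination` in the code that follows.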
+#[derive(Clone)] +pub struct LinearCombination(Vec<(Variable, E::Fr)>); + +impl AsRef<[(Variable, E::Fr)]> for LinearCombination { + fn as_ref(&self) -> &[(Variable, E::Fr)] { + &self.0 + } +} + +impl LinearCombination { + pub fn zero() -> LinearCombination { + LinearCombination(vec![]) + } +} + +impl Add<(E::Fr, Variable)> for LinearCombination { + type Output = LinearCombination; + + fn add(mut self, (coeff, var): (E::Fr, Variable)) -> LinearCombination { + self.0.push((var, coeff)); + + self + } +} + +impl Sub<(E::Fr, Variable)> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, (mut coeff, var): (E::Fr, Variable)) -> LinearCombination { + coeff.negate(); + + self + (coeff, var) + } +} + +impl Add for LinearCombination { + type Output = LinearCombination; + + fn add(self, other: Variable) -> LinearCombination { + self + (E::Fr::one(), other) + } +} + +impl Sub for LinearCombination { + type Output = LinearCombination; + + fn sub(self, other: Variable) -> LinearCombination { + self - (E::Fr::one(), other) + } +} + +impl<'a, E: Engine> Add<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn add(mut self, other: &'a LinearCombination) -> LinearCombination { + for s in &other.0 { + self = self + (s.1, s.0); + } + + self + } +} + +impl<'a, E: Engine> Sub<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn sub(mut self, other: &'a LinearCombination) -> LinearCombination { + for s in &other.0 { + self = self - (s.1, s.0); + } + + self + } +} + +impl<'a, E: Engine> Add<(E::Fr, &'a LinearCombination)> for LinearCombination { + type Output = LinearCombination; + + fn add(mut self, (coeff, other): (E::Fr, &'a LinearCombination)) -> LinearCombination { + for s in &other.0 { + let mut tmp = s.1; + tmp.mul_assign(&coeff); + self = self + (tmp, s.0); + } + + self + } +} + +impl<'a, E: Engine> Sub<(E::Fr, &'a LinearCombination)> for LinearCombination { + type Output = LinearCombination; + + fn sub(mut self, (coeff, other): (E::Fr, &'a LinearCombination)) -> LinearCombination { + for s in &other.0 { + let mut tmp = s.1; + tmp.mul_assign(&coeff); + self = self - (tmp, s.0); + } + + self + } +} + +/// This is an error that could occur during circuit synthesis contexts, +/// such as CRS generation, proving or verification. +#[derive(Debug)] +pub enum SynthesisError { + /// During synthesis, we lacked knowledge of a variable assignment. + AssignmentMissing, + /// During synthesis, we divided by zero. + DivisionByZero, + /// During synthesis, we constructed an unsatisfiable constraint system. + Unsatisfiable, + /// During synthesis, our polynomials ended up being too high of degree + PolynomialDegreeTooLarge, + /// During proof generation, we encountered an identity in the CRS + UnexpectedIdentity, + /// During proof generation, we encountered an I/O error with the CRS + IoError(io::Error), + /// During verification, our verifying key was malformed. 
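A hypothetical illustration of the operator overloads above (the helper name `two_a_minus_b` is ours, not part of bellman): building the combination 2a - b, much as one would inside an `enforce` closure.

```rust
use pairing::{Engine, Field};
use bellman::{LinearCombination, Variable};

/// Build the linear combination 2*a - b.
fn two_a_minus_b<E: Engine>(a: Variable, b: Variable) -> LinearCombination<E> {
    // There is no generic integer-to-Fr conversion here, so construct 2 as 1 + 1.
    let mut two = E::Fr::one();
    two.add_assign(&E::Fr::one());

    LinearCombination::zero() + (two, a) - b
}
```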
+ MalformedVerifyingKey, + /// During CRS generation, we observed an unconstrained auxillary variable + UnconstrainedVariable +} + +impl From for SynthesisError { + fn from(e: io::Error) -> SynthesisError { + SynthesisError::IoError(e) + } +} + +impl Error for SynthesisError { + fn description(&self) -> &str { + match *self { + SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed", + SynthesisError::DivisionByZero => "division by zero", + SynthesisError::Unsatisfiable => "unsatisfiable constraint system", + SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large", + SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS", + SynthesisError::IoError(_) => "encountered an I/O error", + SynthesisError::MalformedVerifyingKey => "malformed verifying key", + SynthesisError::UnconstrainedVariable => "auxillary variable was unconstrained" + } + } +} + +impl fmt::Display for SynthesisError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + if let &SynthesisError::IoError(ref e) = self { + write!(f, "I/O error: ")?; + e.fmt(f) + } else { + write!(f, "{}", self.description()) + } + } +} + +/// Represents a constraint system which can have new variables +/// allocated and constrains between them formed. +pub trait ConstraintSystem: Sized { + /// Represents the type of the "root" of this constraint system + /// so that nested namespaces can minimize indirection. + type Root: ConstraintSystem; + + /// Return the "one" input variable + fn one() -> Variable { + Variable::new_unchecked(Index::Input(0)) + } + + /// Allocate a private variable in the constraint system. The provided function is used to + /// determine the assignment of the variable. The given `annotation` function is invoked + /// in testing contexts in order to derive a unique name for this variable in the current + /// namespace. + fn alloc( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into; + + /// Allocate a public variable in the constraint system. The provided function is used to + /// determine the assignment of the variable. + fn alloc_input( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into; + + /// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts + /// in order to derive a unique name for the constraint in the current namespace. + fn enforce( + &mut self, + annotation: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination; + + /// Create a new (sub)namespace and enter into it. Not intended + /// for downstream use; use `namespace` instead. + fn push_namespace(&mut self, name_fn: N) + where NR: Into, N: FnOnce() -> NR; + + /// Exit out of the existing namespace. Not intended for + /// downstream use; use `namespace` instead. + fn pop_namespace(&mut self); + + /// Gets the "root" constraint system, bypassing the namespacing. + /// Not intended for downstream use; use `namespace` instead. + fn get_root(&mut self) -> &mut Self::Root; + + /// Begin a namespace for this constraint system. 
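As a sketch of how `alloc` and `enforce` compose in practice (the gadget name `mul_gadget` is ours, not part of bellman): allocate a product variable and constrain it with a single rank-1 constraint.

```rust
use pairing::{Engine, Field};
use bellman::{ConstraintSystem, SynthesisError, Variable};

/// Allocate c and enforce a * b = c. The witness values are optional because
/// the same synthesis code runs during CRS generation, where no assignment exists.
fn mul_gadget<E: Engine, CS: ConstraintSystem<E>>(
    cs: &mut CS,
    a: Variable,
    b: Variable,
    a_val: Option<E::Fr>,
    b_val: Option<E::Fr>
) -> Result<Variable, SynthesisError>
{
    let c = cs.alloc(|| "c", || {
        let mut tmp = a_val.ok_or(SynthesisError::AssignmentMissing)?;
        tmp.mul_assign(&b_val.ok_or(SynthesisError::AssignmentMissing)?);
        Ok(tmp)
    })?;

    // (a) * (b) = (c)
    cs.enforce(
        || "a * b = c",
        |lc| lc + a,
        |lc| lc + b,
        |lc| lc + c
    );

    Ok(c)
}
```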
+ fn namespace<'a, NR, N>( + &'a mut self, + name_fn: N + ) -> Namespace<'a, E, Self::Root> + where NR: Into, N: FnOnce() -> NR + { + self.get_root().push_namespace(name_fn); + + Namespace(self.get_root(), PhantomData) + } +} + +/// This is a "namespaced" constraint system which borrows a constraint system (pushing +/// a namespace context) and, when dropped, pops out of the namespace context. +pub struct Namespace<'a, E: Engine, CS: ConstraintSystem + 'a>(&'a mut CS, PhantomData); + +impl<'cs, E: Engine, CS: ConstraintSystem> ConstraintSystem for Namespace<'cs, E, CS> { + type Root = CS::Root; + + fn one() -> Variable { + CS::one() + } + + fn alloc( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.0.alloc(annotation, f) + } + + fn alloc_input( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.0.alloc_input(annotation, f) + } + + fn enforce( + &mut self, + annotation: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + self.0.enforce(annotation, a, b, c) + } + + // Downstream users who use `namespace` will never interact with these + // functions and they will never be invoked because the namespace is + // never a root constraint system. + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + panic!("only the root's push_namespace should be called"); + } + + fn pop_namespace(&mut self) + { + panic!("only the root's pop_namespace should be called"); + } + + fn get_root(&mut self) -> &mut Self::Root + { + self.0.get_root() + } +} + +impl<'a, E: Engine, CS: ConstraintSystem> Drop for Namespace<'a, E, CS> { + fn drop(&mut self) { + self.get_root().pop_namespace() + } +} + +/// Convenience implementation of ConstraintSystem for mutable references to +/// constraint systems. +impl<'cs, E: Engine, CS: ConstraintSystem> ConstraintSystem for &'cs mut CS { + type Root = CS::Root; + + fn one() -> Variable { + CS::one() + } + + fn alloc( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + (**self).alloc(annotation, f) + } + + fn alloc_input( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + (**self).alloc_input(annotation, f) + } + + fn enforce( + &mut self, + annotation: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + (**self).enforce(annotation, a, b, c) + } + + fn push_namespace(&mut self, name_fn: N) + where NR: Into, N: FnOnce() -> NR + { + (**self).push_namespace(name_fn) + } + + fn pop_namespace(&mut self) + { + (**self).pop_namespace() + } + + fn get_root(&mut self) -> &mut Self::Root + { + (**self).get_root() + } +} diff --git a/bellman/src/multicore.rs b/bellman/src/multicore.rs new file mode 100644 index 0000000..c0062fc --- /dev/null +++ b/bellman/src/multicore.rs @@ -0,0 +1,106 @@ +//! This is an interface for dealing with the kinds of +//! parallel computations involved in bellman. It's +//! currently just a thin wrapper around CpuPool and +//! crossbeam but may be extended in the future to +//! 
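A hypothetical sketch of the namespacing machinery above (`synthesize_rounds` is our name): each iteration pushes a namespace such as `round 0`, and the `Namespace` guard pops it again when dropped, so nested allocations get unique paths like `round 0/tmp` in testing constraint systems.

```rust
use pairing::{Engine, Field};
use bellman::{ConstraintSystem, SynthesisError};

fn synthesize_rounds<E: Engine, CS: ConstraintSystem<E>>(
    cs: &mut CS
) -> Result<(), SynthesisError>
{
    for i in 0..3 {
        // Enter the namespace; it is popped again when `cs` goes out of scope.
        let mut cs = cs.namespace(|| format!("round {}", i));

        // Illustrative only: a real circuit must also constrain this variable,
        // otherwise CRS generation reports it as unconstrained.
        cs.alloc(|| "tmp", || Ok(E::Fr::one()))?;
    }

    Ok(())
}
```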
allow for various parallelism strategies. + +use num_cpus; +use futures::{Future, IntoFuture, Poll}; +use futures_cpupool::{CpuPool, CpuFuture}; +use crossbeam::{self, Scope}; + +#[derive(Clone)] +pub struct Worker { + cpus: usize, + pool: CpuPool +} + +impl Worker { + // We don't expose this outside the library so that + // all `Worker` instances have the same number of + // CPUs configured. + pub(crate) fn new_with_cpus(cpus: usize) -> Worker { + Worker { + cpus: cpus, + pool: CpuPool::new(cpus) + } + } + + pub fn new() -> Worker { + Self::new_with_cpus(num_cpus::get()) + } + + pub fn log_num_cpus(&self) -> u32 { + log2_floor(self.cpus) + } + + pub fn compute( + &self, f: F + ) -> WorkerFuture + where F: FnOnce() -> R + Send + 'static, + R: IntoFuture + 'static, + R::Future: Send + 'static, + R::Item: Send + 'static, + R::Error: Send + 'static + { + WorkerFuture { + future: self.pool.spawn_fn(f) + } + } + + pub fn scope<'a, F, R>( + &self, + elements: usize, + f: F + ) -> R + where F: FnOnce(&Scope<'a>, usize) -> R + { + let chunk_size = if elements < self.cpus { + 1 + } else { + elements / self.cpus + }; + + crossbeam::scope(|scope| { + f(scope, chunk_size) + }) + } +} + +pub struct WorkerFuture { + future: CpuFuture +} + +impl Future for WorkerFuture { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll + { + self.future.poll() + } +} + +fn log2_floor(num: usize) -> u32 { + assert!(num > 0); + + let mut pow = 0; + + while (1 << (pow+1)) <= num { + pow += 1; + } + + pow +} + +#[test] +fn test_log2_floor() { + assert_eq!(log2_floor(1), 0); + assert_eq!(log2_floor(2), 1); + assert_eq!(log2_floor(3), 1); + assert_eq!(log2_floor(4), 2); + assert_eq!(log2_floor(5), 2); + assert_eq!(log2_floor(6), 2); + assert_eq!(log2_floor(7), 2); + assert_eq!(log2_floor(8), 3); +} diff --git a/bellman/src/multiexp.rs b/bellman/src/multiexp.rs new file mode 100644 index 0000000..b1dc1f1 --- /dev/null +++ b/bellman/src/multiexp.rs @@ -0,0 +1,303 @@ +use pairing::{ + CurveAffine, + CurveProjective, + Engine, + PrimeField, + Field, + PrimeFieldRepr +}; +use std::sync::Arc; +use std::io; +use bit_vec::{self, BitVec}; +use std::iter; +use futures::{Future}; +use super::multicore::Worker; + +use super::SynthesisError; + +/// An object that builds a source of bases. +pub trait SourceBuilder: Send + Sync + 'static + Clone { + type Source: Source; + + fn new(self) -> Self::Source; +} + +/// A source of bases, like an iterator. +pub trait Source { + /// Parses the element from the source. Fails if the point is at infinity. + fn add_assign_mixed(&mut self, to: &mut ::Projective) -> Result<(), SynthesisError>; + + /// Skips `amt` elements from the source, avoiding deserialization. 
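A hedged sketch of one way to use `Worker::scope` (the `double_all` example is ours): split a slice into roughly one chunk per CPU and process the chunks on scoped crossbeam threads.

```rust
use bellman::multicore::Worker;

fn double_all(values: &mut [u64]) {
    let worker = Worker::new();

    // `scope` hands back a crossbeam scope plus a chunk size derived from the
    // element count and the number of CPUs the pool was configured with.
    worker.scope(values.len(), |scope, chunk_size| {
        for chunk in values.chunks_mut(chunk_size) {
            scope.spawn(move || {
                for v in chunk {
                    *v *= 2;
                }
            });
        }
    });
}
```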
+ fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>; +} + +impl SourceBuilder for (Arc>, usize) { + type Source = (Arc>, usize); + + fn new(self) -> (Arc>, usize) { + (self.0.clone(), self.1) + } +} + +impl Source for (Arc>, usize) { + fn add_assign_mixed(&mut self, to: &mut ::Projective) -> Result<(), SynthesisError> { + if self.0.len() <= self.1 { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into()); + } + + if self.0[self.1].is_zero() { + return Err(SynthesisError::UnexpectedIdentity) + } + + to.add_assign_mixed(&self.0[self.1]); + + self.1 += 1; + + Ok(()) + } + + fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> { + if self.0.len() <= self.1 { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into()); + } + + self.1 += amt; + + Ok(()) + } +} + +pub trait QueryDensity { + /// Returns whether the base exists. + type Iter: Iterator; + + fn iter(self) -> Self::Iter; + fn get_query_size(self) -> Option; +} + +#[derive(Clone)] +pub struct FullDensity; + +impl AsRef for FullDensity { + fn as_ref(&self) -> &FullDensity { + self + } +} + +impl<'a> QueryDensity for &'a FullDensity { + type Iter = iter::Repeat; + + fn iter(self) -> Self::Iter { + iter::repeat(true) + } + + fn get_query_size(self) -> Option { + None + } +} + +pub struct DensityTracker { + bv: BitVec, + total_density: usize +} + +impl<'a> QueryDensity for &'a DensityTracker { + type Iter = bit_vec::Iter<'a>; + + fn iter(self) -> Self::Iter { + self.bv.iter() + } + + fn get_query_size(self) -> Option { + Some(self.bv.len()) + } +} + +impl DensityTracker { + pub fn new() -> DensityTracker { + DensityTracker { + bv: BitVec::new(), + total_density: 0 + } + } + + pub fn add_element(&mut self) { + self.bv.push(false); + } + + pub fn inc(&mut self, idx: usize) { + if !self.bv.get(idx).unwrap() { + self.bv.set(idx, true); + self.total_density += 1; + } + } + + pub fn get_total_density(&self) -> usize { + self.total_density + } +} + +fn multiexp_inner( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Fr as PrimeField>::Repr>>, + mut skip: u32, + c: u32, + handle_trivial: bool +) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder +{ + // Perform this region of the multiexp + let this = { + let bases = bases.clone(); + let exponents = exponents.clone(); + let density_map = density_map.clone(); + + pool.compute(move || { + // Accumulate the result + let mut acc = G::Projective::zero(); + + // Build a source for the bases + let mut bases = bases.new(); + + // Create space for the buckets + let mut buckets = vec![::Projective::zero(); (1 << c) - 1]; + + let zero = ::Fr::zero().into_repr(); + let one = ::Fr::one().into_repr(); + + // Sort the bases into buckets + for (&exp, density) in exponents.iter().zip(density_map.as_ref().iter()) { + if density { + if exp == zero { + bases.skip(1)?; + } else if exp == one { + if handle_trivial { + bases.add_assign_mixed(&mut acc)?; + } else { + bases.skip(1)?; + } + } else { + let mut exp = exp; + exp.shr(skip); + let exp = exp.as_ref()[0] % (1 << c); + + if exp != 0 { + bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?; + } else { + bases.skip(1)?; + } + } + } + } + + // Summation by parts + // e.g. 
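The bucket loop above is the windowed (Pippenger-style) multi-exponentiation: each exponent is cut into c-bit windows, and within the window starting at bit `skip`, every base whose digit is d lands in bucket d. Per window w:

```latex
\sum_i e_i\,G_i
  = \sum_{w \ge 0} 2^{c w}
    \sum_{d=1}^{2^c - 1} d \cdot
    \Bigl(\sum_{i\,:\,\mathrm{digit}_w(e_i) = d} G_i\Bigr),
```

and the inner weighted bucket sum is evaluated with a single running sum, which is exactly what the summation-by-parts comment that follows illustrates for 3a + 2b + 1c.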
3a + 2b + 1c = a + + // (a) + b + + // ((a) + b) + c + let mut running_sum = G::Projective::zero(); + for exp in buckets.into_iter().rev() { + running_sum.add_assign(&exp); + acc.add_assign(&running_sum); + } + + Ok(acc) + }) + }; + + skip += c; + + if skip >= ::Fr::NUM_BITS { + // There isn't another region. + Box::new(this) + } else { + // There's another region more significant. Calculate and join it with + // this region recursively. + Box::new( + this.join(multiexp_inner(pool, bases, density_map, exponents, skip, c, false)) + .map(move |(this, mut higher)| { + for _ in 0..c { + higher.double(); + } + + higher.add_assign(&this); + + higher + }) + ) + } +} + +/// Perform multi-exponentiation. The caller is responsible for ensuring the +/// query size is the same as the number of exponents. +pub fn multiexp( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Fr as PrimeField>::Repr>> +) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder +{ + let c = if exponents.len() < 32 { + 3u32 + } else { + (f64::from(exponents.len() as u32)).ln().ceil() as u32 + }; + + if let Some(query_size) = density_map.as_ref().get_query_size() { + // If the density map has a known query size, it should not be + // inconsistent with the number of exponents. + + assert!(query_size == exponents.len()); + } + + multiexp_inner(pool, bases, density_map, exponents, 0, c, true) +} + +#[test] +fn test_with_bls12() { + fn naive_multiexp( + bases: Arc>, + exponents: Arc::Repr>> + ) -> G::Projective + { + assert_eq!(bases.len(), exponents.len()); + + let mut acc = G::Projective::zero(); + + for (base, exp) in bases.iter().zip(exponents.iter()) { + acc.add_assign(&base.mul(*exp)); + } + + acc + } + + use rand::{self, Rand}; + use pairing::bls12_381::Bls12; + + const SAMPLES: usize = 1 << 14; + + let rng = &mut rand::thread_rng(); + let v = Arc::new((0..SAMPLES).map(|_| ::Fr::rand(rng).into_repr()).collect::>()); + let g = Arc::new((0..SAMPLES).map(|_| ::G1::rand(rng).into_affine()).collect::>()); + + let naive = naive_multiexp(g.clone(), v.clone()); + + let pool = Worker::new(); + + let fast = multiexp( + &pool, + (g, 0), + FullDensity, + v + ).wait().unwrap(); + + assert_eq!(naive, fast); +} diff --git a/bellman/tests/mimc.rs b/bellman/tests/mimc.rs new file mode 100644 index 0000000..d6ff72b --- /dev/null +++ b/bellman/tests/mimc.rs @@ -0,0 +1,251 @@ +extern crate bellman; +extern crate pairing; +extern crate rand; + +// For randomness (during paramgen and proof generation) +use rand::{thread_rng, Rng}; + +// For benchmarking +use std::time::{Duration, Instant}; + +// Bring in some tools for using pairing-friendly curves +use pairing::{ + Engine, + Field +}; + +// We're going to use the BLS12-381 pairing-friendly elliptic curve. +use pairing::bls12_381::{ + Bls12 +}; + +// We'll use these interfaces to construct our circuit. +use bellman::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +// We're going to use the Groth16 proving system. +use bellman::groth16::{ + Proof, + generate_random_parameters, + prepare_verifying_key, + create_random_proof, + verify_proof, +}; + +const MIMC_ROUNDS: usize = 322; + +/// This is an implementation of MiMC, specifically a +/// variant named `LongsightF322p3` for BLS12-381. +/// See http://eprint.iacr.org/2016/492 for more +/// information about this construction. 
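In formulas, the `LongsightF322p3` round function described in the doc comment here is, for i = 0, ..., 321:

```latex
x_L^{(i+1)} = x_R^{(i)} + \bigl(x_L^{(i)} + C_i\bigr)^3,
\qquad
x_R^{(i+1)} = x_L^{(i)},
```

with the hash output being x_L^(322).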
+/// +/// ``` +/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) { +/// for i from 0 up to 321 { +/// xL, xR := xR + (xL + Ci)^3, xL +/// } +/// return xL +/// } +/// ``` +fn mimc( + mut xl: E::Fr, + mut xr: E::Fr, + constants: &[E::Fr] +) -> E::Fr +{ + assert_eq!(constants.len(), MIMC_ROUNDS); + + for i in 0..MIMC_ROUNDS { + let mut tmp1 = xl; + tmp1.add_assign(&constants[i]); + let mut tmp2 = tmp1; + tmp2.square(); + tmp2.mul_assign(&tmp1); + tmp2.add_assign(&xr); + xr = xl; + xl = tmp2; + } + + xl +} + +/// This is our demo circuit for proving knowledge of the +/// preimage of a MiMC hash invocation. +struct MiMCDemo<'a, E: Engine> { + xl: Option, + xr: Option, + constants: &'a [E::Fr] +} + +/// Our demo circuit implements this `Circuit` trait which +/// is used during paramgen and proving in order to +/// synthesize the constraint system. +impl<'a, E: Engine> Circuit for MiMCDemo<'a, E> { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + assert_eq!(self.constants.len(), MIMC_ROUNDS); + + // Allocate the first component of the preimage. + let mut xl_value = self.xl; + let mut xl = cs.alloc(|| "preimage xl", || { + xl_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + // Allocate the second component of the preimage. + let mut xr_value = self.xr; + let mut xr = cs.alloc(|| "preimage xr", || { + xr_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + for i in 0..MIMC_ROUNDS { + // xL, xR := xR + (xL + Ci)^3, xL + let cs = &mut cs.namespace(|| format!("round {}", i)); + + // tmp = (xL + Ci)^2 + let mut tmp_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.square(); + e + }); + let mut tmp = cs.alloc(|| "tmp", || { + tmp_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + cs.enforce( + || "tmp = (xL + Ci)^2", + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + tmp + ); + + // new_xL = xR + (xL + Ci)^3 + // new_xL = xR + tmp * (xL + Ci) + // new_xL - xR = tmp * (xL + Ci) + let mut new_xl_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.mul_assign(&tmp_value.unwrap()); + e.add_assign(&xr_value.unwrap()); + e + }); + + let mut new_xl = if i == (MIMC_ROUNDS-1) { + // This is the last round, xL is our image and so + // we allocate a public input. + cs.alloc_input(|| "image", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + } else { + cs.alloc(|| "new_xl", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + }; + + cs.enforce( + || "new_xL = xR + (xL + Ci)^3", + |lc| lc + tmp, + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + new_xl - xr + ); + + // xR = xL + xr = xl; + xr_value = xl_value; + + // xL = new_xL + xl = new_xl; + xl_value = new_xl_value; + } + + Ok(()) + } +} + +#[test] +fn test_mimc() { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + + println!("Creating parameters..."); + + // Create parameters for our circuit + let params = { + let c = MiMCDemo:: { + xl: None, + xr: None, + constants: &constants + }; + + generate_random_parameters(c, rng).unwrap() + }; + + // Prepare the verification key (for proof verification) + let pvk = prepare_verifying_key(¶ms.vk); + + println!("Creating proofs..."); + + // Let's benchmark stuff! 
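Each of the 322 rounds synthesized above costs exactly two rank-1 constraints, with t_i the squaring temporary allocated as `tmp`:

```latex
\bigl(x_L + C_i\bigr)\cdot\bigl(x_L + C_i\bigr) = t_i,
\qquad
t_i \cdot \bigl(x_L + C_i\bigr) = x_L' - x_R ,
```

so the whole MiMC circuit has 2 * 322 = 644 constraints, and only the final round's x_L' is exposed as the public image input.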
+ const SAMPLES: u32 = 50; + let mut total_proving = Duration::new(0, 0); + let mut total_verifying = Duration::new(0, 0); + + // Just a place to put the proof data, so we can + // benchmark deserialization. + let mut proof_vec = vec![]; + + for _ in 0..SAMPLES { + // Generate a random preimage and compute the image + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + proof_vec.truncate(0); + + let start = Instant::now(); + { + // Create an instance of our circuit (with the + // witness) + let c = MiMCDemo { + xl: Some(xl), + xr: Some(xr), + constants: &constants + }; + + // Create a groth16 proof with our parameters. + let proof = create_random_proof(c, ¶ms, rng).unwrap(); + + proof.write(&mut proof_vec).unwrap(); + } + + total_proving += start.elapsed(); + + let start = Instant::now(); + let proof = Proof::read(&proof_vec[..]).unwrap(); + // Check the proof + assert!(verify_proof( + &pvk, + &proof, + &[image] + ).unwrap()); + total_verifying += start.elapsed(); + } + let proving_avg = total_proving / SAMPLES; + let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (proving_avg.as_secs() as f64); + + let verifying_avg = total_verifying / SAMPLES; + let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (verifying_avg.as_secs() as f64); + + println!("Average proving time: {:?} seconds", proving_avg); + println!("Average verifying time: {:?} seconds", verifying_avg); +}