Merge pull request #3 from adityapk00/lightclient-work

Lightclient work
Authored by adityapk00 on 2019-09-10 10:01:52 -07:00; committed by GitHub.
130 changed files with 12616 additions and 5768 deletions


@@ -1,8 +1,13 @@
language: rust
rust:
- 1.32.0
- 1.36.0
cache: cargo
before_script:
- rustup component add rustfmt
script:
- cargo build --verbose --release --all
- cargo fmt --all -- --check
- cargo test --verbose --release --all

Cargo.lock (generated): 704 changed lines. Diff suppressed because it is too large.


@@ -5,8 +5,8 @@ members = [
"group",
"librustzcash",
"pairing",
"sapling-crypto",
"zcash_client_backend",
"zcash_client_sqlite",
"zcash_primitives",
"zcash_proofs",
]


@@ -9,8 +9,8 @@ repository = "https://github.com/ebfull/bellman"
version = "0.1.0"
[dependencies]
rand = "0.4"
bit-vec = "0.4.4"
blake2s_simd = "0.5"
ff = { path = "../ff" }
futures = "0.1"
futures-cpupool = { version = "0.1", optional = true }
@@ -18,8 +18,15 @@ group = { path = "../group" }
num_cpus = { version = "1", optional = true }
crossbeam = { version = "0.3", optional = true }
pairing = { path = "../pairing", optional = true }
rand_core = "0.5"
byteorder = "1"
[dev-dependencies]
hex-literal = "0.1"
rand = "0.7"
rand_xorshift = "0.2"
sha2 = "0.8"
[features]
groth16 = ["pairing"]
multicore = ["futures-cpupool", "crossbeam", "num_cpus"]
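
A note on the dependency changes above: the crate moves from rand 0.4 to rand_core 0.5, with rand 0.7 and rand_xorshift 0.2 as dev-dependencies, and that migration drives most of the test churn in the hunks below. A minimal sketch of the new seeding and sampling idiom, using only the calls the diff itself relies on:

```rust
// Sketch of the RNG migration that recurs throughout the test diffs below.
// rand 0.4's `XorShiftRng::from_seed([u32; 4])` plus `Rng::gen()` become
// rand_xorshift 0.2 / rand_core 0.5, which seed from 16 bytes and expose
// `RngCore::next_u32()`; field elements now come from `Fr::random(&mut rng)`.
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;

fn main() {
    // Same 16-byte seed style the updated tests use.
    let mut rng = XorShiftRng::from_seed([
        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
        0xbc, 0xe5,
    ]);

    // The old `rng.gen::<bool>()` becomes an explicit parity check.
    let coin = rng.next_u32() % 2 != 0;
    println!("{}", coin);
}
```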


@@ -13,9 +13,7 @@
use ff::{Field, PrimeField, ScalarEngine};
use group::CurveProjective;
use super::{
SynthesisError
};
use super::SynthesisError;
use super::multicore::Worker;
@@ -25,7 +23,7 @@ pub struct EvaluationDomain<E: ScalarEngine, G: Group<E>> {
omega: E::Fr,
omegainv: E::Fr,
geninv: E::Fr,
minv: E::Fr
minv: E::Fr,
}
impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
@@ -41,8 +39,7 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
self.coeffs
}
pub fn from_coeffs(mut coeffs: Vec<G>) -> Result<EvaluationDomain<E, G>, SynthesisError>
{
pub fn from_coeffs(mut coeffs: Vec<G>) -> Result<EvaluationDomain<E, G>, SynthesisError> {
// Compute the size of our evaluation domain
let mut m = 1;
let mut exp = 0;
@@ -53,7 +50,7 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
// The pairing-friendly curve may not be able to support
// large enough (radix2) evaluation domains.
if exp >= E::Fr::S {
return Err(SynthesisError::PolynomialDegreeTooLarge)
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
}
@@ -72,17 +69,18 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
omega: omega,
omegainv: omega.inverse().unwrap(),
geninv: E::Fr::multiplicative_generator().inverse().unwrap(),
minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap()
minv: E::Fr::from_str(&format!("{}", m))
.unwrap()
.inverse()
.unwrap(),
})
}
pub fn fft(&mut self, worker: &Worker)
{
pub fn fft(&mut self, worker: &Worker) {
best_fft(&mut self.coeffs, worker, &self.omega, self.exp);
}
pub fn ifft(&mut self, worker: &Worker)
{
pub fn ifft(&mut self, worker: &Worker) {
best_fft(&mut self.coeffs, worker, &self.omegainv, self.exp);
worker.scope(self.coeffs.len(), |scope, chunk| {
@@ -98,8 +96,7 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
});
}
pub fn distribute_powers(&mut self, worker: &Worker, g: E::Fr)
{
pub fn distribute_powers(&mut self, worker: &Worker, g: E::Fr) {
worker.scope(self.coeffs.len(), |scope, chunk| {
for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
@@ -113,14 +110,12 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
});
}
pub fn coset_fft(&mut self, worker: &Worker)
{
pub fn coset_fft(&mut self, worker: &Worker) {
self.distribute_powers(worker, E::Fr::multiplicative_generator());
self.fft(worker);
}
pub fn icoset_fft(&mut self, worker: &Worker)
{
pub fn icoset_fft(&mut self, worker: &Worker) {
let geninv = self.geninv;
self.ifft(worker);
@@ -139,9 +134,11 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
/// The target polynomial is the zero polynomial in our
/// evaluation domain, so we must perform division over
/// a coset.
pub fn divide_by_z_on_coset(&mut self, worker: &Worker)
{
let i = self.z(&E::Fr::multiplicative_generator()).inverse().unwrap();
pub fn divide_by_z_on_coset(&mut self, worker: &Worker) {
let i = self
.z(&E::Fr::multiplicative_generator())
.inverse()
.unwrap();
worker.scope(self.coeffs.len(), |scope, chunk| {
for v in self.coeffs.chunks_mut(chunk) {
@@ -159,7 +156,11 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
assert_eq!(self.coeffs.len(), other.coeffs.len());
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
for (a, b) in self
.coeffs
.chunks_mut(chunk)
.zip(other.coeffs.chunks(chunk))
{
scope.spawn(move || {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_mul_assign(&b.0);
@@ -174,7 +175,11 @@ impl<E: ScalarEngine, G: Group<E>> EvaluationDomain<E, G> {
assert_eq!(self.coeffs.len(), other.coeffs.len());
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
for (a, b) in self
.coeffs
.chunks_mut(chunk)
.zip(other.coeffs.chunks(chunk))
{
scope.spawn(move || {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_sub_assign(&b);
@@ -200,7 +205,7 @@ impl<G: CurveProjective> PartialEq for Point<G> {
}
}
impl<G: CurveProjective> Copy for Point<G> { }
impl<G: CurveProjective> Copy for Point<G> {}
impl<G: CurveProjective> Clone for Point<G> {
fn clone(&self) -> Point<G> {
@@ -231,7 +236,7 @@ impl<E: ScalarEngine> PartialEq for Scalar<E> {
}
}
impl<E: ScalarEngine> Copy for Scalar<E> { }
impl<E: ScalarEngine> Copy for Scalar<E> {}
impl<E: ScalarEngine> Clone for Scalar<E> {
fn clone(&self) -> Scalar<E> {
@@ -254,8 +259,7 @@ impl<E: ScalarEngine> Group<E> for Scalar<E> {
}
}
fn best_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32)
{
fn best_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32) {
let log_cpus = worker.log_num_cpus();
if log_n <= log_cpus {
@@ -265,8 +269,7 @@ fn best_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &
}
}
fn serial_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32)
{
fn serial_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32) {
fn bitreverse(mut n: u32, l: u32) -> u32 {
let mut r = 0;
for _ in 0..l {
@@ -288,22 +291,22 @@ fn serial_fft<E: ScalarEngine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u
let mut m = 1;
for _ in 0..log_n {
let w_m = omega.pow(&[(n / (2*m)) as u64]);
let w_m = omega.pow(&[(n / (2 * m)) as u64]);
let mut k = 0;
while k < n {
let mut w = E::Fr::one();
for j in 0..m {
let mut t = a[(k+j+m) as usize];
let mut t = a[(k + j + m) as usize];
t.group_mul_assign(&w);
let mut tmp = a[(k+j) as usize];
let mut tmp = a[(k + j) as usize];
tmp.group_sub_assign(&t);
a[(k+j+m) as usize] = tmp;
a[(k+j) as usize].group_add_assign(&t);
a[(k + j + m) as usize] = tmp;
a[(k + j) as usize].group_add_assign(&t);
w.mul_assign(&w_m);
}
k += 2*m;
k += 2 * m;
}
m *= 2;
@@ -315,9 +318,8 @@ fn parallel_fft<E: ScalarEngine, T: Group<E>>(
worker: &Worker,
omega: &E::Fr,
log_n: u32,
log_cpus: u32
)
{
log_cpus: u32,
) {
assert!(log_n >= log_cpus);
let num_cpus = 1 << log_cpus;
@@ -375,16 +377,19 @@ fn parallel_fft<E: ScalarEngine, T: Group<E>>(
#[test]
fn polynomial_arith() {
use pairing::bls12_381::Bls12;
use rand::{self, Rand};
use rand_core::RngCore;
fn test_mul<E: ScalarEngine, R: rand::Rng>(rng: &mut R)
{
fn test_mul<E: ScalarEngine, R: RngCore>(rng: &mut R) {
let worker = Worker::new();
for coeffs_a in 0..70 {
for coeffs_b in 0..70 {
let mut a: Vec<_> = (0..coeffs_a).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect();
let mut b: Vec<_> = (0..coeffs_b).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect();
let mut a: Vec<_> = (0..coeffs_a)
.map(|_| Scalar::<E>(E::Fr::random(rng)))
.collect();
let mut b: Vec<_> = (0..coeffs_b)
.map(|_| Scalar::<E>(E::Fr::random(rng)))
.collect();
// naive evaluation
let mut naive = vec![Scalar(E::Fr::zero()); coeffs_a + coeffs_b];
@@ -423,10 +428,9 @@ fn polynomial_arith() {
#[test]
fn fft_composition() {
use pairing::bls12_381::Bls12;
use rand;
use rand_core::RngCore;
fn test_comp<E: ScalarEngine, R: rand::Rng>(rng: &mut R)
{
fn test_comp<E: ScalarEngine, R: RngCore>(rng: &mut R) {
let worker = Worker::new();
for coeffs in 0..10 {
@@ -434,7 +438,7 @@ fn fft_composition() {
let mut v = vec![];
for _ in 0..coeffs {
v.push(Scalar::<E>(rng.gen()));
v.push(Scalar::<E>(E::Fr::random(rng)));
}
let mut domain = EvaluationDomain::from_coeffs(v.clone()).unwrap();
@@ -462,22 +466,23 @@ fn fft_composition() {
#[test]
fn parallel_fft_consistency() {
use pairing::bls12_381::Bls12;
use rand::{self, Rand};
use rand_core::RngCore;
use std::cmp::min;
fn test_consistency<E: ScalarEngine, R: rand::Rng>(rng: &mut R)
{
fn test_consistency<E: ScalarEngine, R: RngCore>(rng: &mut R) {
let worker = Worker::new();
for _ in 0..5 {
for log_d in 0..10 {
let d = 1 << log_d;
let v1 = (0..d).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect::<Vec<_>>();
let v1 = (0..d)
.map(|_| Scalar::<E>(E::Fr::random(rng)))
.collect::<Vec<_>>();
let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap();
let mut v2 = EvaluationDomain::from_coeffs(v1.coeffs.clone()).unwrap();
for log_cpus in log_d..min(log_d+1, 3) {
for log_cpus in log_d..min(log_d + 1, 3) {
parallel_fft(&mut v1.coeffs, &worker, &v1.omega, log_d, log_cpus);
serial_fft(&mut v2.coeffs, &v2.omega, log_d);
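
The domain.rs changes above are mostly rustfmt reformatting plus the rand migration, but `serial_fft` is the heart of the file, so here is a self-contained sketch of the same radix-2 structure over `u64` arithmetic modulo a small prime. The prime and root of unity are illustrative stand-ins, not anything from the diff; the bit-reversal reordering and the `w_m = omega^(n / 2m)` butterfly passes mirror the hunk above.

```rust
// Stand-in modulus for this sketch only; the real code works over E::Fr.
const P: u64 = 17;

fn bitreverse(mut n: u32, l: u32) -> u32 {
    let mut r = 0;
    for _ in 0..l {
        r = (r << 1) | (n & 1);
        n >>= 1;
    }
    r
}

fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1u64;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

// Same shape as `serial_fft`: reorder into bit-reversed order, then run
// butterfly passes with twiddle factor w_m = omega^(n / 2m).
fn serial_ntt(a: &mut [u64], omega: u64, log_n: u32) {
    let n = a.len() as u32;
    assert_eq!(n, 1u32 << log_n);

    for k in 0..n {
        let rk = bitreverse(k, log_n);
        if k < rk {
            a.swap(rk as usize, k as usize);
        }
    }

    let mut m = 1u32;
    for _ in 0..log_n {
        let w_m = pow_mod(omega, u64::from(n / (2 * m)));
        let mut k = 0u32;
        while k < n {
            let mut w = 1u64;
            for j in 0..m {
                let t = a[(k + j + m) as usize] * w % P;
                let u = a[(k + j) as usize];
                a[(k + j + m) as usize] = (u + P - t) % P;
                a[(k + j) as usize] = (u + t) % P;
                w = w * w_m % P;
            }
            k += 2 * m;
        }
        m *= 2;
    }
}

fn main() {
    // 4 has multiplicative order 4 modulo 17, so it serves as omega for n = 4.
    let mut a = [3u64, 1, 4, 1];
    serial_ntt(&mut a, 4, 2);
    // Entry k is the polynomial 3 + x + 4x^2 + x^3 evaluated at omega^k mod 17.
    assert_eq!(a, [9, 16, 5, 16]);
    println!("{:?}", a);
}
```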


@@ -1,23 +1,15 @@
#[cfg(test)]
pub mod test;
pub mod boolean;
pub mod multieq;
pub mod uint32;
pub mod blake2s;
pub mod num;
pub mod boolean;
pub mod lookup;
pub mod ecc;
pub mod pedersen_hash;
pub mod multieq;
pub mod multipack;
pub mod num;
pub mod sha256;
pub mod uint32;
pub mod sapling;
pub mod sprout;
use bellman::{
SynthesisError
};
use crate::SynthesisError;
// TODO: This should probably be removed and we
// should use existing helper methods on `Option`
@@ -25,7 +17,7 @@ use bellman::{
/// This basically is just an extension to `Option`
/// which allows for a convenient mapping to an
/// error on `None`.
trait Assignment<T> {
pub trait Assignment<T> {
fn get(&self) -> Result<&T, SynthesisError>;
}
@@ -33,7 +25,7 @@ impl<T> Assignment<T> for Option<T> {
fn get(&self) -> Result<&T, SynthesisError> {
match *self {
Some(ref v) => Ok(v),
None => Err(SynthesisError::AssignmentMissing)
None => Err(SynthesisError::AssignmentMissing),
}
}
}
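
The hunk above makes the `Assignment` helper trait `pub` so other crates in the workspace can use it. A standalone restatement of the pattern, with a local mock of `SynthesisError` (this sketch is an illustration, not code from the crate):

```rust
// Local stand-in for the real SynthesisError so the pattern runs on its own.
#[derive(Debug, PartialEq)]
enum SynthesisError {
    AssignmentMissing,
}

trait Assignment<T> {
    fn get(&self) -> Result<&T, SynthesisError>;
}

impl<T> Assignment<T> for Option<T> {
    fn get(&self) -> Result<&T, SynthesisError> {
        match *self {
            Some(ref v) => Ok(v),
            None => Err(SynthesisError::AssignmentMissing),
        }
    }
}

fn main() {
    // During proving a witness value is present; during key generation it is
    // not, and `get()` turns that into an error instead of a panic.
    let witness: Option<u64> = Some(42);
    assert_eq!(witness.get().map(|v| *v), Ok(42));

    let missing: Option<u64> = None;
    assert_eq!(missing.get(), Err(SynthesisError::AssignmentMissing));
}
```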


@@ -1,19 +1,10 @@
use pairing::{
Engine,
};
use pairing::Engine;
use bellman::{
SynthesisError,
ConstraintSystem
};
use crate::{ConstraintSystem, SynthesisError};
use super::boolean::{
Boolean
};
use super::boolean::Boolean;
use super::uint32::{
UInt32
};
use super::uint32::UInt32;
use super::multieq::MultiEq;
@@ -65,7 +56,7 @@ const SIGMA: [[usize; 16]; 10] = [
[12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11],
[13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10],
[6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5],
[10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0]
[10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0],
];
/*
@@ -98,17 +89,30 @@ fn mixing_g<E: Engine, CS: ConstraintSystem<E>, M>(
c: usize,
d: usize,
x: &UInt32,
y: &UInt32
y: &UInt32,
) -> Result<(), SynthesisError>
where M: ConstraintSystem<E, Root=MultiEq<E, CS>>
where
M: ConstraintSystem<E, Root = MultiEq<E, CS>>,
{
v[a] = UInt32::addmany(cs.namespace(|| "mixing step 1"), &[v[a].clone(), v[b].clone(), x.clone()])?;
v[a] = UInt32::addmany(
cs.namespace(|| "mixing step 1"),
&[v[a].clone(), v[b].clone(), x.clone()],
)?;
v[d] = v[d].xor(cs.namespace(|| "mixing step 2"), &v[a])?.rotr(R1);
v[c] = UInt32::addmany(cs.namespace(|| "mixing step 3"), &[v[c].clone(), v[d].clone()])?;
v[c] = UInt32::addmany(
cs.namespace(|| "mixing step 3"),
&[v[c].clone(), v[d].clone()],
)?;
v[b] = v[b].xor(cs.namespace(|| "mixing step 4"), &v[c])?.rotr(R2);
v[a] = UInt32::addmany(cs.namespace(|| "mixing step 5"), &[v[a].clone(), v[b].clone(), y.clone()])?;
v[a] = UInt32::addmany(
cs.namespace(|| "mixing step 5"),
&[v[a].clone(), v[b].clone(), y.clone()],
)?;
v[d] = v[d].xor(cs.namespace(|| "mixing step 6"), &v[a])?.rotr(R3);
v[c] = UInt32::addmany(cs.namespace(|| "mixing step 7"), &[v[c].clone(), v[d].clone()])?;
v[c] = UInt32::addmany(
cs.namespace(|| "mixing step 7"),
&[v[c].clone(), v[d].clone()],
)?;
v[b] = v[b].xor(cs.namespace(|| "mixing step 8"), &v[c])?.rotr(R4);
Ok(())
@@ -162,15 +166,13 @@ fn mixing_g<E: Engine, CS: ConstraintSystem<E>, M>(
END FUNCTION.
*/
fn blake2s_compression<E: Engine, CS: ConstraintSystem<E>>(
mut cs: CS,
h: &mut [UInt32],
m: &[UInt32],
t: u64,
f: bool
) -> Result<(), SynthesisError>
{
f: bool,
) -> Result<(), SynthesisError> {
assert_eq!(h.len(), 8);
assert_eq!(m.len(), 16);
@@ -196,10 +198,16 @@ fn blake2s_compression<E: Engine, CS: ConstraintSystem<E>>(
assert_eq!(v.len(), 16);
v[12] = v[12].xor(cs.namespace(|| "first xor"), &UInt32::constant(t as u32))?;
v[13] = v[13].xor(cs.namespace(|| "second xor"), &UInt32::constant((t >> 32) as u32))?;
v[13] = v[13].xor(
cs.namespace(|| "second xor"),
&UInt32::constant((t >> 32) as u32),
)?;
if f {
v[14] = v[14].xor(cs.namespace(|| "third xor"), &UInt32::constant(u32::max_value()))?;
v[14] = v[14].xor(
cs.namespace(|| "third xor"),
&UInt32::constant(u32::max_value()),
)?;
}
{
@@ -210,20 +218,92 @@ fn blake2s_compression<E: Engine, CS: ConstraintSystem<E>>(
let s = SIGMA[i % 10];
mixing_g(cs.namespace(|| "mixing invocation 1"), &mut v, 0, 4, 8, 12, &m[s[ 0]], &m[s[ 1]])?;
mixing_g(cs.namespace(|| "mixing invocation 2"), &mut v, 1, 5, 9, 13, &m[s[ 2]], &m[s[ 3]])?;
mixing_g(cs.namespace(|| "mixing invocation 3"), &mut v, 2, 6, 10, 14, &m[s[ 4]], &m[s[ 5]])?;
mixing_g(cs.namespace(|| "mixing invocation 4"), &mut v, 3, 7, 11, 15, &m[s[ 6]], &m[s[ 7]])?;
mixing_g(
cs.namespace(|| "mixing invocation 1"),
&mut v,
0,
4,
8,
12,
&m[s[0]],
&m[s[1]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 2"),
&mut v,
1,
5,
9,
13,
&m[s[2]],
&m[s[3]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 3"),
&mut v,
2,
6,
10,
14,
&m[s[4]],
&m[s[5]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 4"),
&mut v,
3,
7,
11,
15,
&m[s[6]],
&m[s[7]],
)?;
mixing_g(cs.namespace(|| "mixing invocation 5"), &mut v, 0, 5, 10, 15, &m[s[ 8]], &m[s[ 9]])?;
mixing_g(cs.namespace(|| "mixing invocation 6"), &mut v, 1, 6, 11, 12, &m[s[10]], &m[s[11]])?;
mixing_g(cs.namespace(|| "mixing invocation 7"), &mut v, 2, 7, 8, 13, &m[s[12]], &m[s[13]])?;
mixing_g(cs.namespace(|| "mixing invocation 8"), &mut v, 3, 4, 9, 14, &m[s[14]], &m[s[15]])?;
mixing_g(
cs.namespace(|| "mixing invocation 5"),
&mut v,
0,
5,
10,
15,
&m[s[8]],
&m[s[9]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 6"),
&mut v,
1,
6,
11,
12,
&m[s[10]],
&m[s[11]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 7"),
&mut v,
2,
7,
8,
13,
&m[s[12]],
&m[s[13]],
)?;
mixing_g(
cs.namespace(|| "mixing invocation 8"),
&mut v,
3,
4,
9,
14,
&m[s[14]],
&m[s[15]],
)?;
}
}
for i in 0..8 {
let mut cs = cs.namespace(|| format!("h[{i}] ^ v[{i}] ^ v[{i} + 8]", i=i));
let mut cs = cs.namespace(|| format!("h[{i}] ^ v[{i}] ^ v[{i} + 8]", i = i));
h[i] = h[i].xor(cs.namespace(|| "first xor"), &v[i])?;
h[i] = h[i].xor(cs.namespace(|| "second xor"), &v[i + 8])?;
@@ -262,9 +342,8 @@ fn blake2s_compression<E: Engine, CS: ConstraintSystem<E>>(
pub fn blake2s<E: Engine, CS: ConstraintSystem<E>>(
mut cs: CS,
input: &[Boolean],
personalization: &[u8]
) -> Result<Vec<Boolean>, SynthesisError>
{
personalization: &[u8],
) -> Result<Vec<Boolean>, SynthesisError> {
use byteorder::{ByteOrder, LittleEndian};
assert_eq!(personalization.len(), 8);
@@ -279,8 +358,12 @@ pub fn blake2s<E: Engine, CS: ConstraintSystem<E>>(
h.push(UInt32::constant(0x9B05688C));
// Personalization is stored here
h.push(UInt32::constant(0x1F83D9AB ^ LittleEndian::read_u32(&personalization[0..4])));
h.push(UInt32::constant(0x5BE0CD19 ^ LittleEndian::read_u32(&personalization[4..8])));
h.push(UInt32::constant(
0x1F83D9AB ^ LittleEndian::read_u32(&personalization[0..4]),
));
h.push(UInt32::constant(
0x5BE0CD19 ^ LittleEndian::read_u32(&personalization[4..8]),
));
let mut blocks: Vec<Vec<UInt32>> = vec![];
@@ -312,7 +395,13 @@ pub fn blake2s<E: Engine, CS: ConstraintSystem<E>>(
{
let cs = cs.namespace(|| "final block");
blake2s_compression(cs, &mut h, &blocks[blocks.len() - 1], (input.len() / 8) as u64, true)?;
blake2s_compression(
cs,
&mut h,
&blocks[blocks.len() - 1],
(input.len() / 8) as u64,
true,
)?;
}
Ok(h.iter().flat_map(|b| b.into_bits()).collect())
@@ -320,13 +409,15 @@ pub fn blake2s<E: Engine, CS: ConstraintSystem<E>>(
#[cfg(test)]
mod test {
use rand::{XorShiftRng, SeedableRng, Rng};
use pairing::bls12_381::{Bls12};
use ::circuit::boolean::{Boolean, AllocatedBit};
use ::circuit::test::TestConstraintSystem;
use blake2s_simd::Params as Blake2sParams;
use pairing::bls12_381::Bls12;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use super::blake2s;
use bellman::{ConstraintSystem};
use blake2_rfc::blake2s::Blake2s;
use crate::gadgets::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::TestConstraintSystem;
use crate::ConstraintSystem;
#[test]
fn test_blank_hash() {
@@ -354,7 +445,13 @@ mod test {
#[test]
fn test_blake2s_constraints() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bits: Vec<_> = (0..512).map(|i| AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true)).unwrap().into()).collect();
let input_bits: Vec<_> = (0..512)
.map(|i| {
AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true))
.unwrap()
.into()
})
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
@@ -366,12 +463,18 @@ mod test {
// doesn't result in more constraints.
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let input_bits: Vec<_> = (0..512)
.map(|_| Boolean::constant(rng.gen()))
.chain((0..512)
.map(|i| AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true)).unwrap().into()))
.collect();
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.chain((0..512).map(|i| {
AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true))
.unwrap()
.into()
}))
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
@@ -380,21 +483,31 @@ mod test {
#[test]
fn test_blake2s_constant_constraints() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let input_bits: Vec<_> = (0..512).map(|_| Boolean::constant(rng.gen())).collect();
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let input_bits: Vec<_> = (0..512)
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert_eq!(cs.num_constraints(), 0);
}
#[test]
fn test_blake2s() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0))
{
let mut h = Blake2s::with_params(32, &[], &[], b"12345678");
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0)) {
let mut h = Blake2sParams::new()
.hash_length(32)
.personal(b"12345678")
.to_state();
let data: Vec<u8> = (0..input_len).map(|_| rng.gen()).collect();
let data: Vec<u8> = (0..input_len).map(|_| rng.next_u32() as u8).collect();
h.update(&data);
@@ -408,7 +521,11 @@ mod test {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8)).unwrap().into());
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
@@ -416,17 +533,19 @@ mod test {
assert!(cs.is_satisfied());
let mut s = hash_result.as_ref().iter()
.flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8));
let mut s = hash_result
.as_ref()
.iter()
.flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
},
}
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
},
}
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);
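
For the reference hash in the test above, the diff swaps blake2_rfc for blake2s_simd. A short sketch of the replacement API as the diff uses it; the call to `finalize` and the input bytes are assumptions here, since that part of the test is not shown in the hunk:

```rust
// Sketch of the host-side reference hash after the blake2_rfc -> blake2s_simd
// switch in the test above. The input data is made up for illustration.
use blake2s_simd::Params as Blake2sParams;

fn main() {
    let data = b"test vector input";

    let mut h = Blake2sParams::new()
        .hash_length(32)
        .personal(b"12345678") // same 8-byte personalization the circuit test passes
        .to_state();
    h.update(data);

    // `finalize` is assumed from the blake2s_simd API; the hunk only shows
    // the resulting hash being read back via `as_ref()`.
    let hash = h.finalize();
    let bytes: &[u8] = hash.as_ref();
    assert_eq!(bytes.len(), 32);
    println!("{:02x?}", bytes);
}
```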


@@ -1,23 +1,15 @@
use ff::Field;
use pairing::Engine;
use super::*;
use super::num::{
AllocatedNum,
Num
};
use super::boolean::Boolean;
use bellman::{
ConstraintSystem
};
use super::num::{AllocatedNum, Num};
use super::*;
use crate::ConstraintSystem;
// Synthesize the constants for each base pattern.
fn synth<'a, E: Engine, I>(
window_size: usize,
constants: I,
assignment: &mut [E::Fr]
)
where I: IntoIterator<Item=&'a E::Fr>
fn synth<'a, E: Engine, I>(window_size: usize, constants: I, assignment: &mut [E::Fr])
where
I: IntoIterator<Item = &'a E::Fr>,
{
assert_eq!(assignment.len(), 1 << window_size);
@@ -39,16 +31,20 @@ fn synth<'a, E: Engine, I>(
pub fn lookup3_xy<E: Engine, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(E::Fr, E::Fr)]
coords: &[(E::Fr, E::Fr)],
) -> Result<(AllocatedNum<E>, AllocatedNum<E>), SynthesisError>
where CS: ConstraintSystem<E>
where
CS: ConstraintSystem<E>,
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 8);
// Calculate the index into `coords`
let i =
match (bits[0].get_value(), bits[1].get_value(), bits[2].get_value()) {
let i = match (
bits[0].get_value(),
bits[1].get_value(),
bits[2].get_value(),
) {
(Some(a_value), Some(b_value), Some(c_value)) => {
let mut tmp = 0;
if a_value {
@@ -61,25 +57,15 @@ pub fn lookup3_xy<E: Engine, CS>(
tmp += 4;
}
Some(tmp)
},
_ => None
}
_ => None,
};
// Allocate the x-coordinate resulting from the lookup
let res_x = AllocatedNum::alloc(
cs.namespace(|| "x"),
|| {
Ok(coords[*i.get()?].0)
}
)?;
let res_x = AllocatedNum::alloc(cs.namespace(|| "x"), || Ok(coords[*i.get()?].0))?;
// Allocate the y-coordinate resulting from the lookup
let res_y = AllocatedNum::alloc(
cs.namespace(|| "y"),
|| {
Ok(coords[*i.get()?].1)
}
)?;
let res_y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(coords[*i.get()?].1))?;
// Compute the coefficients for the lookup constraints
let mut x_coeffs = [E::Fr::zero(); 8];
@@ -93,30 +79,38 @@ pub fn lookup3_xy<E: Engine, CS>(
cs.enforce(
|| "x-coordinate lookup",
|lc| lc + (x_coeffs[0b001], one)
|lc| {
lc + (x_coeffs[0b001], one)
+ &bits[1].lc::<E>(one, x_coeffs[0b011])
+ &bits[2].lc::<E>(one, x_coeffs[0b101])
+ &precomp.lc::<E>(one, x_coeffs[0b111]),
+ &precomp.lc::<E>(one, x_coeffs[0b111])
},
|lc| lc + &bits[0].lc::<E>(one, E::Fr::one()),
|lc| lc + res_x.get_variable()
|lc| {
lc + res_x.get_variable()
- (x_coeffs[0b000], one)
- &bits[1].lc::<E>(one, x_coeffs[0b010])
- &bits[2].lc::<E>(one, x_coeffs[0b100])
- &precomp.lc::<E>(one, x_coeffs[0b110]),
- &precomp.lc::<E>(one, x_coeffs[0b110])
},
);
cs.enforce(
|| "y-coordinate lookup",
|lc| lc + (y_coeffs[0b001], one)
|lc| {
lc + (y_coeffs[0b001], one)
+ &bits[1].lc::<E>(one, y_coeffs[0b011])
+ &bits[2].lc::<E>(one, y_coeffs[0b101])
+ &precomp.lc::<E>(one, y_coeffs[0b111]),
+ &precomp.lc::<E>(one, y_coeffs[0b111])
},
|lc| lc + &bits[0].lc::<E>(one, E::Fr::one()),
|lc| lc + res_y.get_variable()
|lc| {
lc + res_y.get_variable()
- (y_coeffs[0b000], one)
- &bits[1].lc::<E>(one, y_coeffs[0b010])
- &bits[2].lc::<E>(one, y_coeffs[0b100])
- &precomp.lc::<E>(one, y_coeffs[0b110]),
- &precomp.lc::<E>(one, y_coeffs[0b110])
},
);
Ok((res_x, res_y))
@@ -127,16 +121,16 @@ pub fn lookup3_xy<E: Engine, CS>(
pub fn lookup3_xy_with_conditional_negation<E: Engine, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(E::Fr, E::Fr)]
coords: &[(E::Fr, E::Fr)],
) -> Result<(Num<E>, Num<E>), SynthesisError>
where CS: ConstraintSystem<E>
where
CS: ConstraintSystem<E>,
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 4);
// Calculate the index into `coords`
let i =
match (bits[0].get_value(), bits[1].get_value()) {
let i = match (bits[0].get_value(), bits[1].get_value()) {
(Some(a_value), Some(b_value)) => {
let mut tmp = 0;
if a_value {
@@ -146,22 +140,19 @@ pub fn lookup3_xy_with_conditional_negation<E: Engine, CS>(
tmp += 2;
}
Some(tmp)
},
_ => None
}
_ => None,
};
// Allocate the y-coordinate resulting from the lookup
// and conditional negation
let y = AllocatedNum::alloc(
cs.namespace(|| "y"),
|| {
let mut tmp = coords[*i.get()?].1;
if *bits[2].get_value().get()? {
tmp.negate();
}
Ok(tmp)
let y = AllocatedNum::alloc(cs.namespace(|| "y"), || {
let mut tmp = coords[*i.get()?].1;
if *bits[2].get_value().get()? {
tmp.negate();
}
)?;
Ok(tmp)
})?;
let one = CS::one();
@@ -174,21 +165,21 @@ pub fn lookup3_xy_with_conditional_negation<E: Engine, CS>(
let precomp = Boolean::and(cs.namespace(|| "precomp"), &bits[0], &bits[1])?;
let x = Num::zero()
.add_bool_with_coeff(one, &Boolean::constant(true), x_coeffs[0b00])
.add_bool_with_coeff(one, &bits[0], x_coeffs[0b01])
.add_bool_with_coeff(one, &bits[1], x_coeffs[0b10])
.add_bool_with_coeff(one, &precomp, x_coeffs[0b11]);
.add_bool_with_coeff(one, &Boolean::constant(true), x_coeffs[0b00])
.add_bool_with_coeff(one, &bits[0], x_coeffs[0b01])
.add_bool_with_coeff(one, &bits[1], x_coeffs[0b10])
.add_bool_with_coeff(one, &precomp, x_coeffs[0b11]);
let y_lc = precomp.lc::<E>(one, y_coeffs[0b11]) +
&bits[1].lc::<E>(one, y_coeffs[0b10]) +
&bits[0].lc::<E>(one, y_coeffs[0b01]) +
(y_coeffs[0b00], one);
let y_lc = precomp.lc::<E>(one, y_coeffs[0b11])
+ &bits[1].lc::<E>(one, y_coeffs[0b10])
+ &bits[0].lc::<E>(one, y_coeffs[0b01])
+ (y_coeffs[0b00], one);
cs.enforce(
|| "y-coordinate lookup",
|lc| lc + &y_lc + &y_lc,
|lc| lc + &bits[2].lc::<E>(one, E::Fr::one()),
|lc| lc + &y_lc - y.get_variable()
|lc| lc + &y_lc - y.get_variable(),
);
Ok((x, y.into()))
@@ -196,46 +187,52 @@ pub fn lookup3_xy_with_conditional_negation<E: Engine, CS>(
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rand, Rng, XorShiftRng};
use super::*;
use ::circuit::test::*;
use ::circuit::boolean::{Boolean, AllocatedBit};
use crate::gadgets::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::*;
use pairing::bls12_381::{Bls12, Fr};
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
#[test]
fn test_lookup3_xy() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0656]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a_val = rng.gen();
let a = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap()
);
let a_val = rng.next_u32() % 2 != 0;
let a = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap());
let b_val = rng.gen();
let b = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap()
);
let b_val = rng.next_u32() % 2 != 0;
let b = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap());
let c_val = rng.gen();
let c = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap()
);
let c_val = rng.next_u32() % 2 != 0;
let c = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap());
let bits = vec![a, b, c];
let points: Vec<(Fr, Fr)> = (0..8).map(|_| (rng.gen(), rng.gen())).collect();
let points: Vec<(Fr, Fr)> = (0..8)
.map(|_| (Fr::random(&mut rng), Fr::random(&mut rng)))
.collect();
let res = lookup3_xy(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val { index += 1 }
if b_val { index += 2 }
if c_val { index += 4 }
if a_val {
index += 1
}
if b_val {
index += 2
}
if c_val {
index += 4
}
assert_eq!(res.0.get_value().unwrap(), points[index].0);
assert_eq!(res.1.get_value().unwrap(), points[index].1);
@@ -244,53 +241,63 @@ mod test {
#[test]
fn test_lookup3_xy_with_conditional_negation() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a_val = rng.gen();
let a = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap()
);
let a_val = rng.next_u32() % 2 != 0;
let a = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap());
let b_val = rng.gen();
let b = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap()
);
let b_val = rng.next_u32() % 2 != 0;
let b = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap());
let c_val = rng.gen();
let c = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap()
);
let c_val = rng.next_u32() % 2 != 0;
let c = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap());
let bits = vec![a, b, c];
let points: Vec<(Fr, Fr)> = (0..4).map(|_| (rng.gen(), rng.gen())).collect();
let points: Vec<(Fr, Fr)> = (0..4)
.map(|_| (Fr::random(&mut rng), Fr::random(&mut rng)))
.collect();
let res = lookup3_xy_with_conditional_negation(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val { index += 1 }
if b_val { index += 2 }
if a_val {
index += 1
}
if b_val {
index += 2
}
assert_eq!(res.0.get_value().unwrap(), points[index].0);
let mut tmp = points[index].1;
if c_val { tmp.negate() }
if c_val {
tmp.negate()
}
assert_eq!(res.1.get_value().unwrap(), tmp);
}
}
#[test]
fn test_synth() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let window_size = 4;
let mut assignment = vec![Fr::zero(); 1 << window_size];
let constants: Vec<_> = (0..(1 << window_size)).map(|_| Fr::rand(&mut rng)).collect();
let constants: Vec<_> = (0..(1 << window_size))
.map(|_| Fr::random(&mut rng))
.collect();
synth::<Bls12, _>(window_size, &constants, &mut assignment);


@@ -1,14 +1,9 @@
use ff::{Field, PrimeField};
use pairing::Engine;
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination,
Variable
};
use crate::{ConstraintSystem, LinearCombination, SynthesisError, Variable};
pub struct MultiEq<E: Engine, CS: ConstraintSystem<E>>{
pub struct MultiEq<E: Engine, CS: ConstraintSystem<E>> {
cs: CS,
ops: usize,
bits_used: usize,
@@ -23,12 +18,11 @@ impl<E: Engine, CS: ConstraintSystem<E>> MultiEq<E, CS> {
ops: 0,
bits_used: 0,
lhs: LinearCombination::zero(),
rhs: LinearCombination::zero()
rhs: LinearCombination::zero(),
}
}
fn accumulate(&mut self)
{
fn accumulate(&mut self) {
let ops = self.ops;
let lhs = self.lhs.clone();
let rhs = self.rhs.clone();
@@ -36,7 +30,7 @@ impl<E: Engine, CS: ConstraintSystem<E>> MultiEq<E, CS> {
|| format!("multieq {}", ops),
|_| lhs,
|lc| lc + CS::one(),
|_| rhs
|_| rhs,
);
self.lhs = LinearCombination::zero();
self.rhs = LinearCombination::zero();
@@ -48,9 +42,8 @@ impl<E: Engine, CS: ConstraintSystem<E>> MultiEq<E, CS> {
&mut self,
num_bits: usize,
lhs: &LinearCombination<E>,
rhs: &LinearCombination<E>
)
{
rhs: &LinearCombination<E>,
) {
// Check if we will exceed the capacity
if (E::Fr::CAPACITY as usize) <= (self.bits_used + num_bits) {
self.accumulate();
@@ -68,67 +61,60 @@ impl<E: Engine, CS: ConstraintSystem<E>> MultiEq<E, CS> {
impl<E: Engine, CS: ConstraintSystem<E>> Drop for MultiEq<E, CS> {
fn drop(&mut self) {
if self.bits_used > 0 {
self.accumulate();
self.accumulate();
}
}
}
impl<E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for MultiEq<E, CS>
{
impl<E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for MultiEq<E, CS> {
type Root = Self;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.cs.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.cs.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
self.cs.enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.cs.get_root().push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
self.cs.get_root().pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
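
The body of `enforce_equal` is largely elided in the hunks above, but the intent is visible from the capacity check and `accumulate`: several small equalities are packed at disjoint bit offsets into one pending equation, which is emitted as a single real constraint once the next equality would exceed `E::Fr::CAPACITY` (or when the `MultiEq` is dropped). The integer sketch below reconstructs that packing with `u128` in place of linear combinations; the shift-by-`bits_used` step is my reading of the elided code, not something the hunk shows.

```rust
// Stand-in capacity for this sketch; the real check uses E::Fr::CAPACITY.
const CAPACITY: usize = 100;

struct MultiEqSketch {
    bits_used: usize,
    lhs: u128,
    rhs: u128,
    flushed: Vec<(u128, u128)>, // each entry models one emitted constraint
}

impl MultiEqSketch {
    fn new() -> Self {
        MultiEqSketch { bits_used: 0, lhs: 0, rhs: 0, flushed: vec![] }
    }

    fn accumulate(&mut self) {
        self.flushed.push((self.lhs, self.rhs));
        self.lhs = 0;
        self.rhs = 0;
        self.bits_used = 0;
    }

    fn enforce_equal(&mut self, num_bits: usize, lhs: u128, rhs: u128) {
        if CAPACITY <= self.bits_used + num_bits {
            self.accumulate();
        }
        // Assumed reconstruction of the elided body: shift the new equality
        // above everything already packed in this pending constraint.
        self.lhs += lhs << self.bits_used;
        self.rhs += rhs << self.bits_used;
        self.bits_used += num_bits;
    }
}

fn main() {
    let mut eq = MultiEqSketch::new();
    eq.enforce_equal(34, 7, 7); // packed at bit offset 0
    eq.enforce_equal(34, 9, 9); // packed at bit offset 34, same constraint
    eq.enforce_equal(34, 1, 1); // would need 102 bits, so the pending constraint is flushed first
    eq.accumulate();            // models Drop flushing whatever is left
    assert_eq!(eq.flushed.len(), 2);
    println!("{:?}", eq.flushed);
}
```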


@@ -0,0 +1,110 @@
use super::boolean::Boolean;
use super::num::Num;
use super::Assignment;
use crate::{ConstraintSystem, SynthesisError};
use ff::{Field, PrimeField};
use pairing::Engine;
/// Takes a sequence of booleans and exposes them as compact
/// public inputs
pub fn pack_into_inputs<E, CS>(mut cs: CS, bits: &[Boolean]) -> Result<(), SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
for (i, bits) in bits.chunks(E::Fr::CAPACITY as usize).enumerate() {
let mut num = Num::<E>::zero();
let mut coeff = E::Fr::one();
for bit in bits {
num = num.add_bool_with_coeff(CS::one(), bit, coeff);
coeff.double();
}
let input = cs.alloc_input(|| format!("input {}", i), || Ok(*num.get_value().get()?))?;
// num * 1 = input
cs.enforce(
|| format!("packing constraint {}", i),
|_| num.lc(E::Fr::one()),
|lc| lc + CS::one(),
|lc| lc + input,
);
}
Ok(())
}
pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool> {
bytes
.iter()
.flat_map(|&v| (0..8).rev().map(move |i| (v >> i) & 1 == 1))
.collect()
}
pub fn bytes_to_bits_le(bytes: &[u8]) -> Vec<bool> {
bytes
.iter()
.flat_map(|&v| (0..8).map(move |i| (v >> i) & 1 == 1))
.collect()
}
pub fn compute_multipacking<E: Engine>(bits: &[bool]) -> Vec<E::Fr> {
let mut result = vec![];
for bits in bits.chunks(E::Fr::CAPACITY as usize) {
let mut cur = E::Fr::zero();
let mut coeff = E::Fr::one();
for bit in bits {
if *bit {
cur.add_assign(&coeff);
}
coeff.double();
}
result.push(cur);
}
result
}
#[test]
fn test_multipacking() {
use crate::ConstraintSystem;
use pairing::bls12_381::Bls12;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use super::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::*;
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for num_bits in 0..1500 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let bits: Vec<bool> = (0..num_bits).map(|_| rng.next_u32() % 2 != 0).collect();
let circuit_bits = bits
.iter()
.enumerate()
.map(|(i, &b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("bit {}", i)), Some(b)).unwrap(),
)
})
.collect::<Vec<_>>();
let expected_inputs = compute_multipacking::<Bls12>(&bits);
pack_into_inputs(cs.namespace(|| "pack"), &circuit_bits).unwrap();
assert!(cs.is_satisfied());
assert!(cs.verify(&expected_inputs));
}
}
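
multipack.rs is a new file. `compute_multipacking` packs a little-endian bit string into field elements, `E::Fr::CAPACITY` bits at a time (254 for BLS12-381's Fr). An integer-only sketch with `u128` standing in for a field element and a deliberately small stand-in capacity:

```rust
// Stand-in capacity for illustration; the real code uses E::Fr::CAPACITY.
const CAPACITY: usize = 64;

fn compute_multipacking_sketch(bits: &[bool]) -> Vec<u128> {
    let mut result = vec![];
    for chunk in bits.chunks(CAPACITY) {
        let mut cur: u128 = 0;
        let mut coeff: u128 = 1;
        for bit in chunk {
            if *bit {
                cur += coeff;
            }
            coeff <<= 1; // mirrors `coeff.double()` in the field version
        }
        result.push(cur);
    }
    result
}

fn main() {
    // Bits are read little-endian: [1, 1, 0, 1] is 1 + 2 + 8 = 11.
    let mut bits = vec![true, true, false, true];
    assert_eq!(compute_multipacking_sketch(&bits), vec![11]);

    // A 70-bit input spills into a second "field element".
    bits.resize(CAPACITY, false);
    bits.extend(std::iter::repeat(true).take(6));
    assert_eq!(compute_multipacking_sketch(&bits), vec![11, 0b111111]);
}
```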


@@ -1,78 +1,61 @@
use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr};
use pairing::Engine;
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination,
Variable
};
use crate::{ConstraintSystem, LinearCombination, SynthesisError, Variable};
use super::{
Assignment
};
use super::Assignment;
use super::boolean::{
self,
Boolean,
AllocatedBit
};
use super::boolean::{self, AllocatedBit, Boolean};
pub struct AllocatedNum<E: Engine> {
value: Option<E::Fr>,
variable: Variable
variable: Variable,
}
impl<E: Engine> Clone for AllocatedNum<E> {
fn clone(&self) -> Self {
AllocatedNum {
value: self.value,
variable: self.variable
variable: self.variable,
}
}
}
impl<E: Engine> AllocatedNum<E> {
pub fn alloc<CS, F>(
mut cs: CS,
value: F,
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>,
F: FnOnce() -> Result<E::Fr, SynthesisError>
pub fn alloc<CS, F>(mut cs: CS, value: F) -> Result<Self, SynthesisError>
where
CS: ConstraintSystem<E>,
F: FnOnce() -> Result<E::Fr, SynthesisError>,
{
let mut new_value = None;
let var = cs.alloc(|| "num", || {
let tmp = value()?;
let var = cs.alloc(
|| "num",
|| {
let tmp = value()?;
new_value = Some(tmp);
new_value = Some(tmp);
Ok(tmp)
})?;
Ok(tmp)
},
)?;
Ok(AllocatedNum {
value: new_value,
variable: var
variable: var,
})
}
pub fn inputize<CS>(
&self,
mut cs: CS
) -> Result<(), SynthesisError>
where CS: ConstraintSystem<E>
pub fn inputize<CS>(&self, mut cs: CS) -> Result<(), SynthesisError>
where
CS: ConstraintSystem<E>,
{
let input = cs.alloc_input(
|| "input variable",
|| {
Ok(*self.value.get()?)
}
)?;
let input = cs.alloc_input(|| "input variable", || Ok(*self.value.get()?))?;
cs.enforce(
|| "enforce input is correct",
|lc| lc + input,
|lc| lc + CS::one(),
|lc| lc + self.variable
|lc| lc + self.variable,
);
Ok(())
@@ -83,18 +66,17 @@ impl<E: Engine> AllocatedNum<E> {
/// order, requiring that the representation
/// strictly exists "in the field" (i.e., a
/// congruency is not allowed.)
pub fn into_bits_le_strict<CS>(
&self,
mut cs: CS
) -> Result<Vec<Boolean>, SynthesisError>
where CS: ConstraintSystem<E>
pub fn into_bits_le_strict<CS>(&self, mut cs: CS) -> Result<Vec<Boolean>, SynthesisError>
where
CS: ConstraintSystem<E>,
{
pub fn kary_and<E, CS>(
mut cs: CS,
v: &[AllocatedBit]
v: &[AllocatedBit],
) -> Result<AllocatedBit, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
where
E: Engine,
CS: ConstraintSystem<E>,
{
assert!(v.len() > 0);
@@ -109,7 +91,7 @@ impl<E: Engine> AllocatedNum<E> {
cur = Some(AllocatedBit::and(
cs.namespace(|| format!("and {}", i)),
cur.as_ref().unwrap(),
v
v,
)?);
}
}
@@ -145,10 +127,7 @@ impl<E: Engine> AllocatedNum<E> {
if b {
// This is part of a run of ones. Let's just
// allocate the boolean with the expected value.
let a_bit = AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
a_bit
)?;
let a_bit = AllocatedBit::alloc(cs.namespace(|| format!("bit {}", i)), a_bit)?;
// ... and add it to the current run of ones.
current_run.push(a_bit.clone());
result.push(a_bit);
@@ -162,7 +141,7 @@ impl<E: Engine> AllocatedNum<E> {
}
last_run = Some(kary_and(
cs.namespace(|| format!("run ending at {}", i)),
&current_run
&current_run,
)?);
current_run.truncate(0);
}
@@ -175,7 +154,7 @@ impl<E: Engine> AllocatedNum<E> {
let a_bit = AllocatedBit::alloc_conditionally(
cs.namespace(|| format!("bit {}", i)),
a_bit,
&last_run.as_ref().expect("char always starts with a one")
&last_run.as_ref().expect("char always starts with a one"),
)?;
result.push(a_bit);
}
@@ -201,12 +180,7 @@ impl<E: Engine> AllocatedNum<E> {
lc = lc - self.variable;
cs.enforce(
|| "unpacking constraint",
|lc| lc,
|lc| lc,
|_| lc
);
cs.enforce(|| "unpacking constraint", |lc| lc, |lc| lc, |_| lc);
// Convert into booleans, and reverse for little-endian bit order
Ok(result.into_iter().map(|b| Boolean::from(b)).rev().collect())
@@ -215,16 +189,11 @@ impl<E: Engine> AllocatedNum<E> {
/// Convert the allocated number into its little-endian representation.
/// Note that this does not strongly enforce that the commitment is
/// "in the field."
pub fn into_bits_le<CS>(
&self,
mut cs: CS
) -> Result<Vec<Boolean>, SynthesisError>
where CS: ConstraintSystem<E>
pub fn into_bits_le<CS>(&self, mut cs: CS) -> Result<Vec<Boolean>, SynthesisError>
where
CS: ConstraintSystem<E>,
{
let bits = boolean::field_into_allocated_bits_le(
&mut cs,
self.value
)?;
let bits = boolean::field_into_allocated_bits_le(&mut cs, self.value)?;
let mut lc = LinearCombination::zero();
let mut coeff = E::Fr::one();
@@ -237,94 +206,91 @@ impl<E: Engine> AllocatedNum<E> {
lc = lc - self.variable;
cs.enforce(
|| "unpacking constraint",
|lc| lc,
|lc| lc,
|_| lc
);
cs.enforce(|| "unpacking constraint", |lc| lc, |lc| lc, |_| lc);
Ok(bits.into_iter().map(|b| Boolean::from(b)).collect())
}
pub fn mul<CS>(
&self,
mut cs: CS,
other: &Self
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>
pub fn mul<CS>(&self, mut cs: CS, other: &Self) -> Result<Self, SynthesisError>
where
CS: ConstraintSystem<E>,
{
let mut value = None;
let var = cs.alloc(|| "product num", || {
let mut tmp = *self.value.get()?;
tmp.mul_assign(other.value.get()?);
let var = cs.alloc(
|| "product num",
|| {
let mut tmp = *self.value.get()?;
tmp.mul_assign(other.value.get()?);
value = Some(tmp);
value = Some(tmp);
Ok(tmp)
})?;
Ok(tmp)
},
)?;
// Constrain: a * b = ab
cs.enforce(
|| "multiplication constraint",
|lc| lc + self.variable,
|lc| lc + other.variable,
|lc| lc + var
|lc| lc + var,
);
Ok(AllocatedNum {
value: value,
variable: var
variable: var,
})
}
pub fn square<CS>(
&self,
mut cs: CS
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>
pub fn square<CS>(&self, mut cs: CS) -> Result<Self, SynthesisError>
where
CS: ConstraintSystem<E>,
{
let mut value = None;
let var = cs.alloc(|| "squared num", || {
let mut tmp = *self.value.get()?;
tmp.square();
let var = cs.alloc(
|| "squared num",
|| {
let mut tmp = *self.value.get()?;
tmp.square();
value = Some(tmp);
value = Some(tmp);
Ok(tmp)
})?;
Ok(tmp)
},
)?;
// Constrain: a * a = aa
cs.enforce(
|| "squaring constraint",
|lc| lc + self.variable,
|lc| lc + self.variable,
|lc| lc + var
|lc| lc + var,
);
Ok(AllocatedNum {
value: value,
variable: var
variable: var,
})
}
pub fn assert_nonzero<CS>(
&self,
mut cs: CS
) -> Result<(), SynthesisError>
where CS: ConstraintSystem<E>
pub fn assert_nonzero<CS>(&self, mut cs: CS) -> Result<(), SynthesisError>
where
CS: ConstraintSystem<E>,
{
let inv = cs.alloc(|| "ephemeral inverse", || {
let tmp = *self.value.get()?;
if tmp.is_zero() {
Err(SynthesisError::DivisionByZero)
} else {
Ok(tmp.inverse().unwrap())
}
})?;
let inv = cs.alloc(
|| "ephemeral inverse",
|| {
let tmp = *self.value.get()?;
if tmp.is_zero() {
Err(SynthesisError::DivisionByZero)
} else {
Ok(tmp.inverse().unwrap())
}
},
)?;
// Constrain a * inv = 1, which is only valid
// iff a has a multiplicative inverse, untrue
@@ -333,7 +299,7 @@ impl<E: Engine> AllocatedNum<E> {
|| "nonzero assertion constraint",
|lc| lc + self.variable,
|lc| lc + inv,
|lc| lc + CS::one()
|lc| lc + CS::one(),
);
Ok(())
@@ -346,44 +312,39 @@ impl<E: Engine> AllocatedNum<E> {
mut cs: CS,
a: &Self,
b: &Self,
condition: &Boolean
condition: &Boolean,
) -> Result<(Self, Self), SynthesisError>
where CS: ConstraintSystem<E>
where
CS: ConstraintSystem<E>,
{
let c = Self::alloc(
cs.namespace(|| "conditional reversal result 1"),
|| {
if *condition.get_value().get()? {
Ok(*b.value.get()?)
} else {
Ok(*a.value.get()?)
}
let c = Self::alloc(cs.namespace(|| "conditional reversal result 1"), || {
if *condition.get_value().get()? {
Ok(*b.value.get()?)
} else {
Ok(*a.value.get()?)
}
)?;
})?;
cs.enforce(
|| "first conditional reversal",
|lc| lc + a.variable - b.variable,
|_| condition.lc(CS::one(), E::Fr::one()),
|lc| lc + a.variable - c.variable
|lc| lc + a.variable - c.variable,
);
let d = Self::alloc(
cs.namespace(|| "conditional reversal result 2"),
|| {
if *condition.get_value().get()? {
Ok(*a.value.get()?)
} else {
Ok(*b.value.get()?)
}
let d = Self::alloc(cs.namespace(|| "conditional reversal result 2"), || {
if *condition.get_value().get()? {
Ok(*a.value.get()?)
} else {
Ok(*b.value.get()?)
}
)?;
})?;
cs.enforce(
|| "second conditional reversal",
|lc| lc + b.variable - a.variable,
|_| condition.lc(CS::one(), E::Fr::one()),
|lc| lc + b.variable - d.variable
|lc| lc + b.variable - d.variable,
);
Ok((c, d))
@@ -400,14 +361,14 @@ impl<E: Engine> AllocatedNum<E> {
pub struct Num<E: Engine> {
value: Option<E::Fr>,
lc: LinearCombination<E>
lc: LinearCombination<E>,
}
impl<E: Engine> From<AllocatedNum<E>> for Num<E> {
fn from(num: AllocatedNum<E>) -> Num<E> {
Num {
value: num.value,
lc: LinearCombination::<E>::zero() + num.variable
lc: LinearCombination::<E>::zero() + num.variable,
}
}
}
@@ -416,7 +377,7 @@ impl<E: Engine> Num<E> {
pub fn zero() -> Self {
Num {
value: Some(E::Fr::zero()),
lc: LinearCombination::zero()
lc: LinearCombination::zero(),
}
}
@@ -428,13 +389,7 @@ impl<E: Engine> Num<E> {
LinearCombination::zero() + (coeff, &self.lc)
}
pub fn add_bool_with_coeff(
self,
one: Variable,
bit: &Boolean,
coeff: E::Fr
) -> Self
{
pub fn add_bool_with_coeff(self, one: Variable, bit: &Boolean, coeff: E::Fr) -> Self {
let newval = match (self.value, bit.get_value()) {
(Some(mut curval), Some(bval)) => {
if bval {
@@ -442,25 +397,27 @@ impl<E: Engine> Num<E> {
}
Some(curval)
},
_ => None
}
_ => None,
};
Num {
value: newval,
lc: self.lc + &bit.lc(one, coeff)
lc: self.lc + &bit.lc(one, coeff),
}
}
}
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rand, Rng, XorShiftRng};
use bellman::{ConstraintSystem};
use crate::ConstraintSystem;
use ff::{BitIterator, Field, PrimeField};
use pairing::bls12_381::{Bls12, Fr};
use ::circuit::test::*;
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;
use super::{AllocatedNum, Boolean};
use crate::gadgets::test::*;
#[test]
fn test_allocated_num() {
@@ -489,8 +446,10 @@ mod test {
fn test_num_multiplication() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Fr::from_str("12").unwrap())).unwrap();
let n2 = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Fr::from_str("10").unwrap())).unwrap();
let n =
AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Fr::from_str("12").unwrap())).unwrap();
let n2 =
AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Fr::from_str("10").unwrap())).unwrap();
let n3 = n.mul(&mut cs, &n2).unwrap();
assert!(cs.is_satisfied());
@@ -502,12 +461,15 @@ mod test {
#[test]
fn test_num_conditional_reversal() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(rng.gen())).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(rng.gen())).unwrap();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Fr::random(&mut rng))).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Fr::random(&mut rng))).unwrap();
let condition = Boolean::constant(false);
let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();
@@ -520,8 +482,8 @@ mod test {
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(rng.gen())).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(rng.gen())).unwrap();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Fr::random(&mut rng))).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Fr::random(&mut rng))).unwrap();
let condition = Boolean::constant(true);
let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();
@@ -568,15 +530,21 @@ mod test {
cs.set("bit 254/boolean", Fr::one());
// this makes the conditional boolean constraint fail
assert_eq!(cs.which_is_unsatisfied().unwrap(), "bit 254/boolean constraint");
assert_eq!(
cs.which_is_unsatisfied().unwrap(),
"bit 254/boolean constraint"
);
}
#[test]
fn test_into_bits() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for i in 0..200 {
let r = Fr::rand(&mut rng);
let r = Fr::random(&mut rng);
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(r)).unwrap();
@@ -589,7 +557,10 @@ mod test {
assert!(cs.is_satisfied());
for (b, a) in BitIterator::new(r.into_repr()).skip(1).zip(bits.iter().rev()) {
for (b, a) in BitIterator::new(r.into_repr())
.skip(1)
.zip(bits.iter().rev())
{
if let &Boolean::Is(ref a) = a {
assert_eq!(b, a.get_value().unwrap());
} else {
@@ -597,7 +568,7 @@ mod test {
}
}
cs.set("num", Fr::rand(&mut rng));
cs.set("num", Fr::random(&mut rng));
assert!(!cs.is_satisfied());
cs.set("num", r);
assert!(cs.is_satisfied());
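
A quick integer check (plain arithmetic, not circuit code) that the two `conditionally_reverse` constraints in the hunks above pin down the intended outputs for both values of the condition bit:

```rust
// With cond in {0, 1}, the constraints
//   (a - b) * cond = a - c
//   (b - a) * cond = b - d
// force c = a, d = b when cond = 0 and c = b, d = a when cond = 1.
fn main() {
    for &(a, b) in &[(7i64, 11i64), (3, 3), (-5, 9)] {
        for &cond in &[0i64, 1i64] {
            // Values the circuit would allocate for c and d.
            let (c, d) = if cond == 1 { (b, a) } else { (a, b) };
            assert_eq!((a - b) * cond, a - c);
            assert_eq!((b - a) * cond, b - d);
        }
    }
    println!("conditional reversal constraints hold");
}
```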


@@ -1,7 +1,7 @@
use super::uint32::UInt32;
use super::multieq::MultiEq;
use super::boolean::Boolean;
use bellman::{ConstraintSystem, SynthesisError};
use super::multieq::MultiEq;
use super::uint32::UInt32;
use crate::{ConstraintSystem, SynthesisError};
use pairing::Engine;
const ROUND_CONSTANTS: [u32; 64] = [
@@ -12,37 +12,35 @@ const ROUND_CONSTANTS: [u32; 64] = [
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
];
const IV: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
pub fn sha256_block_no_padding<E, CS>(
mut cs: CS,
input: &[Boolean]
input: &[Boolean],
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
where
E: Engine,
CS: ConstraintSystem<E>,
{
assert_eq!(input.len(), 512);
Ok(sha256_compression_function(
&mut cs,
&input,
&get_sha256_iv()
)?
.into_iter()
.flat_map(|e| e.into_bits_be())
.collect())
Ok(
sha256_compression_function(&mut cs, &input, &get_sha256_iv())?
.into_iter()
.flat_map(|e| e.into_bits_be())
.collect(),
)
}
pub fn sha256<E, CS>(
mut cs: CS,
input: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
pub fn sha256<E, CS>(mut cs: CS, input: &[Boolean]) -> Result<Vec<Boolean>, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
assert!(input.len() % 8 == 0);
@@ -62,16 +60,10 @@ pub fn sha256<E, CS>(
let mut cur = get_sha256_iv();
for (i, block) in padded.chunks(512).enumerate() {
cur = sha256_compression_function(
cs.namespace(|| format!("block {}", i)),
block,
&cur
)?;
cur = sha256_compression_function(cs.namespace(|| format!("block {}", i)), block, &cur)?;
}
Ok(cur.into_iter()
.flat_map(|e| e.into_bits_be())
.collect())
Ok(cur.into_iter().flat_map(|e| e.into_bits_be()).collect())
}
fn get_sha256_iv() -> Vec<UInt32> {
@@ -81,16 +73,19 @@ fn get_sha256_iv() -> Vec<UInt32> {
fn sha256_compression_function<E, CS>(
cs: CS,
input: &[Boolean],
current_hash_value: &[UInt32]
current_hash_value: &[UInt32],
) -> Result<Vec<UInt32>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
where
E: Engine,
CS: ConstraintSystem<E>,
{
assert_eq!(input.len(), 512);
assert_eq!(current_hash_value.len(), 8);
let mut w = input.chunks(32)
.map(|e| UInt32::from_bits_be(e))
.collect::<Vec<_>>();
let mut w = input
.chunks(32)
.map(|e| UInt32::from_bits_be(e))
.collect::<Vec<_>>();
// We can save some constraints by combining some of
// the constraints in different u32 additions
@@ -100,30 +95,18 @@ fn sha256_compression_function<E, CS>(
let cs = &mut cs.namespace(|| format!("w extension {}", i));
// s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3)
let mut s0 = w[i-15].rotr(7);
s0 = s0.xor(
cs.namespace(|| "first xor for s0"),
&w[i-15].rotr(18)
)?;
s0 = s0.xor(
cs.namespace(|| "second xor for s0"),
&w[i-15].shr(3)
)?;
let mut s0 = w[i - 15].rotr(7);
s0 = s0.xor(cs.namespace(|| "first xor for s0"), &w[i - 15].rotr(18))?;
s0 = s0.xor(cs.namespace(|| "second xor for s0"), &w[i - 15].shr(3))?;
// s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10)
let mut s1 = w[i-2].rotr(17);
s1 = s1.xor(
cs.namespace(|| "first xor for s1"),
&w[i-2].rotr(19)
)?;
s1 = s1.xor(
cs.namespace(|| "second xor for s1"),
&w[i-2].shr(10)
)?;
let mut s1 = w[i - 2].rotr(17);
s1 = s1.xor(cs.namespace(|| "first xor for s1"), &w[i - 2].rotr(19))?;
s1 = s1.xor(cs.namespace(|| "second xor for s1"), &w[i - 2].shr(10))?;
let tmp = UInt32::addmany(
cs.namespace(|| "computation of w[i]"),
&[w[i-16].clone(), s0, w[i-7].clone(), s1]
&[w[i - 16].clone(), s0, w[i - 7].clone(), s1],
)?;
// w[i] := w[i-16] + s0 + w[i-7] + s1
@@ -134,29 +117,21 @@ fn sha256_compression_function<E, CS>(
enum Maybe {
Deferred(Vec<UInt32>),
Concrete(UInt32)
Concrete(UInt32),
}
impl Maybe {
fn compute<E, CS, M>(
self,
cs: M,
others: &[UInt32]
) -> Result<UInt32, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root=MultiEq<E, CS>>
fn compute<E, CS, M>(self, cs: M, others: &[UInt32]) -> Result<UInt32, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root = MultiEq<E, CS>>,
{
Ok(match self {
Maybe::Concrete(ref v) => {
return Ok(v.clone())
},
Maybe::Concrete(ref v) => return Ok(v.clone()),
Maybe::Deferred(mut v) => {
v.extend(others.into_iter().cloned());
UInt32::addmany(
cs,
&v
)?
UInt32::addmany(cs, &v)?
}
})
}
@@ -177,22 +152,11 @@ fn sha256_compression_function<E, CS>(
// S1 := (e rightrotate 6) xor (e rightrotate 11) xor (e rightrotate 25)
let new_e = e.compute(cs.namespace(|| "deferred e computation"), &[])?;
let mut s1 = new_e.rotr(6);
s1 = s1.xor(
cs.namespace(|| "first xor for s1"),
&new_e.rotr(11)
)?;
s1 = s1.xor(
cs.namespace(|| "second xor for s1"),
&new_e.rotr(25)
)?;
s1 = s1.xor(cs.namespace(|| "first xor for s1"), &new_e.rotr(11))?;
s1 = s1.xor(cs.namespace(|| "second xor for s1"), &new_e.rotr(25))?;
// ch := (e and f) xor ((not e) and g)
let ch = UInt32::sha256_ch(
cs.namespace(|| "ch"),
&new_e,
&f,
&g
)?;
let ch = UInt32::sha256_ch(cs.namespace(|| "ch"), &new_e, &f, &g)?;
// temp1 := h + S1 + ch + k[i] + w[i]
let temp1 = vec![
@@ -200,28 +164,17 @@ fn sha256_compression_function<E, CS>(
s1,
ch,
UInt32::constant(ROUND_CONSTANTS[i]),
w[i].clone()
w[i].clone(),
];
// S0 := (a rightrotate 2) xor (a rightrotate 13) xor (a rightrotate 22)
let new_a = a.compute(cs.namespace(|| "deferred a computation"), &[])?;
let mut s0 = new_a.rotr(2);
s0 = s0.xor(
cs.namespace(|| "first xor for s0"),
&new_a.rotr(13)
)?;
s0 = s0.xor(
cs.namespace(|| "second xor for s0"),
&new_a.rotr(22)
)?;
s0 = s0.xor(cs.namespace(|| "first xor for s0"), &new_a.rotr(13))?;
s0 = s0.xor(cs.namespace(|| "second xor for s0"), &new_a.rotr(22))?;
// maj := (a and b) xor (a and c) xor (b and c)
let maj = UInt32::sha256_maj(
cs.namespace(|| "maj"),
&new_a,
&b,
&c
)?;
let maj = UInt32::sha256_maj(cs.namespace(|| "maj"), &new_a, &b, &c)?;
// temp2 := S0 + maj
let temp2 = vec![s0, maj];
@@ -244,7 +197,13 @@ fn sha256_compression_function<E, CS>(
d = c;
c = b;
b = new_a;
a = Maybe::Deferred(temp1.iter().cloned().chain(temp2.iter().cloned()).collect::<Vec<_>>());
a = Maybe::Deferred(
temp1
.iter()
.cloned()
.chain(temp2.iter().cloned())
.collect::<Vec<_>>(),
);
}
/*
@@ -261,42 +220,42 @@ fn sha256_compression_function<E, CS>(
let h0 = a.compute(
cs.namespace(|| "deferred h0 computation"),
&[current_hash_value[0].clone()]
&[current_hash_value[0].clone()],
)?;
let h1 = UInt32::addmany(
cs.namespace(|| "new h1"),
&[current_hash_value[1].clone(), b]
&[current_hash_value[1].clone(), b],
)?;
let h2 = UInt32::addmany(
cs.namespace(|| "new h2"),
&[current_hash_value[2].clone(), c]
&[current_hash_value[2].clone(), c],
)?;
let h3 = UInt32::addmany(
cs.namespace(|| "new h3"),
&[current_hash_value[3].clone(), d]
&[current_hash_value[3].clone(), d],
)?;
let h4 = e.compute(
cs.namespace(|| "deferred h4 computation"),
&[current_hash_value[4].clone()]
&[current_hash_value[4].clone()],
)?;
let h5 = UInt32::addmany(
cs.namespace(|| "new h5"),
&[current_hash_value[5].clone(), f]
&[current_hash_value[5].clone(), f],
)?;
let h6 = UInt32::addmany(
cs.namespace(|| "new h6"),
&[current_hash_value[6].clone(), g]
&[current_hash_value[6].clone(), g],
)?;
let h7 = UInt32::addmany(
cs.namespace(|| "new h7"),
&[current_hash_value[7].clone(), h]
&[current_hash_value[7].clone(), h],
)?;
Ok(vec![h0, h1, h2, h3, h4, h5, h6, h7])
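// Illustrative sketch, outside the circuit: one SHA-256 round on plain u32
// state, matching the S0/S1/ch/maj/temp1/temp2 formulas the gadgets above
// enforce. `sha256_round_plain`, `k` (round constant) and `w` (schedule word)
// are hypothetical names for this example.
fn sha256_round_plain(state: &mut [u32; 8], k: u32, w: u32) {
    let (a, b, c, d, e, f, g, h) = (
        state[0], state[1], state[2], state[3], state[4], state[5], state[6], state[7],
    );
    // S1 := (e rightrotate 6) xor (e rightrotate 11) xor (e rightrotate 25)
    let s1 = e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25);
    // ch := (e and f) xor ((not e) and g)
    let ch = (e & f) ^ ((!e) & g);
    // temp1 := h + S1 + ch + k[i] + w[i]
    let temp1 = h
        .wrapping_add(s1)
        .wrapping_add(ch)
        .wrapping_add(k)
        .wrapping_add(w);
    // S0 := (a rightrotate 2) xor (a rightrotate 13) xor (a rightrotate 22)
    let s0 = a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22);
    // maj := (a and b) xor (a and c) xor (b and c)
    let maj = (a & b) ^ (a & c) ^ (b & c);
    // temp2 := S0 + maj
    let temp2 = s0.wrapping_add(maj);
    *state = [
        temp1.wrapping_add(temp2), // new a
        a,                         // new b
        b,                         // new c
        c,                         // new d
        d.wrapping_add(temp1),     // new e
        e,                         // new f
        f,                         // new g
        g,                         // new h
    ];
}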
@@ -305,10 +264,11 @@ fn sha256_compression_function<E, CS>(
#[cfg(test)]
mod test {
use super::*;
use circuit::boolean::AllocatedBit;
use crate::gadgets::boolean::AllocatedBit;
use crate::gadgets::test::TestConstraintSystem;
use pairing::bls12_381::Bls12;
use circuit::test::TestConstraintSystem;
use rand::{XorShiftRng, SeedableRng, Rng};
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
#[test]
fn test_blank_hash() {
@@ -317,11 +277,7 @@ mod test {
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut input_bits: Vec<_> = (0..512).map(|_| Boolean::Constant(false)).collect();
input_bits[0] = Boolean::Constant(true);
let out = sha256_compression_function(
&mut cs,
&input_bits,
&iv
).unwrap();
let out = sha256_compression_function(&mut cs, &input_bits, &iv).unwrap();
let out_bits: Vec<_> = out.into_iter().flat_map(|e| e.into_bits_be()).collect();
assert!(cs.is_satisfied());
@@ -341,25 +297,27 @@ mod test {
#[test]
fn test_full_block() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let iv = get_sha256_iv();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bits: Vec<_> = (0..512).map(|i| {
Boolean::from(
AllocatedBit::alloc(
cs.namespace(|| format!("input bit {}", i)),
Some(rng.gen())
).unwrap()
)
}).collect();
let input_bits: Vec<_> = (0..512)
.map(|i| {
Boolean::from(
AllocatedBit::alloc(
cs.namespace(|| format!("input bit {}", i)),
Some(rng.next_u32() % 2 != 0),
)
.unwrap(),
)
})
.collect();
sha256_compression_function(
cs.namespace(|| "sha256"),
&input_bits,
&iv
).unwrap();
sha256_compression_function(cs.namespace(|| "sha256"), &input_bits, &iv).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints() - 512, 25840);
@@ -367,18 +325,18 @@ mod test {
#[test]
fn test_against_vectors() {
use crypto::sha2::Sha256;
use crypto::digest::Digest;
use sha2::{Digest, Sha256};
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0))
{
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0)) {
let mut h = Sha256::new();
let data: Vec<u8> = (0..input_len).map(|_| rng.gen()).collect();
let data: Vec<u8> = (0..input_len).map(|_| rng.next_u32() as u8).collect();
h.input(&data);
let mut hash_result = [0u8; 32];
h.result(&mut hash_result[..]);
let hash_result = h.result();
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut input_bits = vec![];
@@ -387,7 +345,11 @@ mod test {
for bit_i in (0..8).rev() {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8)).unwrap().into());
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
@@ -395,17 +357,19 @@ mod test {
assert!(cs.is_satisfied());
let mut s = hash_result.as_ref().iter()
.flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8));
let mut s = hash_result
.as_ref()
.iter()
.flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
},
}
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
},
}
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);

View File

@@ -1,13 +1,7 @@
use ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::Engine;
use bellman::{
LinearCombination,
SynthesisError,
ConstraintSystem,
Variable,
Index
};
use crate::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use std::collections::HashMap;
use std::fmt::Write;
@@ -16,13 +10,13 @@ use byteorder::{BigEndian, ByteOrder};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use blake2_rfc::blake2s::Blake2s;
use blake2s_simd::{Params as Blake2sParams, State as Blake2sState};
#[derive(Debug)]
enum NamedObject {
Constraint(usize),
Var(Variable),
Namespace
Namespace,
}
/// Constraint system for testing purposes.
@@ -33,10 +27,10 @@ pub struct TestConstraintSystem<E: Engine> {
LinearCombination<E>,
LinearCombination<E>,
LinearCombination<E>,
String
String,
)>,
inputs: Vec<(E::Fr, String)>,
aux: Vec<(E::Fr, String)>
aux: Vec<(E::Fr, String)>,
}
#[derive(Clone, Copy)]
@@ -48,7 +42,7 @@ impl PartialEq for OrderedVariable {
match (self.0.get_unchecked(), other.0.get_unchecked()) {
(Index::Input(ref a), Index::Input(ref b)) => a == b,
(Index::Aux(ref a), Index::Aux(ref b)) => a == b,
_ => false
_ => false,
}
}
}
@@ -63,20 +57,17 @@ impl Ord for OrderedVariable {
(Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),
(Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),
(Index::Input(_), Index::Aux(_)) => Ordering::Less,
(Index::Aux(_), Index::Input(_)) => Ordering::Greater
(Index::Aux(_), Index::Input(_)) => Ordering::Greater,
}
}
}
fn proc_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
) -> BTreeMap<OrderedVariable, E::Fr>
{
fn proc_lc<E: Engine>(terms: &[(Variable, E::Fr)]) -> BTreeMap<OrderedVariable, E::Fr> {
let mut map = BTreeMap::new();
for &(var, coeff) in terms {
map.entry(OrderedVariable(var))
.or_insert(E::Fr::zero())
.add_assign(&coeff);
.or_insert(E::Fr::zero())
.add_assign(&coeff);
}
// Remove terms that have a zero coefficient to normalize
@@ -94,11 +85,7 @@ fn proc_lc<E: Engine>(
map
}
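// Illustrative sketch with plain integer coefficients: the normalization that
// `proc_lc` performs — group coefficients per variable and drop terms that sum
// to zero, so syntactically different but equal linear combinations compare
// (and hash) identically. `normalize_terms` is a hypothetical name.
fn normalize_terms(terms: &[(usize, i64)]) -> std::collections::BTreeMap<usize, i64> {
    let mut map = std::collections::BTreeMap::new();
    for &(var, coeff) in terms {
        *map.entry(var).or_insert(0i64) += coeff;
    }
    // Remove terms with a zero coefficient to normalize.
    map.into_iter().filter(|&(_, c)| c != 0).collect()
}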
fn hash_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
h: &mut Blake2s
)
{
fn hash_lc<E: Engine>(terms: &[(Variable, E::Fr)], h: &mut Blake2sState) {
let map = proc_lc::<E>(terms);
let mut buf = [0u8; 9 + 32];
@@ -110,13 +97,13 @@ fn hash_lc<E: Engine>(
Index::Input(i) => {
buf[0] = b'I';
BigEndian::write_u64(&mut buf[1..9], i as u64);
},
}
Index::Aux(i) => {
buf[0] = b'A';
BigEndian::write_u64(&mut buf[1..9], i as u64);
}
}
coeff.into_repr().write_be(&mut buf[9..]).unwrap();
h.update(&buf);
@@ -126,15 +113,14 @@ fn hash_lc<E: Engine>(
fn eval_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
inputs: &[(E::Fr, String)],
aux: &[(E::Fr, String)]
) -> E::Fr
{
aux: &[(E::Fr, String)],
) -> E::Fr {
let mut acc = E::Fr::zero();
for &(var, ref coeff) in terms {
let mut tmp = match var.get_unchecked() {
Index::Input(index) => inputs[index].0,
Index::Aux(index) => aux[index].0
Index::Aux(index) => aux[index].0,
};
tmp.mul_assign(&coeff);
@@ -147,14 +133,17 @@ fn eval_lc<E: Engine>(
impl<E: Engine> TestConstraintSystem<E> {
pub fn new() -> TestConstraintSystem<E> {
let mut map = HashMap::new();
map.insert("ONE".into(), NamedObject::Var(TestConstraintSystem::<E>::one()));
map.insert(
"ONE".into(),
NamedObject::Var(TestConstraintSystem::<E>::one()),
);
TestConstraintSystem {
named_objects: map,
current_namespace: vec![],
constraints: vec![],
inputs: vec![(E::Fr::one(), "ONE".into())],
aux: vec![]
aux: vec![],
}
}
@@ -167,9 +156,9 @@ impl<E: Engine> TestConstraintSystem<E> {
tmp
};
let powers_of_two = (0..E::Fr::NUM_BITS).map(|i| {
E::Fr::from_str("2").unwrap().pow(&[i as u64])
}).collect::<Vec<_>>();
let powers_of_two = (0..E::Fr::NUM_BITS)
.map(|i| E::Fr::from_str("2").unwrap().pow(&[i as u64]))
.collect::<Vec<_>>();
let pp = |s: &mut String, lc: &LinearCombination<E>| {
write!(s, "(").unwrap();
@@ -196,7 +185,7 @@ impl<E: Engine> TestConstraintSystem<E> {
match var.0.get_unchecked() {
Index::Input(i) => {
write!(s, "`{}`", &self.inputs[i].1).unwrap();
},
}
Index::Aux(i) => {
write!(s, "`{}`", &self.aux[i].1).unwrap();
}
@@ -226,7 +215,7 @@ impl<E: Engine> TestConstraintSystem<E> {
}
pub fn hash(&self) -> String {
let mut h = Blake2s::new(32);
let mut h = Blake2sParams::new().hash_length(32).to_state();
{
let mut buf = [0u8; 24];
@@ -259,45 +248,41 @@ impl<E: Engine> TestConstraintSystem<E> {
a.mul_assign(&b);
if a != c {
return Some(&*path)
return Some(&*path);
}
}
None
}
pub fn is_satisfied(&self) -> bool
{
pub fn is_satisfied(&self) -> bool {
self.which_is_unsatisfied().is_none()
}
pub fn num_constraints(&self) -> usize
{
pub fn num_constraints(&self) -> usize {
self.constraints.len()
}
pub fn set(&mut self, path: &str, to: E::Fr)
{
pub fn set(&mut self, path: &str, to: E::Fr) {
match self.named_objects.get(path) {
Some(&NamedObject::Var(ref v)) => {
match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0 = to,
Index::Aux(index) => self.aux[index].0 = to
}
}
Some(e) => panic!("tried to set path `{}` to value, but `{:?}` already exists there.", path, e),
_ => panic!("no variable exists at path: {}", path)
Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0 = to,
Index::Aux(index) => self.aux[index].0 = to,
},
Some(e) => panic!(
"tried to set path `{}` to value, but `{:?}` already exists there.",
path, e
),
_ => panic!("no variable exists at path: {}", path),
}
}
pub fn verify(&self, expected: &[E::Fr]) -> bool
{
pub fn verify(&self, expected: &[E::Fr]) -> bool {
assert_eq!(expected.len() + 1, self.inputs.len());
for (a, b) in self.inputs.iter().skip(1).zip(expected.iter())
{
for (a, b) in self.inputs.iter().skip(1).zip(expected.iter()) {
if &a.0 != b {
return false
return false;
}
}
@@ -308,8 +293,7 @@ impl<E: Engine> TestConstraintSystem<E> {
self.inputs.len()
}
pub fn get_input(&mut self, index: usize, path: &str) -> E::Fr
{
pub fn get_input(&mut self, index: usize, path: &str) -> E::Fr {
let (assignment, name) = self.inputs[index].clone();
assert_eq!(path, name);
@@ -317,17 +301,17 @@ impl<E: Engine> TestConstraintSystem<E> {
assignment
}
pub fn get(&mut self, path: &str) -> E::Fr
{
pub fn get(&mut self, path: &str) -> E::Fr {
match self.named_objects.get(path) {
Some(&NamedObject::Var(ref v)) => {
match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0,
Index::Aux(index) => self.aux[index].0
}
}
Some(e) => panic!("tried to get value of path `{}`, but `{:?}` exists there (not a variable)", path, e),
_ => panic!("no variable exists at path: {}", path)
Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0,
Index::Aux(index) => self.aux[index].0,
},
Some(e) => panic!(
"tried to get value of path `{}`, but `{:?}` exists there (not a variable)",
path, e
),
_ => panic!("no variable exists at path: {}", path),
}
}
@@ -348,8 +332,7 @@ fn compute_path(ns: &[String], this: String) -> String {
let mut name = String::new();
let mut needs_separation = false;
for ns in ns.iter().chain(Some(&this).into_iter())
{
for ns in ns.iter().chain(Some(&this).into_iter()) {
if needs_separation {
name += "/";
}
@@ -364,12 +347,11 @@ fn compute_path(ns: &[String], this: String) -> String {
impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
let index = self.aux.len();
let path = compute_path(&self.current_namespace, annotation().into());
@@ -380,12 +362,11 @@ impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
Ok(var)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
let index = self.inputs.len();
let path = compute_path(&self.current_namespace, annotation().into());
@@ -396,17 +377,13 @@ impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
Ok(var)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
let path = compute_path(&self.current_namespace, annotation().into());
let index = self.constraints.len();
@@ -420,7 +397,9 @@ impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
let name = name_fn().into();
let path = compute_path(&self.current_namespace, name.clone());
@@ -428,13 +407,11 @@ impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
self.current_namespace.push(name);
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
assert!(self.current_namespace.pop().is_some());
}
fn get_root(&mut self) -> &mut Self::Root
{
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
@@ -447,28 +424,26 @@ fn test_cs() {
let mut cs = TestConstraintSystem::<Bls12>::new();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 0);
let a = cs.namespace(|| "a").alloc(|| "var", || Ok(Fr::from_str("10").unwrap())).unwrap();
let b = cs.namespace(|| "b").alloc(|| "var", || Ok(Fr::from_str("4").unwrap())).unwrap();
let c = cs.alloc(|| "product", || Ok(Fr::from_str("40").unwrap())).unwrap();
let a = cs
.namespace(|| "a")
.alloc(|| "var", || Ok(Fr::from_str("10").unwrap()))
.unwrap();
let b = cs
.namespace(|| "b")
.alloc(|| "var", || Ok(Fr::from_str("4").unwrap()))
.unwrap();
let c = cs
.alloc(|| "product", || Ok(Fr::from_str("40").unwrap()))
.unwrap();
cs.enforce(
|| "mult",
|lc| lc + a,
|lc| lc + b,
|lc| lc + c
);
cs.enforce(|| "mult", |lc| lc + a, |lc| lc + b, |lc| lc + c);
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 1);
cs.set("a/var", Fr::from_str("4").unwrap());
let one = TestConstraintSystem::<Bls12>::one();
cs.enforce(
|| "eq",
|lc| lc + a,
|lc| lc + one,
|lc| lc + b
);
cs.enforce(|| "eq", |lc| lc + a, |lc| lc + one, |lc| lc + b);
assert!(!cs.is_satisfied());
assert!(cs.which_is_unsatisfied() == Some("mult"));

View File

@@ -1,16 +1,9 @@
use ff::{Field, PrimeField};
use pairing::Engine;
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination
};
use crate::{ConstraintSystem, LinearCombination, SynthesisError};
use super::boolean::{
Boolean,
AllocatedBit
};
use super::boolean::{AllocatedBit, Boolean};
use super::multieq::MultiEq;
@@ -20,13 +13,12 @@ use super::multieq::MultiEq;
pub struct UInt32 {
// Least significant bit first
bits: Vec<Boolean>,
value: Option<u32>
value: Option<u32>,
}
impl UInt32 {
/// Construct a constant `UInt32` from a `u32`
pub fn constant(value: u32) -> Self
{
pub fn constant(value: u32) -> Self {
let mut bits = Vec::with_capacity(32);
let mut tmp = value;
@@ -42,17 +34,15 @@ impl UInt32 {
UInt32 {
bits: bits,
value: Some(value)
value: Some(value),
}
}
/// Allocate a `UInt32` in the constraint system
pub fn alloc<E, CS>(
mut cs: CS,
value: Option<u32>
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
pub fn alloc<E, CS>(mut cs: CS, value: Option<u32>) -> Result<Self, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
let values = match value {
Some(mut val) => {
@@ -64,23 +54,24 @@ impl UInt32 {
}
v
},
None => vec![None; 32]
}
None => vec![None; 32],
};
let bits = values.into_iter()
.enumerate()
.map(|(i, v)| {
Ok(Boolean::from(AllocatedBit::alloc(
cs.namespace(|| format!("allocated bit {}", i)),
v
)?))
})
.collect::<Result<Vec<_>, SynthesisError>>()?;
let bits = values
.into_iter()
.enumerate()
.map(|(i, v)| {
Ok(Boolean::from(AllocatedBit::alloc(
cs.namespace(|| format!("allocated bit {}", i)),
v,
)?))
})
.collect::<Result<Vec<_>, SynthesisError>>()?;
Ok(UInt32 {
bits: bits,
value: value
value: value,
})
}
@@ -96,19 +87,22 @@ impl UInt32 {
value.as_mut().map(|v| *v <<= 1);
match b.get_value() {
Some(true) => { value.as_mut().map(|v| *v |= 1); },
Some(false) => {},
None => { value = None; }
Some(true) => {
value.as_mut().map(|v| *v |= 1);
}
Some(false) => {}
None => {
value = None;
}
}
}
UInt32 {
value: value,
bits: bits.iter().rev().cloned().collect()
bits: bits.iter().rev().cloned().collect(),
}
}
/// Turns this `UInt32` into its little-endian byte order representation.
pub fn into_bits(&self) -> Vec<Boolean> {
self.bits.clone()
@@ -116,8 +110,7 @@ impl UInt32 {
/// Converts a little-endian byte order representation of bits into a
/// `UInt32`.
pub fn from_bits(bits: &[Boolean]) -> Self
{
pub fn from_bits(bits: &[Boolean]) -> Self {
assert_eq!(bits.len(), 32);
let new_bits = bits.to_vec();
@@ -131,43 +124,45 @@ impl UInt32 {
if b {
value.as_mut().map(|v| *v |= 1);
}
},
&Boolean::Is(ref b) => {
match b.get_value() {
Some(true) => { value.as_mut().map(|v| *v |= 1); },
Some(false) => {},
None => { value = None }
}
},
&Boolean::Not(ref b) => {
match b.get_value() {
Some(false) => { value.as_mut().map(|v| *v |= 1); },
Some(true) => {},
None => { value = None }
}
}
&Boolean::Is(ref b) => match b.get_value() {
Some(true) => {
value.as_mut().map(|v| *v |= 1);
}
Some(false) => {}
None => value = None,
},
&Boolean::Not(ref b) => match b.get_value() {
Some(false) => {
value.as_mut().map(|v| *v |= 1);
}
Some(true) => {}
None => value = None,
},
}
}
UInt32 {
value: value,
bits: new_bits
bits: new_bits,
}
}
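// Illustrative sketch on plain values, assuming the least-significant-bit-first
// convention described above: how a 32-bit word corresponds to the bit vector
// that `from_bits`/`into_bits` work with. `u32_from_bits_le` is a hypothetical
// name for this example.
fn u32_from_bits_le(bits: &[bool]) -> u32 {
    assert_eq!(bits.len(), 32);
    bits.iter()
        .enumerate()
        .fold(0u32, |acc, (i, &b)| acc | ((b as u32) << i))
}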
pub fn rotr(&self, by: usize) -> Self {
let by = by % 32;
let new_bits = self.bits.iter()
.skip(by)
.chain(self.bits.iter())
.take(32)
.cloned()
.collect();
let new_bits = self
.bits
.iter()
.skip(by)
.chain(self.bits.iter())
.take(32)
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v.rotate_right(by as u32))
value: self.value.map(|v| v.rotate_right(by as u32)),
}
}
@@ -176,17 +171,18 @@ impl UInt32 {
let fill = Boolean::constant(false);
let new_bits = self.bits
.iter() // The bits are least significant first
.skip(by) // Skip the bits that will be lost during the shift
.chain(Some(&fill).into_iter().cycle()) // Rest will be zeros
.take(32) // Only 32 bits needed!
.cloned()
.collect();
let new_bits = self
.bits
.iter() // The bits are least significant first
.skip(by) // Skip the bits that will be lost during the shift
.chain(Some(&fill).into_iter().cycle()) // Rest will be zeros
.take(32) // Only 32 bits needed!
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v >> by as u32)
value: self.value.map(|v| v >> by as u32),
}
}
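// Illustrative reference on plain values, assuming the shift amount is reduced
// mod 32 as in `rotr` above: the gadget-level `rotr` and `shr` mirror
// `u32::rotate_right` and a logical right shift on the witnessed word.
// `rotr_shr_reference` is a hypothetical name.
fn rotr_shr_reference(v: u32, by: usize) -> (u32, u32) {
    let by = (by % 32) as u32;
    (v.rotate_right(by), v >> by)
}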
@@ -196,121 +192,99 @@ impl UInt32 {
b: &Self,
c: &Self,
tri_fn: F,
circuit_fn: U
circuit_fn: U,
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
F: Fn(u32, u32, u32) -> u32,
U: Fn(&mut CS, usize, &Boolean, &Boolean, &Boolean) -> Result<Boolean, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
F: Fn(u32, u32, u32) -> u32,
U: Fn(&mut CS, usize, &Boolean, &Boolean, &Boolean) -> Result<Boolean, SynthesisError>,
{
let new_value = match (a.value, b.value, c.value) {
(Some(a), Some(b), Some(c)) => {
Some(tri_fn(a, b, c))
},
_ => None
(Some(a), Some(b), Some(c)) => Some(tri_fn(a, b, c)),
_ => None,
};
let bits = a.bits.iter()
.zip(b.bits.iter())
.zip(c.bits.iter())
.enumerate()
.map(|(i, ((a, b), c))| circuit_fn(&mut cs, i, a, b, c))
.collect::<Result<_, _>>()?;
let bits = a
.bits
.iter()
.zip(b.bits.iter())
.zip(c.bits.iter())
.enumerate()
.map(|(i, ((a, b), c))| circuit_fn(&mut cs, i, a, b, c))
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits: bits,
value: new_value
value: new_value,
})
}
/// Compute the `maj` value (a and b) xor (a and c) xor (b and c)
/// during SHA256.
pub fn sha256_maj<E, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
pub fn sha256_maj<E, CS>(cs: CS, a: &Self, b: &Self, c: &Self) -> Result<Self, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
Self::triop(cs, a, b, c, |a, b, c| (a & b) ^ (a & c) ^ (b & c),
|cs, i, a, b, c| {
Boolean::sha256_maj(
cs.namespace(|| format!("maj {}", i)),
a,
b,
c
)
}
Self::triop(
cs,
a,
b,
c,
|a, b, c| (a & b) ^ (a & c) ^ (b & c),
|cs, i, a, b, c| Boolean::sha256_maj(cs.namespace(|| format!("maj {}", i)), a, b, c),
)
}
/// Compute the `ch` value `(a and b) xor ((not a) and c)`
/// during SHA256.
pub fn sha256_ch<E, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
pub fn sha256_ch<E, CS>(cs: CS, a: &Self, b: &Self, c: &Self) -> Result<Self, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
Self::triop(cs, a, b, c, |a, b, c| (a & b) ^ ((!a) & c),
|cs, i, a, b, c| {
Boolean::sha256_ch(
cs.namespace(|| format!("ch {}", i)),
a,
b,
c
)
}
Self::triop(
cs,
a,
b,
c,
|a, b, c| (a & b) ^ ((!a) & c),
|cs, i, a, b, c| Boolean::sha256_ch(cs.namespace(|| format!("ch {}", i)), a, b, c),
)
}
/// XOR this `UInt32` with another `UInt32`
pub fn xor<E, CS>(
&self,
mut cs: CS,
other: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
pub fn xor<E, CS>(&self, mut cs: CS, other: &Self) -> Result<Self, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
{
let new_value = match (self.value, other.value) {
(Some(a), Some(b)) => {
Some(a ^ b)
},
_ => None
(Some(a), Some(b)) => Some(a ^ b),
_ => None,
};
let bits = self.bits.iter()
.zip(other.bits.iter())
.enumerate()
.map(|(i, (a, b))| {
Boolean::xor(
cs.namespace(|| format!("xor of bit {}", i)),
a,
b
)
})
.collect::<Result<_, _>>()?;
let bits = self
.bits
.iter()
.zip(other.bits.iter())
.enumerate()
.map(|(i, (a, b))| Boolean::xor(cs.namespace(|| format!("xor of bit {}", i)), a, b))
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits: bits,
value: new_value
value: new_value,
})
}
/// Perform modular addition of several `UInt32` objects.
pub fn addmany<E, CS, M>(
mut cs: M,
operands: &[Self]
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root=MultiEq<E, CS>>
pub fn addmany<E, CS, M>(mut cs: M, operands: &[Self]) -> Result<Self, SynthesisError>
where
E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root = MultiEq<E, CS>>,
{
// Make some arbitrary bounds for ourselves to avoid overflows
// in the scalar field
@@ -337,7 +311,7 @@ impl UInt32 {
match op.value {
Some(val) => {
result_value.as_mut().map(|v| *v += val as u64);
},
}
None => {
// If any of our operands have unknown value, we won't
// know the value of the result
@@ -381,7 +355,7 @@ impl UInt32 {
// Allocate the bit
let b = AllocatedBit::alloc(
cs.namespace(|| format!("result bit {}", i)),
result_value.map(|v| (v >> i) & 1 == 1)
result_value.map(|v| (v >> i) & 1 == 1),
)?;
// Add this bit to the result combination
@@ -402,28 +376,34 @@ impl UInt32 {
Ok(UInt32 {
bits: result_bits,
value: modular_value
value: modular_value,
})
}
}
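// Illustrative sketch on plain values: the arithmetic `addmany` constrains —
// sum the operands in a wider integer and keep only the low 32 bits.
// `addmany_plain` is a hypothetical name.
fn addmany_plain(operands: &[u32]) -> u32 {
    let sum: u64 = operands.iter().map(|&v| u64::from(v)).sum();
    (sum & 0xffff_ffff) as u32
}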
#[cfg(test)]
mod test {
use rand::{XorShiftRng, SeedableRng, Rng};
use ::circuit::boolean::{Boolean};
use super::{UInt32};
use super::UInt32;
use crate::gadgets::boolean::Boolean;
use crate::gadgets::multieq::MultiEq;
use crate::gadgets::test::*;
use crate::ConstraintSystem;
use ff::Field;
use pairing::bls12_381::{Bls12};
use ::circuit::test::*;
use bellman::{ConstraintSystem};
use circuit::multieq::MultiEq;
use pairing::bls12_381::Bls12;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
#[test]
fn test_uint32_from_bits_be() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut v = (0..32).map(|_| Boolean::constant(rng.gen())).collect::<Vec<_>>();
let mut v = (0..32)
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.collect::<Vec<_>>();
let b = UInt32::from_bits_be(&v);
@@ -431,19 +411,18 @@ mod test {
match bit {
&Boolean::Constant(bit) => {
assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
},
_ => unreachable!()
}
_ => unreachable!(),
}
}
let expected_to_be_same = b.into_bits_be();
for x in v.iter().zip(expected_to_be_same.iter())
{
for x in v.iter().zip(expected_to_be_same.iter()) {
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {},
(&Boolean::Constant(false), &Boolean::Constant(false)) => {},
_ => unreachable!()
(&Boolean::Constant(true), &Boolean::Constant(true)) => {}
(&Boolean::Constant(false), &Boolean::Constant(false)) => {}
_ => unreachable!(),
}
}
}
@@ -451,10 +430,15 @@ mod test {
#[test]
fn test_uint32_from_bits() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut v = (0..32).map(|_| Boolean::constant(rng.gen())).collect::<Vec<_>>();
let mut v = (0..32)
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.collect::<Vec<_>>();
let b = UInt32::from_bits(&v);
@@ -462,19 +446,18 @@ mod test {
match bit {
&Boolean::Constant(bit) => {
assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
},
_ => unreachable!()
}
_ => unreachable!(),
}
}
let expected_to_be_same = b.into_bits();
for x in v.iter().zip(expected_to_be_same.iter())
{
for x in v.iter().zip(expected_to_be_same.iter()) {
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {},
(&Boolean::Constant(false), &Boolean::Constant(false)) => {},
_ => unreachable!()
(&Boolean::Constant(true), &Boolean::Constant(true)) => {}
(&Boolean::Constant(false), &Boolean::Constant(false)) => {}
_ => unreachable!(),
}
}
}
@@ -482,14 +465,17 @@ mod test {
#[test]
fn test_uint32_xor() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let a = rng.next_u32();
let b = rng.next_u32();
let c = rng.next_u32();
let mut expected = a ^ b ^ c;
@@ -508,10 +494,10 @@ mod test {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
@@ -524,14 +510,17 @@ mod test {
#[test]
fn test_uint32_addmany_constants() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let a = rng.next_u32();
let b = rng.next_u32();
let c = rng.next_u32();
let a_bit = UInt32::constant(a);
let b_bit = UInt32::constant(b);
@@ -541,7 +530,8 @@ mod test {
let r = {
let mut cs = MultiEq::new(&mut cs);
let r = UInt32::addmany(cs.namespace(|| "addition"), &[a_bit, b_bit, c_bit]).unwrap();
let r =
UInt32::addmany(cs.namespace(|| "addition"), &[a_bit, b_bit, c_bit]).unwrap();
r
};
@@ -563,15 +553,18 @@ mod test {
#[test]
fn test_uint32_addmany() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let d: u32 = rng.gen();
let a = rng.next_u32();
let b = rng.next_u32();
let c = rng.next_u32();
let d = rng.next_u32();
let mut expected = (a ^ b).wrapping_add(c).wrapping_add(d);
@@ -595,13 +588,11 @@ mod test {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Constant(_) => {
unreachable!()
}
&Boolean::Constant(_) => unreachable!(),
}
expected >>= 1;
@@ -620,9 +611,12 @@ mod test {
#[test]
fn test_uint32_rotr() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let mut num = rng.gen();
let mut num = rng.next_u32();
let a = UInt32::constant(num);
@@ -637,8 +631,8 @@ mod test {
match b {
&Boolean::Constant(b) => {
assert_eq!(b, tmp & 1 == 1);
},
_ => unreachable!()
}
_ => unreachable!(),
}
tmp >>= 1;
@@ -650,11 +644,14 @@ mod test {
#[test]
fn test_uint32_shr() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..50 {
for i in 0..60 {
let num = rng.gen();
let num = rng.next_u32();
let a = UInt32::constant(num).shr(i);
let b = UInt32::constant(num.wrapping_shr(i as u32));
@@ -670,14 +667,17 @@ mod test {
#[test]
fn test_uint32_sha256_maj() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let a = rng.next_u32();
let b = rng.next_u32();
let c = rng.next_u32();
let mut expected = (a & b) ^ (a & c) ^ (b & c);
@@ -695,10 +695,10 @@ mod test {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
@@ -711,14 +711,17 @@ mod test {
#[test]
fn test_uint32_sha256_ch() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let a = rng.next_u32();
let b = rng.next_u32();
let c = rng.next_u32();
let mut expected = (a & b) ^ ((!a) & c);
@@ -736,10 +739,10 @@ mod test {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
}
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}

View File

@@ -1,4 +1,4 @@
use rand::Rng;
use rand_core::RngCore;
use std::sync::Arc;
@@ -6,55 +6,34 @@ use ff::{Field, PrimeField};
use group::{CurveAffine, CurveProjective, Wnaf};
use pairing::Engine;
use super::{
Parameters,
VerifyingKey
};
use super::{Parameters, VerifyingKey};
use ::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use {Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use ::domain::{
EvaluationDomain,
Scalar
};
use domain::{EvaluationDomain, Scalar};
use ::multicore::{
Worker
};
use multicore::Worker;
/// Generates a random common reference string for
/// a circuit.
pub fn generate_random_parameters<E, C, R>(
circuit: C,
rng: &mut R
rng: &mut R,
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>, R: Rng
where
E: Engine,
C: Circuit<E>,
R: RngCore,
{
let g1 = rng.gen();
let g2 = rng.gen();
let alpha = rng.gen();
let beta = rng.gen();
let gamma = rng.gen();
let delta = rng.gen();
let tau = rng.gen();
let g1 = E::G1::random(rng);
let g2 = E::G2::random(rng);
let alpha = E::Fr::random(rng);
let beta = E::Fr::random(rng);
let gamma = E::Fr::random(rng);
let delta = E::Fr::random(rng);
let tau = E::Fr::random(rng);
generate_parameters::<E, C>(
circuit,
g1,
g2,
alpha,
beta,
gamma,
delta,
tau
)
generate_parameters::<E, C>(circuit, g1, g2, alpha, beta, gamma, delta, tau)
}
/// This is our assembly structure that we'll use to synthesize the
@@ -68,18 +47,17 @@ struct KeypairAssembly<E: Engine> {
ct_inputs: Vec<Vec<(E::Fr, usize)>>,
at_aux: Vec<Vec<(E::Fr, usize)>>,
bt_aux: Vec<Vec<(E::Fr, usize)>>,
ct_aux: Vec<Vec<(E::Fr, usize)>>
ct_aux: Vec<Vec<(E::Fr, usize)>>,
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
@@ -94,12 +72,11 @@ impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
Ok(Variable(Index::Aux(index)))
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
@@ -114,48 +91,59 @@ impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
Ok(Variable(Index::Input(index)))
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
fn eval<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
this_constraint: usize,
) {
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)),
}
}
}
eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints);
eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
eval(
a(LinearCombination::zero()),
&mut self.at_inputs,
&mut self.at_aux,
self.num_constraints,
);
eval(
b(LinearCombination::zero()),
&mut self.bt_inputs,
&mut self.bt_aux,
self.num_constraints,
);
eval(
c(LinearCombination::zero()),
&mut self.ct_inputs,
&mut self.ct_aux,
self.num_constraints,
);
self.num_constraints += 1;
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
@@ -173,9 +161,11 @@ pub fn generate_parameters<E, C>(
beta: E::Fr,
gamma: E::Fr,
delta: E::Fr,
tau: E::Fr
tau: E::Fr,
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
where
E: Engine,
C: Circuit<E>,
{
let mut assembly = KeypairAssembly {
num_inputs: 0,
@@ -186,7 +176,7 @@ pub fn generate_parameters<E, C>(
ct_inputs: vec![],
at_aux: vec![],
bt_aux: vec![],
ct_aux: vec![]
ct_aux: vec![],
};
// Allocate the "one" input variable
@@ -198,11 +188,7 @@ pub fn generate_parameters<E, C>(
// Input constraints to ensure full density of IC query
// x * 0 = 0
for i in 0..assembly.num_inputs {
assembly.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc,
|lc| lc,
);
assembly.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
}
// Create bases for blind evaluation of polynomials at tau
@@ -240,10 +226,9 @@ pub fn generate_parameters<E, C>(
{
let powers_of_tau = powers_of_tau.as_mut();
worker.scope(powers_of_tau.len(), |scope, chunk| {
for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate()
{
for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
let mut current_tau_power = tau.pow(&[(i*chunk) as u64]);
let mut current_tau_power = tau.pow(&[(i * chunk) as u64]);
for p in powers_of_tau {
p.0 = current_tau_power;
@@ -260,14 +245,15 @@ pub fn generate_parameters<E, C>(
// Compute the H query with multiple threads
worker.scope(h.len(), |scope, chunk| {
for (h, p) in h.chunks_mut(chunk).zip(powers_of_tau.as_ref().chunks(chunk))
for (h, p) in h
.chunks_mut(chunk)
.zip(powers_of_tau.as_ref().chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move || {
// Set values of the H query to g1^{(tau^i * t(tau)) / delta}
for (h, p) in h.iter_mut().zip(p.iter())
{
for (h, p) in h.iter_mut().zip(p.iter()) {
// Compute final exponent
let mut exp = p.0;
exp.mul_assign(&coeff);
@@ -320,9 +306,8 @@ pub fn generate_parameters<E, C>(
beta: &E::Fr,
// Worker
worker: &Worker
)
{
worker: &Worker,
) {
// Sanity check
assert_eq!(a.len(), at.len());
assert_eq!(a.len(), bt.len());
@@ -333,31 +318,32 @@ pub fn generate_parameters<E, C>(
// Evaluate polynomials in multiple threads
worker.scope(a.len(), |scope, chunk| {
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.chunks_mut(chunk)
.zip(b_g1.chunks_mut(chunk))
.zip(b_g2.chunks_mut(chunk))
.zip(ext.chunks_mut(chunk))
.zip(at.chunks(chunk))
.zip(bt.chunks(chunk))
.zip(ct.chunks(chunk))
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a
.chunks_mut(chunk)
.zip(b_g1.chunks_mut(chunk))
.zip(b_g2.chunks_mut(chunk))
.zip(ext.chunks_mut(chunk))
.zip(at.chunks(chunk))
.zip(bt.chunks(chunk))
.zip(ct.chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
let mut g2_wnaf = g2_wnaf.shared();
scope.spawn(move || {
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.iter_mut()
.zip(b_g1.iter_mut())
.zip(b_g2.iter_mut())
.zip(ext.iter_mut())
.zip(at.iter())
.zip(bt.iter())
.zip(ct.iter())
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a
.iter_mut()
.zip(b_g1.iter_mut())
.zip(b_g2.iter_mut())
.zip(ext.iter_mut())
.zip(at.iter())
.zip(bt.iter())
.zip(ct.iter())
{
fn eval_at_tau<E: Engine>(
powers_of_tau: &[Scalar<E>],
p: &[(E::Fr, usize)]
) -> E::Fr
{
p: &[(E::Fr, usize)],
) -> E::Fr {
let mut acc = E::Fr::zero();
for &(ref coeff, index) in p {
@@ -422,7 +408,7 @@ pub fn generate_parameters<E, C>(
&gamma_inverse,
&alpha,
&beta,
&worker
&worker,
);
// Evaluate for auxiliary variables.
@@ -440,7 +426,7 @@ pub fn generate_parameters<E, C>(
&delta_inverse,
&alpha,
&beta,
&worker
&worker,
);
// Don't allow any elements to be unconstrained, so that
@@ -461,7 +447,7 @@ pub fn generate_parameters<E, C>(
gamma_g2: g2.mul(gamma).into_affine(),
delta_g1: g1.mul(delta).into_affine(),
delta_g2: g2.mul(delta).into_affine(),
ic: ic.into_iter().map(|e| e.into_affine()).collect()
ic: ic.into_iter().map(|e| e.into_affine()).collect(),
};
Ok(Parameters {
@@ -470,8 +456,23 @@ pub fn generate_parameters<E, C>(
l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
// Filter points at infinity away from A/B queries
a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
a: Arc::new(
a.into_iter()
.filter(|e| !e.is_zero())
.map(|e| e.into_affine())
.collect(),
),
b_g1: Arc::new(
b_g1.into_iter()
.filter(|e| !e.is_zero())
.map(|e| e.into_affine())
.collect(),
),
b_g2: Arc::new(
b_g2.into_iter()
.filter(|e| !e.is_zero())
.map(|e| e.into_affine())
.collect(),
),
})
}

View File

@@ -1,17 +1,12 @@
use group::{CurveAffine, EncodedPoint};
use pairing::{
Engine,
PairingCurveAffine,
};
use pairing::{Engine, PairingCurveAffine};
use ::{
SynthesisError
};
use SynthesisError;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use multiexp::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
#[cfg(test)]
mod tests;
@@ -28,23 +23,17 @@ pub use self::verifier::*;
pub struct Proof<E: Engine> {
pub a: E::G1Affine,
pub b: E::G2Affine,
pub c: E::G1Affine
pub c: E::G1Affine,
}
impl<E: Engine> PartialEq for Proof<E> {
fn eq(&self, other: &Self) -> bool {
self.a == other.a &&
self.b == other.b &&
self.c == other.c
self.a == other.a && self.b == other.b && self.c == other.c
}
}
impl<E: Engine> Proof<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
writer.write_all(self.a.into_compressed().as_ref())?;
writer.write_all(self.b.into_compressed().as_ref())?;
writer.write_all(self.c.into_compressed().as_ref())?;
@@ -52,48 +41,56 @@ impl<E: Engine> Proof<E> {
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Compressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let a = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
})?;
}
})?;
reader.read_exact(g2_repr.as_mut())?;
let b = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
})?;
}
})?;
reader.read_exact(g1_repr.as_mut())?;
let c = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
})?;
}
})?;
Ok(Proof {
a: a,
b: b,
c: c
})
Ok(Proof { a: a, b: b, c: c })
}
}
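// Illustrative sketch, relying only on the `write`/`read` methods above:
// a compressed-encoding round trip for `Proof`. `proof_roundtrip` is a
// hypothetical helper name.
fn proof_roundtrip<E: Engine>(proof: &Proof<E>) -> io::Result<Proof<E>> {
    let mut bytes = vec![];
    proof.write(&mut bytes)?;
    Proof::read(&bytes[..])
}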
@@ -122,27 +119,23 @@ pub struct VerifyingKey<E: Engine> {
// for all public inputs. Because all public inputs have a dummy constraint,
// this is the same size as the number of inputs, and never contains points
// at infinity.
pub ic: Vec<E::G1Affine>
pub ic: Vec<E::G1Affine>,
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
fn eq(&self, other: &Self) -> bool {
self.alpha_g1 == other.alpha_g1 &&
self.beta_g1 == other.beta_g1 &&
self.beta_g2 == other.beta_g2 &&
self.gamma_g2 == other.gamma_g2 &&
self.delta_g1 == other.delta_g1 &&
self.delta_g2 == other.delta_g2 &&
self.ic == other.ic
self.alpha_g1 == other.alpha_g1
&& self.beta_g1 == other.beta_g1
&& self.beta_g2 == other.beta_g2
&& self.gamma_g2 == other.gamma_g2
&& self.delta_g1 == other.delta_g1
&& self.delta_g2 == other.delta_g2
&& self.ic == other.ic
}
}
impl<E: Engine> VerifyingKey<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g2.into_uncompressed().as_ref())?;
@@ -157,30 +150,39 @@ impl<E: Engine> VerifyingKey<E> {
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
let mut g1_repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let alpha_g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let beta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let beta_g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let beta_g2 = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let gamma_g2 = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let delta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let delta_g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let delta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let delta_g2 = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let ic_len = reader.read_u32::<BigEndian>()? as usize;
@@ -189,13 +191,18 @@ impl<E: Engine> VerifyingKey<E> {
for _ in 0..ic_len {
reader.read_exact(g1_repr.as_mut())?;
let g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
}
})?;
ic.push(g1);
}
@@ -207,7 +214,7 @@ impl<E: Engine> VerifyingKey<E> {
gamma_g2: gamma_g2,
delta_g1: delta_g1,
delta_g2: delta_g2,
ic: ic
ic: ic,
})
}
}
@@ -216,7 +223,7 @@ impl<E: Engine> VerifyingKey<E> {
pub struct Parameters<E: Engine> {
pub vk: VerifyingKey<E>,
// Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
// Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
// m-2 inclusive. Never contains points at infinity.
pub h: Arc<Vec<E::G1Affine>>,
@@ -234,26 +241,22 @@ pub struct Parameters<E: Engine> {
// G1 and G2 for C/B queries, respectively. Never contains points at
// infinity for the same reason as the "A" polynomials.
pub b_g1: Arc<Vec<E::G1Affine>>,
pub b_g2: Arc<Vec<E::G2Affine>>
pub b_g2: Arc<Vec<E::G2Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
fn eq(&self, other: &Self) -> bool {
self.vk == other.vk &&
self.h == other.h &&
self.l == other.l &&
self.a == other.a &&
self.b_g1 == other.b_g1 &&
self.b_g2 == other.b_g2
self.vk == other.vk
&& self.h == other.h
&& self.l == other.l
&& self.a == other.a
&& self.b_g1 == other.b_g1
&& self.b_g2 == other.b_g2
}
}
impl<E: Engine> Parameters<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
self.vk.write(&mut writer)?;
writer.write_u32::<BigEndian>(self.h.len() as u32)?;
@@ -284,27 +287,26 @@ impl<E: Engine> Parameters<E> {
Ok(())
}
pub fn read<R: Read>(
mut reader: R,
checked: bool
) -> io::Result<Self>
{
pub fn read<R: Read>(mut reader: R, checked: bool) -> io::Result<Self> {
let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
repr.into_affine()
} else {
repr
.into_affine_unchecked()
repr.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
}
})
};
@@ -313,17 +315,20 @@ impl<E: Engine> Parameters<E> {
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
repr.into_affine()
} else {
repr
.into_affine_unchecked()
repr.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
.and_then(|e| {
if e.is_zero() {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"point at infinity",
))
} else {
Ok(e)
}
})
};
@@ -376,7 +381,7 @@ impl<E: Engine> Parameters<E> {
l: Arc::new(l),
a: Arc::new(a),
b_g1: Arc::new(b_g1),
b_g2: Arc::new(b_g2)
b_g2: Arc::new(b_g2),
})
}
}
@@ -389,39 +394,30 @@ pub struct PreparedVerifyingKey<E: Engine> {
/// -delta in G2
neg_delta_g2: <E::G2Affine as PairingCurveAffine>::Prepared,
/// Copy of IC from `VerifyingKey`.
ic: Vec<E::G1Affine>
ic: Vec<E::G1Affine>,
}
pub trait ParameterSource<E: Engine> {
type G1Builder: SourceBuilder<E::G1Affine>;
type G2Builder: SourceBuilder<E::G2Affine>;
fn get_vk(
&mut self,
num_ic: usize
) -> Result<VerifyingKey<E>, SynthesisError>;
fn get_h(
&mut self,
num_h: usize
) -> Result<Self::G1Builder, SynthesisError>;
fn get_l(
&mut self,
num_l: usize
) -> Result<Self::G1Builder, SynthesisError>;
fn get_vk(&mut self, num_ic: usize) -> Result<VerifyingKey<E>, SynthesisError>;
fn get_h(&mut self, num_h: usize) -> Result<Self::G1Builder, SynthesisError>;
fn get_l(&mut self, num_l: usize) -> Result<Self::G1Builder, SynthesisError>;
fn get_a(
&mut self,
num_inputs: usize,
num_aux: usize
num_aux: usize,
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
fn get_b_g1(
&mut self,
num_inputs: usize,
num_aux: usize
num_aux: usize,
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
fn get_b_g2(
&mut self,
num_inputs: usize,
num_aux: usize
num_aux: usize,
) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
}
@@ -429,54 +425,39 @@ impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
type G2Builder = (Arc<Vec<E::G2Affine>>, usize);
fn get_vk(
&mut self,
_: usize
) -> Result<VerifyingKey<E>, SynthesisError>
{
fn get_vk(&mut self, _: usize) -> Result<VerifyingKey<E>, SynthesisError> {
Ok(self.vk.clone())
}
fn get_h(
&mut self,
_: usize
) -> Result<Self::G1Builder, SynthesisError>
{
fn get_h(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
Ok((self.h.clone(), 0))
}
fn get_l(
&mut self,
_: usize
) -> Result<Self::G1Builder, SynthesisError>
{
fn get_l(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
Ok((self.l.clone(), 0))
}
fn get_a(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
{
_: usize,
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs)))
}
fn get_b_g1(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
{
_: usize,
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs)))
}
fn get_b_g2(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>
{
_: usize,
) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> {
Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs)))
}
}
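// Illustrative sketch of the convention above, with the hypothetical name
// `split_query`: a `(Arc<Vec<_>>, usize)` builder is a shared vector plus a
// start offset, so the input and aux halves of a query are two views into the
// same allocation, split at `num_inputs`.
fn split_query<T>(
    query: Arc<Vec<T>>,
    num_inputs: usize,
) -> ((Arc<Vec<T>>, usize), (Arc<Vec<T>>, usize)) {
    ((query.clone(), 0), (query, num_inputs))
}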
@@ -484,41 +465,38 @@ impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
#[cfg(test)]
mod test_with_bls12_381 {
use super::*;
use {Circuit, SynthesisError, ConstraintSystem};
use {Circuit, ConstraintSystem, SynthesisError};
use ff::Field;
use rand::{Rand, thread_rng};
use pairing::bls12_381::{Bls12, Fr};
use rand::thread_rng;
#[test]
fn serialization() {
struct MySillyCircuit<E: Engine> {
a: Option<E::Fr>,
b: Option<E::Fr>
b: Option<E::Fr>,
}
impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
cs: &mut CS,
) -> Result<(), SynthesisError> {
let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
let c = cs.alloc_input(|| "c", || {
let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
let c = cs.alloc_input(
|| "c",
|| {
let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
a.mul_assign(&b);
Ok(a)
})?;
a.mul_assign(&b);
Ok(a)
},
)?;
cs.enforce(
|| "a*b=c",
|lc| lc + a,
|lc| lc + b,
|lc| lc + c
);
cs.enforce(|| "a*b=c", |lc| lc + a, |lc| lc + b, |lc| lc + c);
Ok(())
}
@@ -526,10 +504,9 @@ mod test_with_bls12_381 {
let rng = &mut thread_rng();
let params = generate_random_parameters::<Bls12, _, _>(
MySillyCircuit { a: None, b: None },
rng
).unwrap();
let params =
generate_random_parameters::<Bls12, _, _>(MySillyCircuit { a: None, b: None }, rng)
.unwrap();
{
let mut v = vec![];
@@ -547,19 +524,20 @@ mod test_with_bls12_381 {
let pvk = prepare_verifying_key::<Bls12>(&params.vk);
for _ in 0..100 {
let a = Fr::rand(rng);
let b = Fr::rand(rng);
let a = Fr::random(rng);
let b = Fr::random(rng);
let mut c = a;
c.mul_assign(&b);
let proof = create_random_proof(
MySillyCircuit {
a: Some(a),
b: Some(b)
b: Some(b),
},
&params,
rng
).unwrap();
rng,
)
.unwrap();
let mut v = vec![];
proof.write(&mut v).unwrap();

View File

@@ -1,4 +1,4 @@
use rand::Rng;
use rand_core::RngCore;
use std::sync::Arc;
@@ -8,43 +8,23 @@ use ff::{Field, PrimeField};
use group::{CurveAffine, CurveProjective};
use pairing::Engine;
use super::{
ParameterSource,
Proof
};
use super::{ParameterSource, Proof};
use ::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use {Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use ::domain::{
EvaluationDomain,
Scalar
};
use domain::{EvaluationDomain, Scalar};
use ::multiexp::{
DensityTracker,
FullDensity,
multiexp
};
use multiexp::{multiexp, DensityTracker, FullDensity};
use ::multicore::{
Worker
};
use multicore::Worker;
fn eval<E: Engine>(
lc: &LinearCombination<E>,
mut input_density: Option<&mut DensityTracker>,
mut aux_density: Option<&mut DensityTracker>,
input_assignment: &[E::Fr],
aux_assignment: &[E::Fr]
) -> E::Fr
{
aux_assignment: &[E::Fr],
) -> E::Fr {
let mut acc = E::Fr::zero();
for &(index, coeff) in lc.0.iter() {
@@ -56,7 +36,7 @@ fn eval<E: Engine>(
if let Some(ref mut v) = input_density {
v.inc(i);
}
},
}
Variable(Index::Aux(i)) => {
tmp = aux_assignment[i];
if let Some(ref mut v) = aux_density {
@@ -66,10 +46,10 @@ fn eval<E: Engine>(
}
if coeff == E::Fr::one() {
acc.add_assign(&tmp);
acc.add_assign(&tmp);
} else {
tmp.mul_assign(&coeff);
acc.add_assign(&tmp);
tmp.mul_assign(&coeff);
acc.add_assign(&tmp);
}
}
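// Illustrative sketch, with wrapping u64 arithmetic standing in for E::Fr:
// a linear combination evaluates to the sum over its terms of
// coeff_i * assignment_i; the density trackers above additionally record which
// variables actually occur so the later multiexp can skip the rest.
// `eval_lc_plain` is a hypothetical name.
fn eval_lc_plain(terms: &[(usize, u64)], assignment: &[u64]) -> u64 {
    terms.iter().fold(0u64, |acc, &(index, coeff)| {
        acc.wrapping_add(coeff.wrapping_mul(assignment[index]))
    })
}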
@@ -89,18 +69,17 @@ struct ProvingAssignment<E: Engine> {
// Assignments of variables
input_assignment: Vec<E::Fr>,
aux_assignment: Vec<E::Fr>
aux_assignment: Vec<E::Fr>,
}
impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
_: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.aux_assignment.push(f()?);
self.a_aux_density.add_element();
@@ -109,12 +88,11 @@ impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
Ok(Variable(Index::Aux(self.aux_assignment.len() - 1)))
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.input_assignment.push(f()?);
self.b_input_density.add_element();
@@ -122,17 +100,13 @@ impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
Ok(Variable(Index::Input(self.input_assignment.len() - 1)))
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
let a = a(LinearCombination::zero());
let b = b(LinearCombination::zero());
@@ -146,14 +120,14 @@ impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
None,
Some(&mut self.a_aux_density),
&self.input_assignment,
&self.aux_assignment
&self.aux_assignment,
)));
self.b.push(Scalar(eval(
&b,
Some(&mut self.b_input_density),
Some(&mut self.b_aux_density),
&self.input_assignment,
&self.aux_assignment
&self.aux_assignment,
)));
self.c.push(Scalar(eval(
&c,
@@ -164,18 +138,19 @@ impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
None,
None,
&self.input_assignment,
&self.aux_assignment
&self.aux_assignment,
)));
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
@@ -187,12 +162,15 @@ impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(
circuit: C,
params: P,
rng: &mut R
rng: &mut R,
) -> Result<Proof<E>, SynthesisError>
where E: Engine, C: Circuit<E>, R: Rng
where
E: Engine,
C: Circuit<E>,
R: RngCore,
{
let r = rng.gen();
let s = rng.gen();
let r = E::Fr::random(rng);
let s = E::Fr::random(rng);
create_proof::<E, C, P>(circuit, params, r, s)
}
@@ -201,9 +179,11 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
circuit: C,
mut params: P,
r: E::Fr,
s: E::Fr
s: E::Fr,
) -> Result<Proof<E>, SynthesisError>
where E: Engine, C: Circuit<E>
where
E: Engine,
C: Circuit<E>,
{
let mut prover = ProvingAssignment {
a_aux_density: DensityTracker::new(),
@@ -213,7 +193,7 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
b: vec![],
c: vec![],
input_assignment: vec![],
aux_assignment: vec![]
aux_assignment: vec![],
};
prover.alloc_input(|| "", || Ok(E::Fr::one()))?;
@@ -221,11 +201,7 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
circuit.synthesize(&mut prover)?;
for i in 0..prover.input_assignment.len() {
prover.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc,
|lc| lc,
);
prover.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
}
let worker = Worker::new();
@@ -259,31 +235,76 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
};
// TODO: parallelize if it's even helpful
let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let input_assignment = Arc::new(
prover
.input_assignment
.into_iter()
.map(|s| s.into_repr())
.collect::<Vec<_>>(),
);
let aux_assignment = Arc::new(
prover
.aux_assignment
.into_iter()
.map(|s| s.into_repr())
.collect::<Vec<_>>(),
);
let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone());
let l = multiexp(
&worker,
params.get_l(aux_assignment.len())?,
FullDensity,
aux_assignment.clone(),
);
let a_aux_density_total = prover.a_aux_density.get_total_density();
let (a_inputs_source, a_aux_source) = params.get_a(input_assignment.len(), a_aux_density_total)?;
let (a_inputs_source, a_aux_source) =
params.get_a(input_assignment.len(), a_aux_density_total)?;
let a_inputs = multiexp(&worker, a_inputs_source, FullDensity, input_assignment.clone());
let a_aux = multiexp(&worker, a_aux_source, Arc::new(prover.a_aux_density), aux_assignment.clone());
let a_inputs = multiexp(
&worker,
a_inputs_source,
FullDensity,
input_assignment.clone(),
);
let a_aux = multiexp(
&worker,
a_aux_source,
Arc::new(prover.a_aux_density),
aux_assignment.clone(),
);
let b_input_density = Arc::new(prover.b_input_density);
let b_input_density_total = b_input_density.get_total_density();
let b_aux_density = Arc::new(prover.b_aux_density);
let b_aux_density_total = b_aux_density.get_total_density();
let (b_g1_inputs_source, b_g1_aux_source) = params.get_b_g1(b_input_density_total, b_aux_density_total)?;
let (b_g1_inputs_source, b_g1_aux_source) =
params.get_b_g1(b_input_density_total, b_aux_density_total)?;
let b_g1_inputs = multiexp(&worker, b_g1_inputs_source, b_input_density.clone(), input_assignment.clone());
let b_g1_aux = multiexp(&worker, b_g1_aux_source, b_aux_density.clone(), aux_assignment.clone());
let b_g1_inputs = multiexp(
&worker,
b_g1_inputs_source,
b_input_density.clone(),
input_assignment.clone(),
);
let b_g1_aux = multiexp(
&worker,
b_g1_aux_source,
b_aux_density.clone(),
aux_assignment.clone(),
);
let (b_g2_inputs_source, b_g2_aux_source) = params.get_b_g2(b_input_density_total, b_aux_density_total)?;
let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment);
let (b_g2_inputs_source, b_g2_aux_source) =
params.get_b_g2(b_input_density_total, b_aux_density_total)?;
let b_g2_inputs = multiexp(
&worker,
b_g2_inputs_source,
b_input_density,
input_assignment,
);
let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);
if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() {
@@ -325,6 +346,6 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
Ok(Proof {
a: g_a.into_affine(),
b: g_b.into_affine(),
c: g_c.into_affine()
c: g_c.into_affine(),
})
}
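
With the proving entry point now bounded by `rand_core::RngCore` instead of `rand::Rng`, any RNG implementing that trait can drive `create_random_proof`, which draws `r` and `s` via `E::Fr::random`. A small generic wrapper sketched against this workspace's crates (the `prove_with` name is ours; it relies on `&Parameters<E>` acting as a `ParameterSource<E>`, which is how the tests above pass `&params`):

use bellman::groth16::{create_random_proof, Parameters, Proof};
use bellman::{Circuit, SynthesisError};
use pairing::Engine;
use rand_core::RngCore;

fn prove_with<E: Engine, C: Circuit<E>, R: RngCore>(
    circuit: C,
    params: &Parameters<E>,
    rng: &mut R,
) -> Result<Proof<E>, SynthesisError> {
    create_random_proof(circuit, params, rng)
}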

View File

@@ -1,12 +1,13 @@
use ff::{
Field, LegendreSymbol, PrimeField, PrimeFieldDecodingError,
PrimeFieldRepr, ScalarEngine, SqrtField};
Field, LegendreSymbol, PrimeField, PrimeFieldDecodingError, PrimeFieldRepr, ScalarEngine,
SqrtField,
};
use group::{CurveAffine, CurveProjective, EncodedPoint, GroupDecodingError};
use pairing::{Engine, PairingCurveAffine};
use rand_core::RngCore;
use std::cmp::Ordering;
use std::fmt;
use rand::{Rand, Rng};
use std::num::Wrapping;
const MODULUS_R: Wrapping<u32> = Wrapping(64513);
@@ -20,13 +21,11 @@ impl fmt::Display for Fr {
}
}
impl Rand for Fr {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fr(Wrapping(rng.gen()) % MODULUS_R)
}
}
impl Field for Fr {
fn random<R: RngCore>(rng: &mut R) -> Self {
Fr(Wrapping(rng.next_u32()) % MODULUS_R)
}
fn zero() -> Self {
Fr(Wrapping(0))
}
@@ -82,9 +81,13 @@ impl SqrtField for Fr {
fn legendre(&self) -> LegendreSymbol {
// s = self^((r - 1) // 2)
let s = self.pow([32256]);
if s == <Fr as Field>::zero() { LegendreSymbol::Zero }
else if s == <Fr as Field>::one() { LegendreSymbol::QuadraticResidue }
else { LegendreSymbol::QuadraticNonResidue }
if s == <Fr as Field>::zero() {
LegendreSymbol::Zero
} else if s == <Fr as Field>::one() {
LegendreSymbol::QuadraticResidue
} else {
LegendreSymbol::QuadraticNonResidue
}
}
fn sqrt(&self) -> Option<Self> {
@@ -102,7 +105,7 @@ impl SqrtField for Fr {
let mut m = Fr::S;
while t != <Fr as Field>::one() {
let mut i = 1;
let mut i = 1;
{
let mut t2i = t;
t2i.square();
@@ -145,12 +148,6 @@ impl PartialOrd for FrRepr {
}
}
impl Rand for FrRepr {
fn rand<R: Rng>(rng: &mut R) -> Self {
FrRepr([rng.gen()])
}
}
impl fmt::Display for FrRepr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", (self.0)[0])
@@ -266,15 +263,18 @@ impl Engine for DummyEngine {
type G2Affine = Fr;
type Fq = Fr;
type Fqe = Fr;
// TODO: This should be F_645131 or something. Doesn't matter for now.
type Fqk = Fr;
fn miller_loop<'a, I>(i: I) -> Self::Fqk
where I: IntoIterator<Item=&'a (
&'a <Self::G1Affine as PairingCurveAffine>::Prepared,
&'a <Self::G2Affine as PairingCurveAffine>::Prepared
)>
where
I: IntoIterator<
Item = &'a (
&'a <Self::G1Affine as PairingCurveAffine>::Prepared,
&'a <Self::G2Affine as PairingCurveAffine>::Prepared,
),
>,
{
let mut acc = <Fr as Field>::zero();
@@ -288,8 +288,7 @@ impl Engine for DummyEngine {
}
/// Perform final exponentiation of the result of a miller loop.
fn final_exponentiation(this: &Self::Fqk) -> Option<Self::Fqk>
{
fn final_exponentiation(this: &Self::Fqk) -> Option<Self::Fqk> {
Some(*this)
}
}
@@ -300,6 +299,10 @@ impl CurveProjective for Fr {
type Scalar = Fr;
type Engine = DummyEngine;
fn random<R: RngCore>(rng: &mut R) -> Self {
<Fr as Field>::random(rng)
}
fn zero() -> Self {
<Fr as Field>::zero()
}
@@ -312,9 +315,7 @@ impl CurveProjective for Fr {
<Fr as Field>::is_zero(self)
}
fn batch_normalization(_: &mut [Self]) {
}
fn batch_normalization(_: &mut [Self]) {}
fn is_normalized(&self) -> bool {
true
@@ -336,8 +337,7 @@ impl CurveProjective for Fr {
<Fr as Field>::negate(self);
}
fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S)
{
fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S) {
let tmp = Fr::from_repr(other.into()).unwrap();
<Fr as Field>::mul_assign(self, &tmp);
@@ -419,8 +419,7 @@ impl CurveAffine for Fr {
<Fr as Field>::negate(self);
}
fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, other: S) -> Self::Projective
{
fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, other: S) -> Self::Projective {
let mut res = *self;
let tmp = Fr::from_repr(other.into()).unwrap();
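
The `legendre` implementation above is Euler's criterion for the dummy scalar field Z/64513: s = a^((r - 1) / 2), and (64513 - 1) / 2 = 32256, which is exactly the `self.pow([32256])` call. A standalone toy check of that exponent (plain modular exponentiation, nothing from the crate):

fn pow_mod(mut base: u64, mut exp: u64, m: u64) -> u64 {
    let mut acc = 1;
    base %= m;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % m;
        }
        base = base * base % m;
        exp >>= 1;
    }
    acc
}

fn main() {
    let r = 64513u64;
    assert_eq!((r - 1) / 2, 32256);
    // Quadratic residues map to 1, non-residues to r - 1, and zero to 0.
    println!("3^32256 mod {} = {}", r, pow_mod(3, (r - 1) / 2, r));
}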

View File

@@ -6,86 +6,82 @@ use self::dummy_engine::*;
use std::marker::PhantomData;
use ::{
Circuit,
ConstraintSystem,
SynthesisError
};
use {Circuit, ConstraintSystem, SynthesisError};
use super::{
generate_parameters,
prepare_verifying_key,
create_proof,
verify_proof
};
use super::{create_proof, generate_parameters, prepare_verifying_key, verify_proof};
struct XORDemo<E: Engine> {
a: Option<bool>,
b: Option<bool>,
_marker: PhantomData<E>
_marker: PhantomData<E>,
}
impl<E: Engine> Circuit<E> for XORDemo<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
let a_var = cs.alloc(
|| "a",
|| {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Ok(E::Fr::zero())
Err(SynthesisError::AssignmentMissing)
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
},
)?;
cs.enforce(
|| "a_boolean_constraint",
|lc| lc + CS::one() - a_var,
|lc| lc + a_var,
|lc| lc
|lc| lc,
);
let b_var = cs.alloc(|| "b", || {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
let b_var = cs.alloc(
|| "b",
|| {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Ok(E::Fr::zero())
Err(SynthesisError::AssignmentMissing)
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
},
)?;
cs.enforce(
|| "b_boolean_constraint",
|lc| lc + CS::one() - b_var,
|lc| lc + b_var,
|lc| lc
|lc| lc,
);
let c_var = cs.alloc_input(|| "c", || {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
let c_var = cs.alloc_input(
|| "c",
|| {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Ok(E::Fr::zero())
Err(SynthesisError::AssignmentMissing)
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
},
)?;
cs.enforce(
|| "c_xor_constraint",
|lc| lc + a_var + a_var,
|lc| lc + b_var,
|lc| lc + a_var + b_var - c_var
|lc| lc + a_var + b_var - c_var,
);
Ok(())
@@ -106,19 +102,10 @@ fn test_xordemo() {
let c = XORDemo::<DummyEngine> {
a: None,
b: None,
_marker: PhantomData
_marker: PhantomData,
};
generate_parameters(
c,
g1,
g2,
alpha,
beta,
gamma,
delta,
tau
).unwrap()
generate_parameters(c, g1, g2, alpha, beta, gamma, delta, tau).unwrap()
};
// This will synthesize the constraint system:
@@ -226,32 +213,35 @@ fn test_xordemo() {
59158
*/
let u_i = [59158, 48317, 21767, 10402].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
let v_i = [0, 0, 60619, 30791].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
let w_i = [0, 23320, 41193, 41193].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
let u_i = [59158, 48317, 21767, 10402]
.iter()
.map(|e| Fr::from_str(&format!("{}", e)).unwrap())
.collect::<Vec<Fr>>();
let v_i = [0, 0, 60619, 30791]
.iter()
.map(|e| Fr::from_str(&format!("{}", e)).unwrap())
.collect::<Vec<Fr>>();
let w_i = [0, 23320, 41193, 41193]
.iter()
.map(|e| Fr::from_str(&format!("{}", e)).unwrap())
.collect::<Vec<Fr>>();
for (u, a) in u_i.iter()
.zip(&params.a[..])
{
for (u, a) in u_i.iter().zip(&params.a[..]) {
assert_eq!(u, a);
}
for (v, b) in v_i.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g1[..])
for (v, b) in v_i
.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g1[..])
{
assert_eq!(v, b);
}
for (v, b) in v_i.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g2[..])
for (v, b) in v_i
.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g2[..])
{
assert_eq!(v, b);
}
@@ -296,15 +286,10 @@ fn test_xordemo() {
let c = XORDemo {
a: Some(true),
b: Some(false),
_marker: PhantomData
_marker: PhantomData,
};
create_proof(
c,
&params,
r,
s
).unwrap()
create_proof(c, &params, r, s).unwrap()
};
// A(x) =
@@ -320,7 +305,7 @@ fn test_xordemo() {
expected_a.add_assign(&u_i[0]); // a_0 = 1
expected_a.add_assign(&u_i[1]); // a_1 = 1
expected_a.add_assign(&u_i[2]); // a_2 = 1
// a_3 = 0
// a_3 = 0
assert_eq!(proof.a, expected_a);
}
@@ -337,7 +322,7 @@ fn test_xordemo() {
expected_b.add_assign(&v_i[0]); // a_0 = 1
expected_b.add_assign(&v_i[1]); // a_1 = 1
expected_b.add_assign(&v_i[2]); // a_2 = 1
// a_3 = 0
// a_3 = 0
assert_eq!(proof.b, expected_b);
}
@@ -378,7 +363,10 @@ fn test_xordemo() {
expected_c.add_assign(&params.l[0]);
// H query answer
for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() {
for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739]
.iter()
.enumerate()
{
let coeff = Fr::from_str(&format!("{}", coeff)).unwrap();
let mut tmp = params.h[i];
@@ -389,9 +377,5 @@ fn test_xordemo() {
assert_eq!(expected_c, proof.c);
}
assert!(verify_proof(
&pvk,
&proof,
&[Fr::one()]
).unwrap());
assert!(verify_proof(&pvk, &proof, &[Fr::one()]).unwrap());
}
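
The `c_xor_constraint` above encodes XOR arithmetically: for boolean a and b, the single R1CS constraint (2a) * b = a + b - c forces c = a + b - 2ab = a XOR b. A plain-integer sanity check of that identity:

fn main() {
    for a in 0..2i64 {
        for b in 0..2i64 {
            let c = a + b - 2 * a * b;
            assert_eq!(c, a ^ b); // XOR of {0, 1} values
            assert_eq!((a + a) * b, a + b - c); // the enforced constraint
        }
    }
    println!("xor constraint holds for every boolean assignment");
}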

View File

@@ -2,20 +2,11 @@ use ff::PrimeField;
use group::{CurveAffine, CurveProjective};
use pairing::{Engine, PairingCurveAffine};
use super::{
Proof,
VerifyingKey,
PreparedVerifyingKey
};
use super::{PreparedVerifyingKey, Proof, VerifyingKey};
use ::{
SynthesisError
};
use SynthesisError;
pub fn prepare_verifying_key<E: Engine>(
vk: &VerifyingKey<E>
) -> PreparedVerifyingKey<E>
{
pub fn prepare_verifying_key<E: Engine>(vk: &VerifyingKey<E>) -> PreparedVerifyingKey<E> {
let mut gamma = vk.gamma_g2;
gamma.negate();
let mut delta = vk.delta_g2;
@@ -25,16 +16,15 @@ pub fn prepare_verifying_key<E: Engine>(
alpha_g1_beta_g2: E::pairing(vk.alpha_g1, vk.beta_g2),
neg_gamma_g2: gamma.prepare(),
neg_delta_g2: delta.prepare(),
ic: vk.ic.clone()
ic: vk.ic.clone(),
}
}
pub fn verify_proof<'a, E: Engine>(
pvk: &'a PreparedVerifyingKey<E>,
proof: &Proof<E>,
public_inputs: &[E::Fr]
) -> Result<bool, SynthesisError>
{
public_inputs: &[E::Fr],
) -> Result<bool, SynthesisError> {
if (public_inputs.len() + 1) != pvk.ic.len() {
return Err(SynthesisError::MalformedVerifyingKey);
}
@@ -53,11 +43,14 @@ pub fn verify_proof<'a, E: Engine>(
// A * B + inputs * (-gamma) + C * (-delta) = alpha * beta
// which allows us to do a single final exponentiation.
Ok(E::final_exponentiation(
&E::miller_loop([
Ok(E::final_exponentiation(&E::miller_loop(
[
(&proof.a.prepare(), &proof.b.prepare()),
(&acc.into_affine().prepare(), &pvk.neg_gamma_g2),
(&proof.c.prepare(), &pvk.neg_delta_g2)
].into_iter())
).unwrap() == pvk.alpha_g1_beta_g2)
(&proof.c.prepare(), &pvk.neg_delta_g2),
]
.into_iter(),
))
.unwrap()
== pvk.alpha_g1_beta_g2)
}
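
For reference, the batched check above is the usual Groth16 verification equation with gamma and delta moved to the left-hand side via the precomputed negated prepared points, so a single Miller loop plus one final exponentiation suffices. In multiplicative notation, with acc the public-input combination accumulated over `pvk.ic`:

    e(A, B) * e(acc, -gamma) * e(C, -delta) = e(alpha, beta)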

View File

@@ -2,11 +2,12 @@ extern crate ff;
extern crate group;
#[cfg(feature = "pairing")]
extern crate pairing;
extern crate rand;
extern crate rand_core;
extern crate futures;
extern crate bit_vec;
extern crate blake2s_simd;
extern crate byteorder;
extern crate futures;
#[cfg(feature = "multicore")]
extern crate crossbeam;
@@ -15,19 +16,33 @@ extern crate futures_cpupool;
#[cfg(feature = "multicore")]
extern crate num_cpus;
pub mod multicore;
mod multiexp;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate rand_xorshift;
#[cfg(test)]
extern crate sha2;
pub mod domain;
pub mod gadgets;
#[cfg(feature = "groth16")]
pub mod groth16;
pub mod multicore;
mod multiexp;
use ff::{Field, ScalarEngine};
use std::ops::{Add, Sub};
use std::fmt;
use std::error::Error;
use std::fmt;
use std::io;
use std::marker::PhantomData;
use std::ops::{Add, Sub};
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `Circuit` trait represents a
@@ -35,10 +50,7 @@ use std::marker::PhantomData;
/// CRS generation and during proving.
pub trait Circuit<E: ScalarEngine> {
/// Synthesize the circuit into a rank-1 quadratic constraint system
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>;
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError>;
}
/// Represents a variable in our constraint system.
@@ -64,7 +76,7 @@ impl Variable {
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Index {
Input(usize),
Aux(usize)
Aux(usize),
}
/// This represents a linear combination of some variables, with coefficients
@@ -191,7 +203,7 @@ pub enum SynthesisError {
/// During verification, our verifying key was malformed.
MalformedVerifyingKey,
/// During CRS generation, we observed an unconstrained auxiliary variable
UnconstrainedVariable
UnconstrainedVariable,
}
impl From<io::Error> for SynthesisError {
@@ -203,14 +215,16 @@ impl From<io::Error> for SynthesisError {
impl Error for SynthesisError {
fn description(&self) -> &str {
match *self {
SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed",
SynthesisError::AssignmentMissing => {
"an assignment for a variable could not be computed"
}
SynthesisError::DivisionByZero => "division by zero",
SynthesisError::Unsatisfiable => "unsatisfiable constraint system",
SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large",
SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS",
SynthesisError::IoError(_) => "encountered an I/O error",
SynthesisError::MalformedVerifyingKey => "malformed verifying key",
SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained"
SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained",
}
}
}
@@ -242,40 +256,36 @@ pub trait ConstraintSystem<E: ScalarEngine>: Sized {
/// determine the assignment of the variable. The given `annotation` function is invoked
/// in testing contexts in order to derive a unique name for this variable in the current
/// namespace.
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>;
/// Allocate a public variable in the constraint system. The provided function is used to
/// determine the assignment of the variable.
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>;
/// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
/// in order to derive a unique name for the constraint in the current namespace.
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
/// Create a new (sub)namespace and enter into it. Not intended
/// for downstream use; use `namespace` instead.
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR;
where
NR: Into<String>,
N: FnOnce() -> NR;
/// Exit out of the existing namespace. Not intended for
/// downstream use; use `namespace` instead.
@@ -286,11 +296,10 @@ pub trait ConstraintSystem<E: ScalarEngine>: Sized {
fn get_root(&mut self) -> &mut Self::Root;
/// Begin a namespace for this constraint system.
fn namespace<'a, NR, N>(
&'a mut self,
name_fn: N
) -> Namespace<'a, E, Self::Root>
where NR: Into<String>, N: FnOnce() -> NR
fn namespace<'a, NR, N>(&'a mut self, name_fn: N) -> Namespace<'a, E, Self::Root>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.get_root().push_namespace(name_fn);
@@ -309,37 +318,31 @@ impl<'cs, E: ScalarEngine, CS: ConstraintSystem<E>> ConstraintSystem<E> for Name
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.0.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.0.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
self.0.enforce(annotation, a, b, c)
}
@@ -349,18 +352,18 @@ impl<'cs, E: ScalarEngine, CS: ConstraintSystem<E>> ConstraintSystem<E> for Name
// never a root constraint system.
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
panic!("only the root's push_namespace should be called");
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
panic!("only the root's pop_namespace should be called");
}
fn get_root(&mut self) -> &mut Self::Root
{
fn get_root(&mut self) -> &mut Self::Root {
self.0.get_root()
}
}
@@ -380,54 +383,48 @@ impl<'cs, E: ScalarEngine, CS: ConstraintSystem<E>> ConstraintSystem<E> for &'cs
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
(**self).alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
(**self).alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
{
(**self).enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
where
NR: Into<String>,
N: FnOnce() -> NR,
{
(**self).push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
fn pop_namespace(&mut self) {
(**self).pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
fn get_root(&mut self) -> &mut Self::Root {
(**self).get_root()
}
}

View File

@@ -6,15 +6,15 @@
#[cfg(feature = "multicore")]
mod implementation {
use num_cpus;
use futures::{Future, IntoFuture, Poll};
use futures_cpupool::{CpuPool, CpuFuture};
use crossbeam::{self, Scope};
use futures::{Future, IntoFuture, Poll};
use futures_cpupool::{CpuFuture, CpuPool};
use num_cpus;
#[derive(Clone)]
pub struct Worker {
cpus: usize,
pool: CpuPool
pool: CpuPool,
}
impl Worker {
@@ -24,7 +24,7 @@ mod implementation {
pub(crate) fn new_with_cpus(cpus: usize) -> Worker {
Worker {
cpus: cpus,
pool: CpuPool::new(cpus)
pool: CpuPool::new(cpus),
}
}
@@ -36,26 +36,22 @@ mod implementation {
log2_floor(self.cpus)
}
pub fn compute<F, R>(
&self, f: F
) -> WorkerFuture<R::Item, R::Error>
where F: FnOnce() -> R + Send + 'static,
R: IntoFuture + 'static,
R::Future: Send + 'static,
R::Item: Send + 'static,
R::Error: Send + 'static
pub fn compute<F, R>(&self, f: F) -> WorkerFuture<R::Item, R::Error>
where
F: FnOnce() -> R + Send + 'static,
R: IntoFuture + 'static,
R::Future: Send + 'static,
R::Item: Send + 'static,
R::Error: Send + 'static,
{
WorkerFuture {
future: self.pool.spawn_fn(f)
future: self.pool.spawn_fn(f),
}
}
pub fn scope<'a, F, R>(
&self,
elements: usize,
f: F
) -> R
where F: FnOnce(&Scope<'a>, usize) -> R
pub fn scope<'a, F, R>(&self, elements: usize, f: F) -> R
where
F: FnOnce(&Scope<'a>, usize) -> R,
{
let chunk_size = if elements < self.cpus {
1
@@ -63,22 +59,19 @@ mod implementation {
elements / self.cpus
};
crossbeam::scope(|scope| {
f(scope, chunk_size)
})
crossbeam::scope(|scope| f(scope, chunk_size))
}
}
pub struct WorkerFuture<T, E> {
future: CpuFuture<T, E>
future: CpuFuture<T, E>,
}
impl<T: Send + 'static, E: Send + 'static> Future for WorkerFuture<T, E> {
type Item = T;
type Error = E;
fn poll(&mut self) -> Poll<Self::Item, Self::Error>
{
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.future.poll()
}
}
@@ -88,7 +81,7 @@ mod implementation {
let mut pow = 0;
while (1 << (pow+1)) <= num {
while (1 << (pow + 1)) <= num {
pow += 1;
}
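
`Worker::scope` hands its closure a crossbeam scope plus a per-thread chunk size of max(1, elements / cpus); callers split their data with `chunks_mut(chunk)` and spawn one task per chunk, as the FFT and prover code do. A sketch of that pattern on a plain slice (assuming `Worker::new` is public, as it is in this crate; the `parallel_double` helper is ours):

use bellman::multicore::Worker;

fn parallel_double(values: &mut [u64]) {
    let worker = Worker::new();
    worker.scope(values.len(), |scope, chunk| {
        for slice in values.chunks_mut(chunk) {
            scope.spawn(move || {
                for v in slice {
                    *v *= 2;
                }
            });
        }
    });
}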

View File

@@ -1,11 +1,11 @@
use ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use group::{CurveAffine, CurveProjective};
use std::sync::Arc;
use std::io;
use bit_vec::{self, BitVec};
use std::iter;
use futures::{Future};
use super::multicore::Worker;
use bit_vec::{self, BitVec};
use ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use futures::Future;
use group::{CurveAffine, CurveProjective};
use std::io;
use std::iter;
use std::sync::Arc;
use super::SynthesisError;
@@ -19,7 +19,10 @@ pub trait SourceBuilder<G: CurveAffine>: Send + Sync + 'static + Clone {
/// A source of bases, like an iterator.
pub trait Source<G: CurveAffine> {
/// Parses the element from the source. Fails if the point is at infinity.
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError>;
fn add_assign_mixed(
&mut self,
to: &mut <G as CurveAffine>::Projective,
) -> Result<(), SynthesisError>;
/// Skips `amt` elements from the source, avoiding deserialization.
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
@@ -34,13 +37,20 @@ impl<G: CurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
}
impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError> {
fn add_assign_mixed(
&mut self,
to: &mut <G as CurveAffine>::Projective,
) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into());
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"expected more bases from source",
)
.into());
}
if self.0[self.1].is_zero() {
return Err(SynthesisError::UnexpectedIdentity)
return Err(SynthesisError::UnexpectedIdentity);
}
to.add_assign_mixed(&self.0[self.1]);
@@ -52,7 +62,11 @@ impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into());
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"expected more bases from source",
)
.into());
}
self.1 += amt;
@@ -63,7 +77,7 @@ impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
pub trait QueryDensity {
/// Returns whether the base exists.
type Iter: Iterator<Item=bool>;
type Iter: Iterator<Item = bool>;
fn iter(self) -> Self::Iter;
fn get_query_size(self) -> Option<usize>;
@@ -92,7 +106,7 @@ impl<'a> QueryDensity for &'a FullDensity {
pub struct DensityTracker {
bv: BitVec,
total_density: usize
total_density: usize,
}
impl<'a> QueryDensity for &'a DensityTracker {
@@ -111,7 +125,7 @@ impl DensityTracker {
pub fn new() -> DensityTracker {
DensityTracker {
bv: BitVec::new(),
total_density: 0
total_density: 0,
}
}
@@ -138,12 +152,13 @@ fn multiexp_inner<Q, D, G, S>(
exponents: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
mut skip: u32,
c: u32,
handle_trivial: bool
) -> Box<Future<Item=<G as CurveAffine>::Projective, Error=SynthesisError>>
where for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>
handle_trivial: bool,
) -> Box<Future<Item = <G as CurveAffine>::Projective, Error = SynthesisError>>
where
for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>,
{
// Perform this region of the multiexp
let this = {
@@ -212,16 +227,24 @@ fn multiexp_inner<Q, D, G, S>(
// There's another region more significant. Calculate and join it with
// this region recursively.
Box::new(
this.join(multiexp_inner(pool, bases, density_map, exponents, skip, c, false))
.map(move |(this, mut higher)| {
for _ in 0..c {
higher.double();
}
this.join(multiexp_inner(
pool,
bases,
density_map,
exponents,
skip,
c,
false,
))
.map(move |(this, mut higher)| {
for _ in 0..c {
higher.double();
}
higher.add_assign(&this);
higher.add_assign(&this);
higher
})
higher
}),
)
}
}
@@ -232,12 +255,13 @@ pub fn multiexp<Q, D, G, S>(
pool: &Worker,
bases: S,
density_map: D,
exponents: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>
) -> Box<Future<Item=<G as CurveAffine>::Projective, Error=SynthesisError>>
where for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>
exponents: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
) -> Box<Future<Item = <G as CurveAffine>::Projective, Error = SynthesisError>>
where
for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>,
{
let c = if exponents.len() < 32 {
3u32
@@ -260,9 +284,8 @@ pub fn multiexp<Q, D, G, S>(
fn test_with_bls12() {
fn naive_multiexp<G: CurveAffine>(
bases: Arc<Vec<G>>,
exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>
) -> G::Projective
{
exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
) -> G::Projective {
assert_eq!(bases.len(), exponents.len());
let mut acc = G::Projective::zero();
@@ -274,25 +297,28 @@ fn test_with_bls12() {
acc
}
use rand::{self, Rand};
use pairing::{bls12_381::Bls12, Engine};
use rand;
const SAMPLES: usize = 1 << 14;
let rng = &mut rand::thread_rng();
let v = Arc::new((0..SAMPLES).map(|_| <Bls12 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>());
let g = Arc::new((0..SAMPLES).map(|_| <Bls12 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>());
let v = Arc::new(
(0..SAMPLES)
.map(|_| <Bls12 as ScalarEngine>::Fr::random(rng).into_repr())
.collect::<Vec<_>>(),
);
let g = Arc::new(
(0..SAMPLES)
.map(|_| <Bls12 as Engine>::G1::random(rng).into_affine())
.collect::<Vec<_>>(),
);
let naive = naive_multiexp(g.clone(), v.clone());
let pool = Worker::new();
let fast = multiexp(
&pool,
(g, 0),
FullDensity,
v
).wait().unwrap();
let fast = multiexp(&pool, (g, 0), FullDensity, v).wait().unwrap();
assert_eq!(naive, fast);
}
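
`multiexp_inner` is a windowed (Pippenger-style) multi-exponentiation: each recursion level handles `c` bits of every exponent, sorts the bases into buckets by digit, sums the buckets with a running suffix sum, and the more significant region is doubled `c` times before being joined in. The same bookkeeping over plain wrapping integers (everything below is a toy, including the window width):

fn bucketed_multiexp(bases: &[u64], scalars: &[u64], c: u32) -> u64 {
    assert!(c >= 1 && c <= 16);
    let windows = (64 + c - 1) / c;
    let mut acc = 0u64;
    for w in (0..windows).rev() {
        // "Double" the accumulator c times before adding this window's region.
        acc = acc.wrapping_shl(c);
        let mut buckets = vec![0u64; (1 << c) - 1];
        for (base, scalar) in bases.iter().zip(scalars) {
            let digit = ((scalar >> (w * c)) & ((1 << c) - 1)) as usize;
            if digit != 0 {
                buckets[digit - 1] = buckets[digit - 1].wrapping_add(*base);
            }
        }
        // Suffix-sum the buckets so bucket i contributes with weight i + 1.
        let mut running = 0u64;
        for b in buckets.iter().rev() {
            running = running.wrapping_add(*b);
            acc = acc.wrapping_add(running);
        }
    }
    acc
}

fn main() {
    let bases = [3u64, 5, 7];
    let scalars = [2u64, 4, 6];
    assert_eq!(bucketed_multiexp(&bases, &scalars, 4), 3 * 2 + 5 * 4 + 7 * 6);
}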

View File

@@ -4,41 +4,31 @@ extern crate pairing;
extern crate rand;
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
use rand::thread_rng;
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using pairing-friendly curves
use ff::Field;
use ff::{Field, ScalarEngine};
use pairing::Engine;
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use pairing::bls12_381::{
Bls12
};
use pairing::bls12_381::Bls12;
// We'll use these interfaces to construct our circuit.
use bellman::{
Circuit,
ConstraintSystem,
SynthesisError
};
use bellman::{Circuit, ConstraintSystem, SynthesisError};
// We're going to use the Groth16 proving system.
use bellman::groth16::{
Proof,
generate_random_parameters,
prepare_verifying_key,
create_random_proof,
verify_proof,
create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof, Proof,
};
const MIMC_ROUNDS: usize = 322;
/// This is an implementation of MiMC, specifically a
/// variant named `LongsightF322p3` for BLS12-381.
/// See http://eprint.iacr.org/2016/492 for more
/// See http://eprint.iacr.org/2016/492 for more
/// information about this construction.
///
/// ```
@@ -49,12 +39,7 @@ const MIMC_ROUNDS: usize = 322;
/// return xL
/// }
/// ```
fn mimc<E: Engine>(
mut xl: E::Fr,
mut xr: E::Fr,
constants: &[E::Fr]
) -> E::Fr
{
fn mimc<E: Engine>(mut xl: E::Fr, mut xr: E::Fr, constants: &[E::Fr]) -> E::Fr {
assert_eq!(constants.len(), MIMC_ROUNDS);
for i in 0..MIMC_ROUNDS {
@@ -76,31 +61,29 @@ fn mimc<E: Engine>(
struct MiMCDemo<'a, E: Engine> {
xl: Option<E::Fr>,
xr: Option<E::Fr>,
constants: &'a [E::Fr]
constants: &'a [E::Fr],
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
assert_eq!(self.constants.len(), MIMC_ROUNDS);
// Allocate the first component of the preimage.
let mut xl_value = self.xl;
let mut xl = cs.alloc(|| "preimage xl", || {
xl_value.ok_or(SynthesisError::AssignmentMissing)
})?;
let mut xl = cs.alloc(
|| "preimage xl",
|| xl_value.ok_or(SynthesisError::AssignmentMissing),
)?;
// Allocate the second component of the preimage.
let mut xr_value = self.xr;
let mut xr = cs.alloc(|| "preimage xr", || {
xr_value.ok_or(SynthesisError::AssignmentMissing)
})?;
let mut xr = cs.alloc(
|| "preimage xr",
|| xr_value.ok_or(SynthesisError::AssignmentMissing),
)?;
for i in 0..MIMC_ROUNDS {
// xL, xR := xR + (xL + Ci)^3, xL
@@ -112,15 +95,16 @@ impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
e.square();
e
});
let mut tmp = cs.alloc(|| "tmp", || {
tmp_value.ok_or(SynthesisError::AssignmentMissing)
})?;
let mut tmp = cs.alloc(
|| "tmp",
|| tmp_value.ok_or(SynthesisError::AssignmentMissing),
)?;
cs.enforce(
|| "tmp = (xL + Ci)^2",
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + tmp
|lc| lc + tmp,
);
// new_xL = xR + (xL + Ci)^3
@@ -133,23 +117,25 @@ impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
e
});
let mut new_xl = if i == (MIMC_ROUNDS-1) {
let mut new_xl = if i == (MIMC_ROUNDS - 1) {
// This is the last round, xL is our image and so
// we allocate a public input.
cs.alloc_input(|| "image", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
cs.alloc_input(
|| "image",
|| new_xl_value.ok_or(SynthesisError::AssignmentMissing),
)?
} else {
cs.alloc(|| "new_xl", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
cs.alloc(
|| "new_xl",
|| new_xl_value.ok_or(SynthesisError::AssignmentMissing),
)?
};
cs.enforce(
|| "new_xL = xR + (xL + Ci)^3",
|lc| lc + tmp,
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + new_xl - xr
|lc| lc + new_xl - xr,
);
// xR = xL
@@ -172,7 +158,9 @@ fn test_mimc() {
let rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
let constants = (0..MIMC_ROUNDS)
.map(|_| <Bls12 as ScalarEngine>::Fr::random(rng))
.collect::<Vec<_>>();
println!("Creating parameters...");
@@ -181,7 +169,7 @@ fn test_mimc() {
let c = MiMCDemo::<Bls12> {
xl: None,
xr: None,
constants: &constants
constants: &constants,
};
generate_random_parameters(c, rng).unwrap()
@@ -203,8 +191,8 @@ fn test_mimc() {
for _ in 0..SAMPLES {
// Generate a random preimage and compute the image
let xl = rng.gen();
let xr = rng.gen();
let xl = <Bls12 as ScalarEngine>::Fr::random(rng);
let xr = <Bls12 as ScalarEngine>::Fr::random(rng);
let image = mimc::<Bls12>(xl, xr, &constants);
proof_vec.truncate(0);
@@ -216,7 +204,7 @@ fn test_mimc() {
let c = MiMCDemo {
xl: Some(xl),
xr: Some(xr),
constants: &constants
constants: &constants,
};
// Create a groth16 proof with our parameters.
@@ -230,20 +218,16 @@ fn test_mimc() {
let start = Instant::now();
let proof = Proof::read(&proof_vec[..]).unwrap();
// Check the proof
assert!(verify_proof(
&pvk,
&proof,
&[image]
).unwrap());
assert!(verify_proof(&pvk, &proof, &[image]).unwrap());
total_verifying += start.elapsed();
}
let proving_avg = total_proving / SAMPLES;
let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (proving_avg.as_secs() as f64);
let proving_avg =
proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64);
let verifying_avg = total_verifying / SAMPLES;
let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (verifying_avg.as_secs() as f64);
let verifying_avg =
verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64);
println!("Average proving time: {:?} seconds", proving_avg);
println!("Average verifying time: {:?} seconds", verifying_avg);

View File

@@ -10,8 +10,8 @@ repository = "https://github.com/ebfull/ff"
[dependencies]
byteorder = "1"
rand = "0.4"
ff_derive = { version = "0.3.0", path = "ff_derive", optional = true }
rand_core = "0.5"
[features]
default = []

View File

@@ -52,13 +52,8 @@ pub fn prime_field(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let mut gen = proc_macro2::TokenStream::new();
let (constants_impl, sqrt_impl) = prime_field_constants_and_sqrt(
&ast.ident,
&repr_ident,
modulus,
limbs,
generator,
);
let (constants_impl, sqrt_impl) =
prime_field_constants_and_sqrt(&ast.ident, &repr_ident, modulus, limbs, generator);
gen.extend(constants_impl);
gen.extend(prime_field_repr_impl(&repr_ident, limbs));
@@ -136,13 +131,6 @@ fn prime_field_repr_impl(repr: &syn::Ident, limbs: usize) -> proc_macro2::TokenS
}
}
impl ::rand::Rand for #repr {
#[inline(always)]
fn rand<R: ::rand::Rng>(rng: &mut R) -> Self {
#repr(rng.gen())
}
}
impl ::std::fmt::Display for #repr {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
try!(write!(f, "0x"));
@@ -366,7 +354,8 @@ fn biguint_num_bits(mut v: BigUint) -> u32 {
fn exp(base: BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut ret = BigUint::one();
for i in exp.to_bytes_be()
for i in exp
.to_bytes_be()
.into_iter()
.flat_map(|x| (0..8).rev().map(move |i| (x >> i).is_odd()))
{
@@ -387,11 +376,13 @@ fn test_exp() {
&BigUint::from_str("5489673498567349856734895").unwrap(),
&BigUint::from_str(
"52435875175126190479447740508185965837690552500527637822603658699938581184513"
).unwrap()
)
.unwrap()
),
BigUint::from_str(
"4371221214068404307866768905142520595925044802278091865033317963560480051536"
).unwrap()
)
.unwrap()
);
}
@@ -430,7 +421,7 @@ fn prime_field_constants_and_sqrt(
let mod_minus_1_over_2 =
biguint_to_u64_vec((&modulus - BigUint::from_str("1").unwrap()) >> 1, limbs);
let legendre_impl = quote!{
let legendre_impl = quote! {
fn legendre(&self) -> ::ff::LegendreSymbol {
// s = self^((modulus - 1) // 2)
let s = self.pow(#mod_minus_1_over_2);
@@ -452,7 +443,7 @@ fn prime_field_constants_and_sqrt(
// Compute -R as (m - r)
let rneg = biguint_to_u64_vec(&modulus - &r, limbs);
quote!{
quote! {
impl ::ff::SqrtField for #name {
#legendre_impl
@@ -479,7 +470,7 @@ fn prime_field_constants_and_sqrt(
let t_plus_1_over_2 = biguint_to_u64_vec((&t + BigUint::one()) >> 1, limbs);
let t = biguint_to_u64_vec(t.clone(), limbs);
quote!{
quote! {
impl ::ff::SqrtField for #name {
#legendre_impl
@@ -526,7 +517,7 @@ fn prime_field_constants_and_sqrt(
}
}
} else {
quote!{}
quote! {}
};
// Compute R^2 mod m
@@ -543,36 +534,39 @@ fn prime_field_constants_and_sqrt(
}
inv = inv.wrapping_neg();
(quote! {
/// This is the modulus m of the prime field
const MODULUS: #repr = #repr([#(#modulus,)*]);
(
quote! {
/// This is the modulus m of the prime field
const MODULUS: #repr = #repr([#(#modulus,)*]);
/// The number of bits needed to represent the modulus.
const MODULUS_BITS: u32 = #modulus_num_bits;
/// The number of bits needed to represent the modulus.
const MODULUS_BITS: u32 = #modulus_num_bits;
/// The number of bits that must be shaved from the beginning of
/// the representation when randomly sampling.
const REPR_SHAVE_BITS: u32 = #repr_shave_bits;
/// The number of bits that must be shaved from the beginning of
/// the representation when randomly sampling.
const REPR_SHAVE_BITS: u32 = #repr_shave_bits;
/// 2^{limbs*64} mod m
const R: #repr = #repr(#r);
/// 2^{limbs*64} mod m
const R: #repr = #repr(#r);
/// 2^{limbs*64*2} mod m
const R2: #repr = #repr(#r2);
/// 2^{limbs*64*2} mod m
const R2: #repr = #repr(#r2);
        /// -(m^{-1} mod 2^64) mod 2^64
const INV: u64 = #inv;
            /// -(m^{-1} mod 2^64) mod 2^64
const INV: u64 = #inv;
/// Multiplicative generator of `MODULUS` - 1 order, also quadratic
/// nonresidue.
const GENERATOR: #repr = #repr(#generator);
/// Multiplicative generator of `MODULUS` - 1 order, also quadratic
/// nonresidue.
const GENERATOR: #repr = #repr(#generator);
/// 2^s * t = MODULUS - 1 with t odd
const S: u32 = #s;
/// 2^s * t = MODULUS - 1 with t odd
const S: u32 = #s;
/// 2^s root of unity computed by GENERATOR^t
const ROOT_OF_UNITY: #repr = #repr(#root_of_unity);
}, sqrt_impl)
/// 2^s root of unity computed by GENERATOR^t
const ROOT_OF_UNITY: #repr = #repr(#root_of_unity);
},
sqrt_impl,
)
}
/// Implement PrimeField for the derived type.
@@ -592,9 +586,9 @@ fn prime_field_impl(
mont_paramlist.append_separated(
(0..(limbs * 2)).map(|i| (i, get_temp(i))).map(|(i, x)| {
if i != 0 {
quote!{mut #x: u64}
quote! {mut #x: u64}
} else {
quote!{#x: u64}
quote! {#x: u64}
}
}),
proc_macro2::Punct::new(',', proc_macro2::Spacing::Alone),
@@ -607,7 +601,7 @@ fn prime_field_impl(
for i in 0..limbs {
{
let temp = get_temp(i);
gen.extend(quote!{
gen.extend(quote! {
let k = #temp.wrapping_mul(INV);
let mut carry = 0;
::ff::mac_with_carry(#temp, k, MODULUS.0[0], &mut carry);
@@ -616,7 +610,7 @@ fn prime_field_impl(
for j in 1..limbs {
let temp = get_temp(i + j);
gen.extend(quote!{
gen.extend(quote! {
#temp = ::ff::mac_with_carry(#temp, k, MODULUS.0[#j], &mut carry);
});
}
@@ -624,17 +618,17 @@ fn prime_field_impl(
let temp = get_temp(i + limbs);
if i == 0 {
gen.extend(quote!{
gen.extend(quote! {
#temp = ::ff::adc(#temp, 0, &mut carry);
});
} else {
gen.extend(quote!{
gen.extend(quote! {
#temp = ::ff::adc(#temp, carry2, &mut carry);
});
}
if i != (limbs - 1) {
gen.extend(quote!{
gen.extend(quote! {
let carry2 = carry;
});
}
@@ -643,7 +637,7 @@ fn prime_field_impl(
for i in 0..limbs {
let temp = get_temp(limbs + i);
gen.extend(quote!{
gen.extend(quote! {
(self.0).0[#i] = #temp;
});
}
@@ -655,14 +649,14 @@ fn prime_field_impl(
let mut gen = proc_macro2::TokenStream::new();
for i in 0..(limbs - 1) {
gen.extend(quote!{
gen.extend(quote! {
let mut carry = 0;
});
for j in (i + 1)..limbs {
let temp = get_temp(i + j);
if i == 0 {
gen.extend(quote!{
gen.extend(quote! {
let #temp = ::ff::mac_with_carry(0, (#a.0).0[#i], (#a.0).0[#j], &mut carry);
});
} else {
@@ -674,7 +668,7 @@ fn prime_field_impl(
let temp = get_temp(i + limbs);
gen.extend(quote!{
gen.extend(quote! {
let #temp = carry;
});
}
@@ -684,21 +678,21 @@ fn prime_field_impl(
let temp1 = get_temp(limbs * 2 - i - 1);
if i == 1 {
gen.extend(quote!{
gen.extend(quote! {
let #temp0 = #temp1 >> 63;
});
} else if i == (limbs * 2 - 1) {
gen.extend(quote!{
gen.extend(quote! {
let #temp0 = #temp0 << 1;
});
} else {
gen.extend(quote!{
gen.extend(quote! {
let #temp0 = (#temp0 << 1) | (#temp1 >> 63);
});
}
}
gen.extend(quote!{
gen.extend(quote! {
let mut carry = 0;
});
@@ -706,7 +700,7 @@ fn prime_field_impl(
let temp0 = get_temp(i * 2);
let temp1 = get_temp(i * 2 + 1);
if i == 0 {
gen.extend(quote!{
gen.extend(quote! {
let #temp0 = ::ff::mac_with_carry(0, (#a.0).0[#i], (#a.0).0[#i], &mut carry);
});
} else {
@@ -715,7 +709,7 @@ fn prime_field_impl(
});
}
gen.extend(quote!{
gen.extend(quote! {
let #temp1 = ::ff::adc(#temp1, 0, &mut carry);
});
}
@@ -726,7 +720,7 @@ fn prime_field_impl(
proc_macro2::Punct::new(',', proc_macro2::Spacing::Alone),
);
gen.extend(quote!{
gen.extend(quote! {
self.mont_reduce(#mont_calling);
});
@@ -741,7 +735,7 @@ fn prime_field_impl(
let mut gen = proc_macro2::TokenStream::new();
for i in 0..limbs {
gen.extend(quote!{
gen.extend(quote! {
let mut carry = 0;
});
@@ -749,7 +743,7 @@ fn prime_field_impl(
let temp = get_temp(i + j);
if i == 0 {
gen.extend(quote!{
gen.extend(quote! {
let #temp = ::ff::mac_with_carry(0, (#a.0).0[#i], (#b.0).0[#j], &mut carry);
});
} else {
@@ -761,7 +755,7 @@ fn prime_field_impl(
let temp = get_temp(i + limbs);
gen.extend(quote!{
gen.extend(quote! {
let #temp = carry;
});
}
@@ -772,29 +766,29 @@ fn prime_field_impl(
proc_macro2::Punct::new(',', proc_macro2::Spacing::Alone),
);
gen.extend(quote!{
gen.extend(quote! {
self.mont_reduce(#mont_calling);
});
gen
}
let squaring_impl = sqr_impl(quote!{self}, limbs);
let multiply_impl = mul_impl(quote!{self}, quote!{other}, limbs);
let squaring_impl = sqr_impl(quote! {self}, limbs);
let multiply_impl = mul_impl(quote! {self}, quote! {other}, limbs);
let montgomery_impl = mont_impl(limbs);
// (self.0).0[0], (self.0).0[1], ..., 0, 0, 0, 0, ...
let mut into_repr_params = proc_macro2::TokenStream::new();
into_repr_params.append_separated(
(0..limbs)
.map(|i| quote!{ (self.0).0[#i] })
.chain((0..limbs).map(|_| quote!{0})),
.map(|i| quote! { (self.0).0[#i] })
.chain((0..limbs).map(|_| quote! {0})),
proc_macro2::Punct::new(',', proc_macro2::Spacing::Alone),
);
let top_limb_index = limbs - 1;
quote!{
quote! {
impl ::std::marker::Copy for #name { }
impl ::std::clone::Clone for #name {
@@ -839,22 +833,6 @@ fn prime_field_impl(
}
}
impl ::rand::Rand for #name {
/// Computes a uniformly random element using rejection sampling.
fn rand<R: ::rand::Rng>(rng: &mut R) -> Self {
loop {
let mut tmp = #name(#repr::rand(rng));
// Mask away the unused bits at the beginning.
tmp.0.as_mut()[#top_limb_index] &= 0xffffffffffffffff >> REPR_SHAVE_BITS;
if tmp.is_valid() {
return tmp
}
}
}
}
impl From<#name> for #repr {
fn from(e: #name) -> #repr {
e.into_repr()
@@ -904,6 +882,26 @@ fn prime_field_impl(
}
impl ::ff::Field for #name {
/// Computes a uniformly random element using rejection sampling.
fn random<R: ::rand_core::RngCore>(rng: &mut R) -> Self {
loop {
let mut tmp = {
let mut repr = [0u64; #limbs];
for i in 0..#limbs {
repr[i] = rng.next_u64();
}
#name(#repr(repr))
};
// Mask away the unused most-significant bits.
tmp.0.as_mut()[#top_limb_index] &= 0xffffffffffffffff >> REPR_SHAVE_BITS;
if tmp.is_valid() {
return tmp
}
}
}
#[inline]
fn zero() -> Self {
#name(#repr::from(0))

View File

@@ -1,7 +1,7 @@
#![allow(unused_imports)]
extern crate byteorder;
extern crate rand;
extern crate rand_core;
#[cfg(feature = "derive")]
#[macro_use]
@@ -10,14 +10,18 @@ extern crate ff_derive;
#[cfg(feature = "derive")]
pub use ff_derive::*;
use rand_core::RngCore;
use std::error::Error;
use std::fmt;
use std::io::{self, Read, Write};
/// This trait represents an element of a field.
pub trait Field:
Sized + Eq + Copy + Clone + Send + Sync + fmt::Debug + fmt::Display + 'static + rand::Rand
Sized + Eq + Copy + Clone + Send + Sync + fmt::Debug + fmt::Display + 'static
{
/// Returns an element chosen uniformly at random using a user-provided RNG.
fn random<R: RngCore>(rng: &mut R) -> Self;
/// Returns the zero element of the field, the additive identity.
fn zero() -> Self;
@@ -100,7 +104,6 @@ pub trait PrimeFieldRepr:
+ fmt::Debug
+ fmt::Display
+ 'static
+ rand::Rand
+ AsRef<[u64]>
+ AsMut<[u64]>
+ From<u64>

View File

@@ -14,4 +14,5 @@ repository = "https://github.com/ebfull/group"
[dependencies]
ff = { path = "../ff" }
rand = "0.4"
rand = "0.7"
rand_xorshift = "0.2"

View File

@@ -1,7 +1,9 @@
extern crate ff;
extern crate rand;
extern crate rand_xorshift;
use ff::{PrimeField, PrimeFieldDecodingError, ScalarEngine, SqrtField};
use rand::RngCore;
use std::error::Error;
use std::fmt;
@@ -13,23 +15,16 @@ pub use self::wnaf::Wnaf;
/// Projective representation of an elliptic curve point guaranteed to be
/// in the correct prime order subgroup.
pub trait CurveProjective:
PartialEq
+ Eq
+ Sized
+ Copy
+ Clone
+ Send
+ Sync
+ fmt::Debug
+ fmt::Display
+ rand::Rand
+ 'static
PartialEq + Eq + Sized + Copy + Clone + Send + Sync + fmt::Debug + fmt::Display + 'static
{
type Engine: ScalarEngine<Fr = Self::Scalar>;
type Scalar: PrimeField + SqrtField;
type Base: SqrtField;
type Affine: CurveAffine<Projective = Self, Scalar = Self::Scalar>;
/// Returns an element chosen uniformly at random using a user-provided RNG.
fn random<R: RngCore>(rng: &mut R) -> Self;
/// Returns the additive identity.
fn zero() -> Self;

View File

@@ -1,9 +1,14 @@
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use ff::{Field, PrimeField};
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use {CurveAffine, CurveProjective, EncodedPoint};
pub fn curve_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
// Negation edge case with zero.
{
@@ -21,7 +26,7 @@ pub fn curve_tests<G: CurveProjective>() {
// Addition edge cases with zero
{
let mut r = G::rand(&mut rng);
let mut r = G::random(&mut rng);
let rcopy = r;
r.add_assign(&G::zero());
assert_eq!(r, rcopy);
@@ -45,9 +50,10 @@ pub fn curve_tests<G: CurveProjective>() {
// Transformations
{
let a = G::rand(&mut rng);
let a = G::random(&mut rng);
let b = a.into_affine().into_projective();
let c = a.into_affine()
let c = a
.into_affine()
.into_projective()
.into_affine()
.into_projective();
@@ -65,11 +71,12 @@ pub fn curve_tests<G: CurveProjective>() {
}
fn random_wnaf_tests<G: CurveProjective>() {
use ff::PrimeField;
use wnaf::*;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
{
let mut table = vec![];
@@ -77,8 +84,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
for w in 2..14 {
for _ in 0..100 {
let g = G::rand(&mut rng);
let s = G::Scalar::rand(&mut rng).into_repr();
let g = G::random(&mut rng);
let s = G::Scalar::random(&mut rng).into_repr();
let mut g1 = g;
g1.mul_assign(s);
@@ -95,8 +102,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
fn only_compiles_if_send<S: Send>(_: &S) {}
for _ in 0..100 {
let g = G::rand(&mut rng);
let s = G::Scalar::rand(&mut rng).into_repr();
let g = G::random(&mut rng);
let s = G::Scalar::random(&mut rng).into_repr();
let mut g1 = g;
g1.mul_assign(s);
@@ -129,7 +136,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
wnaf.base(G::random(&mut rng), 1)
.scalar(G::Scalar::random(&mut rng).into_repr());
}
wnaf.base(g, 1).scalar(s)
};
@@ -137,7 +145,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
wnaf.base(G::random(&mut rng), 1)
.scalar(G::Scalar::random(&mut rng).into_repr());
}
wnaf.scalar(s).base(g)
};
@@ -145,7 +154,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
wnaf.base(G::random(&mut rng), 1)
.scalar(G::Scalar::random(&mut rng).into_repr());
}
let mut shared = wnaf.base(g, 1).shared();
@@ -157,7 +167,8 @@ fn random_wnaf_tests<G: CurveProjective>() {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
wnaf.base(G::random(&mut rng), 1)
.scalar(G::Scalar::random(&mut rng).into_repr());
}
let mut shared = wnaf.scalar(s).shared();
@@ -179,14 +190,15 @@ fn random_wnaf_tests<G: CurveProjective>() {
}
fn random_negation_tests<G: CurveProjective>() {
use ff::Field;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let r = G::rand(&mut rng);
let r = G::random(&mut rng);
let s = G::Scalar::rand(&mut rng);
let s = G::Scalar::random(&mut rng);
let mut sneg = s;
sneg.negate();
@@ -210,11 +222,14 @@ fn random_negation_tests<G: CurveProjective>() {
}
fn random_doubling_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let mut a = G::rand(&mut rng);
let mut b = G::rand(&mut rng);
let mut a = G::random(&mut rng);
let mut b = G::random(&mut rng);
// 2(a + b)
let mut tmp1 = a;
@@ -237,15 +252,18 @@ fn random_doubling_tests<G: CurveProjective>() {
}
fn random_multiplication_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let mut a = G::rand(&mut rng);
let mut b = G::rand(&mut rng);
let mut a = G::random(&mut rng);
let mut b = G::random(&mut rng);
let a_affine = a.into_affine();
let b_affine = b.into_affine();
let s = G::Scalar::rand(&mut rng);
let s = G::Scalar::random(&mut rng);
// s ( a + b )
let mut tmp1 = a;
@@ -269,12 +287,15 @@ fn random_multiplication_tests<G: CurveProjective>() {
}
fn random_addition_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let a = G::rand(&mut rng);
let b = G::rand(&mut rng);
let c = G::rand(&mut rng);
let a = G::random(&mut rng);
let b = G::random(&mut rng);
let c = G::random(&mut rng);
let a_affine = a.into_affine();
let b_affine = b.into_affine();
let c_affine = c.into_affine();
@@ -347,10 +368,13 @@ fn random_addition_tests<G: CurveProjective>() {
}
fn random_transformation_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let g = G::rand(&mut rng);
let g = G::random(&mut rng);
let g_affine = g.into_affine();
let g_projective = g_affine.into_projective();
assert_eq!(g, g_projective);
@@ -358,24 +382,25 @@ fn random_transformation_tests<G: CurveProjective>() {
// Batch normalization
for _ in 0..10 {
let mut v = (0..1000).map(|_| G::rand(&mut rng)).collect::<Vec<_>>();
let mut v = (0..1000).map(|_| G::random(&mut rng)).collect::<Vec<_>>();
for i in &v {
assert!(!i.is_normalized());
}
use rand::distributions::{IndependentSample, Range};
let between = Range::new(0, 1000);
use rand::distributions::{Distribution, Uniform};
let between = Uniform::new(0, 1000);
// Sprinkle in some normalized points
for _ in 0..5 {
v[between.ind_sample(&mut rng)] = G::zero();
v[between.sample(&mut rng)] = G::zero();
}
for _ in 0..5 {
let s = between.ind_sample(&mut rng);
let s = between.sample(&mut rng);
v[s] = v[s].into_affine().into_projective();
}
let expected_v = v.iter()
let expected_v = v
.iter()
.map(|v| v.into_affine().into_projective())
.collect::<Vec<_>>();
G::batch_normalization(&mut v);
@@ -389,7 +414,10 @@ fn random_transformation_tests<G: CurveProjective>() {
}
fn random_encoding_tests<G: CurveAffine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
assert_eq!(
G::zero().into_uncompressed().into_affine().unwrap(),
@@ -402,7 +430,7 @@ fn random_encoding_tests<G: CurveAffine>() {
);
for _ in 0..1000 {
let mut r = G::Projective::rand(&mut rng).into_affine();
let mut r = G::Projective::random(&mut rng).into_affine();
let uncompressed = r.into_uncompressed();
let de_uncompressed = uncompressed.into_affine().unwrap();
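
These test hunks track the move from rand 0.4 to rand 0.7 / rand_core 0.5: `Rand::rand` becomes `random()` constructors, `Range`/`IndependentSample` becomes `Uniform`/`Distribution`, and the XorShiftRng seed becomes a 16-byte array. The sketch below is a minimal, hypothetical helper (not code from this PR) showing the new calls, assuming the in-tree `group::CurveProjective` and `ff::Field` traits used above.

use ff::Field;
use group::CurveProjective;
use rand::distributions::{Distribution, Uniform};
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;

// Hypothetical helper mirroring the migrated test code above.
fn sample_pair<G: CurveProjective>() -> (G, G::Scalar) {
    // The new 16-byte seed is the little-endian byte encoding of the old
    // [0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654] u32 seed words.
    let mut rng = XorShiftRng::from_seed([
        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
        0xbc, 0xe5,
    ]);

    // G::rand / G::Scalar::rand become random() over any rand_core RNG.
    let g = G::random(&mut rng);
    let s = G::Scalar::random(&mut rng);

    // Range::ind_sample becomes Uniform + Distribution::sample.
    let _index: usize = Uniform::new(0usize, 1000).sample(&mut rng);

    (g, s)
}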

View File

@@ -15,16 +15,14 @@ crate-type = ["staticlib"]
[dependencies]
bellman = { path = "../bellman" }
blake2b_simd = "0.5"
blake2s_simd = "0.5"
ff = { path = "../ff" }
libc = "0.2"
pairing = { path = "../pairing" }
lazy_static = "1"
byteorder = "1"
rand = "0.4"
sapling-crypto = { path = "../sapling-crypto" }
rand_core = "0.5"
rand_os = "0.2"
zcash_primitives = { path = "../zcash_primitives" }
zcash_proofs = { path = "../zcash_proofs" }
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"

View File

@@ -1,4 +1,4 @@
use blake2_rfc::blake2b::{Blake2b, Blake2bResult};
use blake2b_simd::{Hash as Blake2bHash, Params as Blake2bParams, State as Blake2bState};
use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::Cursor;
use std::mem::size_of;
@@ -33,7 +33,7 @@ impl Params {
}
impl Node {
fn new(p: &Params, state: &Blake2b, i: u32) -> Self {
fn new(p: &Params, state: &Blake2bState, i: u32) -> Self {
let hash = generate_hash(state, i / p.indices_per_hash_output());
let start = ((i % p.indices_per_hash_output()) * p.n / 8) as usize;
let end = start + (p.n as usize) / 8;
@@ -99,15 +99,18 @@ impl Node {
}
}
fn initialise_state(n: u32, k: u32, digest_len: u8) -> Blake2b {
fn initialise_state(n: u32, k: u32, digest_len: u8) -> Blake2bState {
let mut personalization: Vec<u8> = Vec::from("ZcashPoW");
personalization.write_u32::<LittleEndian>(n).unwrap();
personalization.write_u32::<LittleEndian>(k).unwrap();
Blake2b::with_params(digest_len as usize, &[], &[], &personalization)
Blake2bParams::new()
.hash_length(digest_len as usize)
.personal(&personalization)
.to_state()
}
fn generate_hash(base_state: &Blake2b, i: u32) -> Blake2bResult {
fn generate_hash(base_state: &Blake2bState, i: u32) -> Blake2bHash {
let mut lei = [0u8; 4];
(&mut lei[..]).write_u32::<LittleEndian>(i).unwrap();
@@ -249,7 +252,7 @@ pub fn is_valid_solution_iterative(
return rows[0].is_zero(hash_len);
}
fn tree_validator(p: &Params, state: &Blake2b, indices: &[u32]) -> Option<Node> {
fn tree_validator(p: &Params, state: &Blake2bState, indices: &[u32]) -> Option<Node> {
if indices.len() > 1 {
let end = indices.len();
let mid = end / 2;
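
The equihash hunks above swap blake2_rfc for blake2b_simd 0.5. Below is a small sketch of the replacement Params-based API with the same "ZcashPoW" personalization; it is an illustration of the pattern, not the crate's exact helpers.

use blake2b_simd::{Params as Blake2bParams, State as Blake2bState};
use byteorder::{LittleEndian, WriteBytesExt};

// Builds the personalized base state, as initialise_state does above.
fn zcash_pow_state(n: u32, k: u32, digest_len: u8) -> Blake2bState {
    // Personalization is "ZcashPoW" followed by n and k as little-endian u32s.
    let mut personalization: Vec<u8> = Vec::from("ZcashPoW");
    personalization.write_u32::<LittleEndian>(n).unwrap();
    personalization.write_u32::<LittleEndian>(k).unwrap();

    Blake2bParams::new()
        .hash_length(digest_len as usize)
        .personal(&personalization)
        .to_state()
}

// Hashes one index against a cloned base state, mirroring generate_hash.
fn hash_index(base_state: &Blake2bState, i: u32) -> Vec<u8> {
    let mut lei = [0u8; 4];
    (&mut lei[..]).write_u32::<LittleEndian>(i).unwrap();

    let mut state = base_state.clone();
    state.update(&lei);
    state.finalize().as_bytes().to_vec()
}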

View File

@@ -1,46 +1,46 @@
extern crate bellman;
extern crate blake2_rfc;
extern crate blake2b_simd;
extern crate blake2s_simd;
extern crate byteorder;
extern crate ff;
extern crate libc;
extern crate pairing;
extern crate rand;
extern crate sapling_crypto;
extern crate rand_core;
extern crate rand_os;
extern crate zcash_primitives;
extern crate zcash_proofs;
extern crate lazy_static;
use ff::{BitIterator, PrimeField, PrimeFieldRepr};
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr, FrRepr};
use sapling_crypto::{
circuit::multipack,
use zcash_primitives::{
constants::CRH_IVK_PERSONALIZATION,
jubjub::{
edwards,
fs::{Fs, FsRepr},
FixedGenerators, JubjubEngine, JubjubParams, PrimeOrder, ToUniform, Unknown,
},
pedersen_hash::{pedersen_hash, Personalization},
redjubjub::{self, Signature},
};
use sapling_crypto::circuit::sapling::TREE_DEPTH as SAPLING_TREE_DEPTH;
use sapling_crypto::circuit::sprout::{self, TREE_DEPTH as SPROUT_TREE_DEPTH};
use zcash_proofs::circuit::sapling::TREE_DEPTH as SAPLING_TREE_DEPTH;
use zcash_proofs::circuit::sprout::{self, TREE_DEPTH as SPROUT_TREE_DEPTH};
use bellman::gadgets::multipack;
use bellman::groth16::{
create_random_proof, verify_proof, Parameters, PreparedVerifyingKey, Proof,
};
use blake2_rfc::blake2s::Blake2s;
use blake2s_simd::Params as Blake2sParams;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rand::{OsRng, Rng};
use rand_core::RngCore;
use rand_os::OsRng;
use std::io::BufReader;
use libc::{c_char, c_uchar, int64_t, size_t, uint32_t, uint64_t};
use libc::{c_char, c_uchar, size_t};
use std::ffi::CStr;
use std::fs::File;
use std::path::{Path, PathBuf};
@@ -56,11 +56,18 @@ use std::ffi::OsString;
#[cfg(target_os = "windows")]
use std::os::windows::ffi::OsStringExt;
use sapling_crypto::primitives::{ProofGenerationKey, ViewingKey};
use zcash_primitives::{note_encryption::sapling_ka_agree, sapling::spend_sig, zip32, JUBJUB};
use zcash_primitives::{
merkle_tree::CommitmentTreeWitness,
note_encryption::sapling_ka_agree,
primitives::{Diversifier, Note, PaymentAddress, ProofGenerationKey, ViewingKey},
redjubjub::{self, Signature},
sapling::{merkle_hash, spend_sig},
transaction::components::Amount,
zip32, JUBJUB,
};
use zcash_proofs::{
load_parameters,
sapling::{CommitmentTreeWitness, SaplingProvingContext, SaplingVerificationContext},
sapling::{SaplingProvingContext, SaplingVerificationContext},
};
pub mod equihash;
@@ -228,7 +235,7 @@ fn init_zksnark_params(
#[no_mangle]
pub extern "system" fn librustzcash_tree_uncommitted(result: *mut [c_uchar; 32]) {
let tmp = sapling_crypto::primitives::Note::<Bls12>::uncommitted().into_repr();
let tmp = Note::<Bls12>::uncommitted().into_repr();
// Should be okay, caller is responsible for ensuring the pointer
// is a valid pointer to 32 bytes that can be mutated.
@@ -254,28 +261,7 @@ pub extern "system" fn librustzcash_merkle_hash(
// size of the representation
let b_repr = read_le(unsafe { &(&*b)[..] });
let mut lhs = [false; 256];
let mut rhs = [false; 256];
for (a, b) in lhs.iter_mut().rev().zip(BitIterator::new(a_repr)) {
*a = b;
}
for (a, b) in rhs.iter_mut().rev().zip(BitIterator::new(b_repr)) {
*a = b;
}
let tmp = pedersen_hash::<Bls12, _>(
Personalization::MerkleTree(depth),
lhs.iter()
.map(|&x| x)
.take(Fr::NUM_BITS as usize)
.chain(rhs.iter().map(|&x| x).take(Fr::NUM_BITS as usize)),
&JUBJUB,
)
.into_xy()
.0
.into_repr();
let tmp = merkle_hash(depth, &a_repr, &b_repr);
// Should be okay, caller is responsible for ensuring the pointer
// is a valid pointer to 32 bytes that can be mutated.
@@ -336,7 +322,10 @@ pub extern "system" fn librustzcash_crh_ivk(
let ak = unsafe { &*ak };
let nk = unsafe { &*nk };
let mut h = Blake2s::with_params(32, &[], &[], CRH_IVK_PERSONALIZATION);
let mut h = Blake2sParams::new()
.hash_length(32)
.personal(CRH_IVK_PERSONALIZATION)
.to_state();
h.update(ak);
h.update(nk);
let mut h = h.finalize().as_ref().to_vec();
@@ -351,7 +340,7 @@ pub extern "system" fn librustzcash_crh_ivk(
#[no_mangle]
pub extern "system" fn librustzcash_check_diversifier(diversifier: *const [c_uchar; 11]) -> bool {
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
let diversifier = Diversifier(unsafe { *diversifier });
diversifier.g_d::<Bls12>(&JUBJUB).is_some()
}
@@ -362,7 +351,7 @@ pub extern "system" fn librustzcash_ivk_to_pkd(
result: *mut [c_uchar; 32],
) -> bool {
let ivk = read_fs(unsafe { &*ivk });
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
let diversifier = Diversifier(unsafe { *diversifier });
if let Some(g_d) = diversifier.g_d::<Bls12>(&JUBJUB) {
let pk_d = g_d.mul(ivk, &JUBJUB);
@@ -399,11 +388,9 @@ fn test_gen_r() {
#[no_mangle]
pub extern "system" fn librustzcash_sapling_generate_r(result: *mut [c_uchar; 32]) {
// create random 64 byte buffer
let mut rng = OsRng::new().expect("should be able to construct RNG");
let mut rng = OsRng;
let mut buffer = [0u8; 64];
for i in 0..buffer.len() {
buffer[i] = rng.gen();
}
rng.fill_bytes(&mut buffer);
// reduce to uniform value
let r = <Bls12 as JubjubEngine>::Fs::to_uniform(&buffer[..]);
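
librustzcash_sapling_generate_r now uses the zero-sized rand_os::OsRng and RngCore::fill_bytes instead of constructing the RNG fallibly and filling the buffer byte by byte. A minimal sketch of that idiom as a hypothetical helper, assuming the zcash_primitives::jubjub API imported at the top of this file:

use pairing::bls12_381::Bls12;
use rand_core::RngCore;
use rand_os::OsRng;
use zcash_primitives::jubjub::{JubjubEngine, ToUniform};

// Hypothetical helper: OsRng is a unit struct in rand_os 0.2, and the
// 64-byte buffer is filled in a single call.
fn random_fs() -> <Bls12 as JubjubEngine>::Fs {
    let mut rng = OsRng;
    let mut buffer = [0u8; 64];
    rng.fill_bytes(&mut buffer);

    // Reduce the 64 uniform bytes to a uniform scalar, as generate_r does.
    <Bls12 as JubjubEngine>::Fs::to_uniform(&buffer[..])
}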
@@ -417,10 +404,10 @@ pub extern "system" fn librustzcash_sapling_generate_r(result: *mut [c_uchar; 32
fn priv_get_note(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: uint64_t,
value: u64,
r: *const [c_uchar; 32],
) -> Result<sapling_crypto::primitives::Note<Bls12>, ()> {
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
) -> Result<Note<Bls12>, ()> {
let diversifier = Diversifier(unsafe { *diversifier });
let g_d = match diversifier.g_d::<Bls12>(&JUBJUB) {
Some(g_d) => g_d,
None => return Err(()),
@@ -442,7 +429,7 @@ fn priv_get_note(
Err(_) => return Err(()),
};
let note = sapling_crypto::primitives::Note {
let note = Note {
value,
g_d,
pk_d,
@@ -457,11 +444,11 @@ fn priv_get_note(
pub extern "system" fn librustzcash_sapling_compute_nf(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: uint64_t,
value: u64,
r: *const [c_uchar; 32],
ak: *const [c_uchar; 32],
nk: *const [c_uchar; 32],
position: uint64_t,
position: u64,
result: *mut [c_uchar; 32],
) -> bool {
let note = match priv_get_note(diversifier, pk_d, value, r) {
@@ -502,7 +489,7 @@ pub extern "system" fn librustzcash_sapling_compute_nf(
pub extern "system" fn librustzcash_sapling_compute_cm(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
value: uint64_t,
value: u64,
r: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) -> bool {
@@ -551,7 +538,7 @@ pub extern "system" fn librustzcash_sapling_ka_derivepublic(
esk: *const [c_uchar; 32],
result: *mut [c_uchar; 32],
) -> bool {
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
let diversifier = Diversifier(unsafe { *diversifier });
// Compute g_d from the diversifier
let g_d = match diversifier.g_d::<Bls12>(&JUBJUB) {
@@ -575,8 +562,8 @@ pub extern "system" fn librustzcash_sapling_ka_derivepublic(
#[no_mangle]
pub extern "system" fn librustzcash_eh_isvalid(
n: uint32_t,
k: uint32_t,
n: u32,
k: u32,
input: *const c_uchar,
input_len: size_t,
nonce: *const c_uchar,
@@ -713,10 +700,15 @@ pub extern "system" fn librustzcash_sapling_check_output(
#[no_mangle]
pub extern "system" fn librustzcash_sapling_final_check(
ctx: *mut SaplingVerificationContext,
value_balance: int64_t,
value_balance: i64,
binding_sig: *const [c_uchar; 64],
sighash_value: *const [c_uchar; 32],
) -> bool {
let value_balance = match Amount::from_i64(value_balance) {
Ok(vb) => vb,
Err(()) => return false,
};
// Deserialize the signature
let binding_sig = match Signature::read(&(unsafe { &*binding_sig })[..]) {
Ok(sig) => sig,
@@ -741,31 +733,31 @@ pub extern "system" fn librustzcash_sprout_prove(
// First input
in_sk1: *const [c_uchar; 32],
in_value1: uint64_t,
in_value1: u64,
in_rho1: *const [c_uchar; 32],
in_r1: *const [c_uchar; 32],
in_auth1: *const [c_uchar; 1 + 33 * SPROUT_TREE_DEPTH + 8],
// Second input
in_sk2: *const [c_uchar; 32],
in_value2: uint64_t,
in_value2: u64,
in_rho2: *const [c_uchar; 32],
in_r2: *const [c_uchar; 32],
in_auth2: *const [c_uchar; 1 + 33 * SPROUT_TREE_DEPTH + 8],
// First output
out_pk1: *const [c_uchar; 32],
out_value1: uint64_t,
out_value1: u64,
out_r1: *const [c_uchar; 32],
// Second output
out_pk2: *const [c_uchar; 32],
out_value2: uint64_t,
out_value2: u64,
out_r2: *const [c_uchar; 32],
// Public value
vpub_old: uint64_t,
vpub_new: uint64_t,
vpub_old: u64,
vpub_new: u64,
) {
let phi = unsafe { *phi };
let rt = unsafe { *rt };
@@ -871,7 +863,7 @@ pub extern "system" fn librustzcash_sprout_prove(
drop(sprout_fs);
// Initialize secure RNG
let mut rng = OsRng::new().expect("should be able to construct RNG");
let mut rng = OsRng;
let proof = create_random_proof(js, &params, &mut rng).expect("proving should not fail");
@@ -891,8 +883,8 @@ pub extern "system" fn librustzcash_sprout_verify(
nf2: *const [c_uchar; 32],
cm1: *const [c_uchar; 32],
cm2: *const [c_uchar; 32],
vpub_old: uint64_t,
vpub_new: uint64_t,
vpub_old: u64,
vpub_new: u64,
) -> bool {
// Prepare the public input for the verifier
let mut public_input = Vec::with_capacity((32 * 8) + (8 * 2));
@@ -936,7 +928,7 @@ pub extern "system" fn librustzcash_sapling_output_proof(
diversifier: *const [c_uchar; 11],
pk_d: *const [c_uchar; 32],
rcm: *const [c_uchar; 32],
value: uint64_t,
value: u64,
cv: *mut [c_uchar; 32],
zkproof: *mut [c_uchar; GROTH_PROOF_SIZE],
) -> bool {
@@ -947,7 +939,7 @@ pub extern "system" fn librustzcash_sapling_output_proof(
};
// Grab the diversifier from the caller.
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
let diversifier = Diversifier(unsafe { *diversifier });
// Grab pk_d from the caller.
let pk_d = match edwards::Point::<Bls12, Unknown>::read(&(unsafe { &*pk_d })[..], &JUBJUB) {
@@ -962,7 +954,7 @@ pub extern "system" fn librustzcash_sapling_output_proof(
};
// Construct a payment address
let payment_address = sapling_crypto::primitives::PaymentAddress {
let payment_address = PaymentAddress {
pk_d: pk_d,
diversifier: diversifier,
};
@@ -1015,8 +1007,11 @@ pub extern "system" fn librustzcash_sapling_spend_sig(
Err(_) => return false,
};
// Initialize secure RNG
let mut rng = OsRng;
// Do the signing
let sig = spend_sig(ask, ar, unsafe { &*sighash }, &JUBJUB);
let sig = spend_sig(ask, ar, unsafe { &*sighash }, &mut rng, &JUBJUB);
// Write out the signature
sig.write(&mut (unsafe { &mut *result })[..])
@@ -1028,10 +1023,15 @@ pub extern "system" fn librustzcash_sapling_spend_sig(
#[no_mangle]
pub extern "system" fn librustzcash_sapling_binding_sig(
ctx: *const SaplingProvingContext,
value_balance: int64_t,
value_balance: i64,
sighash: *const [c_uchar; 32],
result: *mut [c_uchar; 64],
) -> bool {
let value_balance = match Amount::from_i64(value_balance) {
Ok(vb) => vb,
Err(()) => return false,
};
// Sign
let sig = match unsafe { &*ctx }.binding_sig(value_balance, unsafe { &*sighash }, &JUBJUB) {
Ok(s) => s,
@@ -1053,7 +1053,7 @@ pub extern "system" fn librustzcash_sapling_spend_proof(
diversifier: *const [c_uchar; 11],
rcm: *const [c_uchar; 32],
ar: *const [c_uchar; 32],
value: uint64_t,
value: u64,
anchor: *const [c_uchar; 32],
witness: *const [c_uchar; 1 + 33 * SAPLING_TREE_DEPTH + 8],
cv: *mut [c_uchar; 32],
@@ -1085,7 +1085,7 @@ pub extern "system" fn librustzcash_sapling_spend_proof(
};
// Grab the diversifier from the caller
let diversifier = sapling_crypto::primitives::Diversifier(unsafe { *diversifier });
let diversifier = Diversifier(unsafe { *diversifier });
// The caller chooses the note randomness
let rcm = match Fs::from_repr(read_fs(&(unsafe { &*rcm })[..])) {
@@ -1174,7 +1174,7 @@ pub extern "system" fn librustzcash_zip32_xsk_master(
#[no_mangle]
pub extern "system" fn librustzcash_zip32_xsk_derive(
xsk_parent: *const [c_uchar; 169],
i: uint32_t,
i: u32,
xsk_i: *mut [c_uchar; 169],
) {
let xsk_parent = zip32::ExtendedSpendingKey::read(&unsafe { *xsk_parent }[..])
@@ -1190,7 +1190,7 @@ pub extern "system" fn librustzcash_zip32_xsk_derive(
#[no_mangle]
pub extern "system" fn librustzcash_zip32_xfvk_derive(
xfvk_parent: *const [c_uchar; 169],
i: uint32_t,
i: u32,
xfvk_i: *mut [c_uchar; 169],
) -> bool {
let xfvk_parent = zip32::ExtendedFullViewingKey::read(&unsafe { *xfvk_parent }[..])
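
The librustzcash_sapling_final_check and librustzcash_sapling_binding_sig hunks above add a guard that converts the raw i64 crossing the FFI boundary into an Amount before use. The same pattern as a small hypothetical helper (checked_value_balance is not a name from this PR):

use zcash_primitives::transaction::components::Amount;

// Amount::from_i64 returns Err(()) when the raw value is outside the range
// the type accepts, mirroring the match statements in the hunks above.
fn checked_value_balance(raw: i64) -> Option<Amount> {
    Amount::from_i64(raw).ok()
}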

View File

@@ -1,8 +1,9 @@
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::Bls12;
use rand::{OsRng, Rng};
use sapling_crypto::jubjub::{edwards, JubjubBls12};
use sapling_crypto::primitives::{Diversifier, ViewingKey};
use rand_core::RngCore;
use rand_os::OsRng;
use zcash_primitives::jubjub::{edwards, JubjubBls12};
use zcash_primitives::primitives::{Diversifier, ViewingKey};
use {
librustzcash_sapling_generate_r, librustzcash_sapling_ka_agree,
@@ -12,7 +13,7 @@ use {
#[test]
fn test_key_agreement() {
let params = JubjubBls12::new();
let mut rng = OsRng::new().unwrap();
let mut rng = OsRng;
// Create random viewing key
let vk = ViewingKey::<Bls12> {
@@ -22,7 +23,9 @@ fn test_key_agreement() {
// Create a random address with the viewing key
let addr = loop {
match vk.into_payment_address(Diversifier(rng.gen()), &params) {
let mut d = [0; 11];
rng.fill_bytes(&mut d);
match vk.into_payment_address(Diversifier(d), &params) {
Some(a) => break a,
None => {}
}
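
rand 0.7 drops the Rand impls behind rng.gen(), so the test above now fills the 11 diversifier bytes explicitly. The same idiom as a small hypothetical helper:

use rand_core::RngCore;
use zcash_primitives::primitives::Diversifier;

// Hypothetical helper mirroring the loop above: fill the raw bytes, then
// wrap them in a Diversifier.
fn random_diversifier<R: RngCore>(rng: &mut R) -> Diversifier {
    let mut d = [0u8; 11];
    rng.fill_bytes(&mut d);
    Diversifier(d)
}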

View File

@@ -1,6 +1,6 @@
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::Bls12;
use sapling_crypto::{
use zcash_primitives::{
jubjub::{fs::FsRepr, FixedGenerators, JubjubEngine, JubjubParams},
primitives::{Diversifier, ProofGenerationKey},
};
@@ -28,6 +28,8 @@ fn key_components() {
note_v: u64,
note_r: [u8; 32],
note_cm: [u8; 32],
note_pos: u64,
note_nf: [u8; 32],
};
// From https://github.com/zcash-hackworks/zcash-test-vectors/blob/master/sapling_key_components.py
@@ -87,6 +89,12 @@ fn key_components() {
0x18, 0x50, 0xc9, 0xfe, 0xd4, 0x4f, 0xce, 0x08, 0x06, 0x27, 0x8f, 0x08, 0x3e, 0xf2,
0xdd, 0x07, 0x64, 0x39,
],
note_pos: 0,
note_nf: [
0x44, 0xfa, 0xd6, 0x56, 0x4f, 0xfd, 0xec, 0x9f, 0xa1, 0x9c, 0x43, 0xa2, 0x8f, 0x86,
0x1d, 0x5e, 0xbf, 0x60, 0x23, 0x46, 0x00, 0x7d, 0xe7, 0x62, 0x67, 0xd9, 0x75, 0x27,
0x47, 0xab, 0x40, 0x63,
],
},
TestVector {
sk: [
@@ -143,6 +151,12 @@ fn key_components() {
0x89, 0xe1, 0x0e, 0x26, 0x6b, 0xcf, 0xa3, 0x1c, 0x31, 0xb2, 0x9a, 0x53, 0xae, 0x72,
0xca, 0xd4, 0x69, 0x50,
],
note_pos: 763714296,
note_nf: [
0x67, 0x9e, 0xb0, 0xc3, 0xa7, 0x57, 0xe2, 0xae, 0x83, 0xcd, 0xb4, 0x2a, 0x1a, 0xb2,
0x59, 0xd7, 0x83, 0x88, 0x31, 0x54, 0x19, 0xad, 0xc7, 0x1d, 0x2e, 0x37, 0x63, 0x17,
0x4c, 0x2e, 0x9d, 0x93,
],
},
TestVector {
sk: [
@@ -199,6 +213,12 @@ fn key_components() {
0xb7, 0x40, 0x82, 0x96, 0x66, 0x17, 0x70, 0xb1, 0x01, 0xb0, 0xaa, 0x87, 0x83, 0x9f,
0x4e, 0x55, 0xf1, 0x51,
],
note_pos: 1527428592,
note_nf: [
0xe9, 0x8f, 0x6a, 0x8f, 0x34, 0xff, 0x49, 0x80, 0x59, 0xb3, 0xc7, 0x31, 0xb9, 0x1f,
0x45, 0x11, 0x08, 0xc4, 0x95, 0x4d, 0x91, 0x94, 0x84, 0x36, 0x1c, 0xf9, 0xb4, 0x8f,
0x59, 0xae, 0x1d, 0x14,
],
},
TestVector {
sk: [
@@ -255,6 +275,12 @@ fn key_components() {
0xbd, 0x10, 0x5d, 0x88, 0x39, 0x21, 0x2e, 0x0d, 0x16, 0x44, 0xb9, 0xd5, 0x5c, 0xaa,
0x60, 0xd1, 0x9b, 0x6c,
],
note_pos: 2291142888,
note_nf: [
0x55, 0x47, 0xaa, 0x12, 0xff, 0x80, 0xa6, 0xb3, 0x30, 0x4e, 0x3b, 0x05, 0x86, 0x56,
0x47, 0x2a, 0xbd, 0x2c, 0x81, 0x83, 0xb5, 0x9d, 0x07, 0x37, 0xb9, 0x3c, 0xee, 0x75,
0x8b, 0xec, 0x47, 0xa1,
],
},
TestVector {
sk: [
@@ -311,6 +337,12 @@ fn key_components() {
0xcf, 0x1e, 0x67, 0x15, 0xbf, 0xe7, 0x0b, 0x63, 0x2d, 0x04, 0x4b, 0x26, 0xfb, 0x2b,
0xc7, 0x1b, 0x7f, 0x36,
],
note_pos: 3054857184,
note_nf: [
0x8a, 0x9a, 0xbd, 0xa3, 0xd4, 0xef, 0x85, 0xca, 0xf2, 0x2b, 0xfa, 0xf2, 0xc4, 0x8f,
0x62, 0x38, 0x2a, 0x73, 0xa1, 0x62, 0x4e, 0xb8, 0xeb, 0x2b, 0xd0, 0x0d, 0x27, 0x03,
0x01, 0xbf, 0x3d, 0x13,
],
},
TestVector {
sk: [
@@ -367,6 +399,12 @@ fn key_components() {
0x1d, 0x74, 0xc5, 0xbc, 0xf2, 0xe1, 0xef, 0x95, 0x66, 0x90, 0x44, 0x73, 0x01, 0x69,
0xde, 0x1a, 0x5b, 0x4c,
],
note_pos: 3818571480,
note_nf: [
0x33, 0x2a, 0xd9, 0x9e, 0xb9, 0xe9, 0x77, 0xeb, 0x62, 0x7a, 0x12, 0x2d, 0xbf, 0xb2,
0xf2, 0x5f, 0xe5, 0x88, 0xe5, 0x97, 0x75, 0x3e, 0xc5, 0x58, 0x0f, 0xf2, 0xbe, 0x20,
0xb6, 0xc9, 0xa7, 0xe1,
],
},
TestVector {
sk: [
@@ -423,6 +461,12 @@ fn key_components() {
0x90, 0xb6, 0xe0, 0xf2, 0xf4, 0xbf, 0x4e, 0xc4, 0xa0, 0xdb, 0x5b, 0xbc, 0xcb, 0x5b,
0x78, 0x3a, 0x1e, 0x55,
],
note_pos: 287318480,
note_nf: [
0xfc, 0x74, 0xcd, 0x0e, 0x4b, 0xe0, 0x49, 0x57, 0xb1, 0x96, 0xcf, 0x87, 0x34, 0xae,
0x99, 0x23, 0x96, 0xaf, 0x4c, 0xfa, 0x8f, 0xec, 0xbb, 0x86, 0xf9, 0x61, 0xe6, 0xb4,
0x07, 0xd5, 0x1e, 0x11,
],
},
TestVector {
sk: [
@@ -479,6 +523,12 @@ fn key_components() {
0x60, 0xa0, 0x06, 0xf8, 0x2b, 0xb7, 0xad, 0xcd, 0x75, 0x22, 0x3f, 0xa8, 0x59, 0x36,
0xf7, 0x8c, 0x2b, 0x23,
],
note_pos: 1051032776,
note_nf: [
0xd2, 0xe8, 0x87, 0xbd, 0x85, 0x4a, 0x80, 0x2b, 0xce, 0x85, 0x70, 0x53, 0x02, 0x0f,
0x5d, 0x3e, 0x7c, 0x8a, 0xe5, 0x26, 0x7c, 0x5b, 0x65, 0x83, 0xb3, 0xd2, 0x12, 0xcc,
0x8b, 0xb6, 0x98, 0x90,
],
},
TestVector {
sk: [
@@ -535,6 +585,12 @@ fn key_components() {
0x23, 0x36, 0xc2, 0xa0, 0x5a, 0x08, 0x03, 0x23, 0x9b, 0x5b, 0x88, 0xfd, 0x92, 0x07,
0x8f, 0xea, 0x4d, 0x04,
],
note_pos: 1814747072,
note_nf: [
0xa8, 0x2f, 0x17, 0x50, 0xcc, 0x5b, 0x2b, 0xee, 0x64, 0x9a, 0x36, 0x5c, 0x04, 0x20,
0xed, 0x87, 0x07, 0x5b, 0x88, 0x71, 0xfd, 0xa4, 0xa7, 0xf5, 0x84, 0x0d, 0x6b, 0xbe,
0xb1, 0x7c, 0xd6, 0x20,
],
},
TestVector {
sk: [
@@ -591,6 +647,12 @@ fn key_components() {
0x64, 0x41, 0x9b, 0x0e, 0x55, 0x0a, 0xbb, 0xcb, 0x8e, 0x2b, 0xcb, 0xda, 0x8b, 0x63,
0xe4, 0x1d, 0xeb, 0x37,
],
note_pos: 2578461368,
note_nf: [
0x65, 0x36, 0x74, 0x87, 0x3b, 0x3c, 0x67, 0x0c, 0x58, 0x85, 0x84, 0x73, 0xe7, 0xfe,
0x72, 0x19, 0x72, 0xfb, 0x96, 0xe2, 0x15, 0xb8, 0x73, 0x77, 0xa1, 0x7c, 0xa3, 0x71,
0x0d, 0x93, 0xc9, 0xe9,
],
},
];
@@ -663,5 +725,7 @@ fn key_components() {
note.cm(&JUBJUB).into_repr().write_le(&mut vec).unwrap();
assert_eq!(&vec, &tv.note_cm);
}
assert_eq!(note.nf(&fvk, tv.note_pos, &JUBJUB), tv.note_nf);
}
}

View File

@@ -1,4 +1,4 @@
use sapling_crypto::jubjub::{FixedGenerators, JubjubParams};
use zcash_primitives::jubjub::{FixedGenerators, JubjubParams};
use super::JUBJUB;

View File

@@ -1,9 +1,7 @@
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::Bls12;
use sapling_crypto::{
jubjub::{FixedGenerators, JubjubEngine},
redjubjub::{PrivateKey, PublicKey, Signature},
};
use zcash_primitives::jubjub::{FixedGenerators, JubjubEngine};
use zcash_primitives::redjubjub::{PrivateKey, PublicKey, Signature};
use super::JUBJUB;

View File

@@ -15,10 +15,13 @@ homepage = "https://github.com/ebfull/pairing"
repository = "https://github.com/ebfull/pairing"
[dependencies]
rand = "0.4"
byteorder = "1"
ff = { path = "../ff", features = ["derive"] }
group = { path = "../group" }
rand_core = "0.5"
[dev-dependencies]
rand_xorshift = "0.2"
[features]
unstable-features = ["expose-arith"]

View File

@@ -14,11 +14,10 @@ macro_rules! curve_impl {
pub struct $affine {
pub(crate) x: $basefield,
pub(crate) y: $basefield,
pub(crate) infinity: bool
pub(crate) infinity: bool,
}
impl ::std::fmt::Display for $affine
{
impl ::std::fmt::Display for $affine {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
if self.infinity {
write!(f, "{}(Infinity)", $name)
@@ -30,13 +29,12 @@ macro_rules! curve_impl {
#[derive(Copy, Clone, Debug, Eq)]
pub struct $projective {
pub(crate) x: $basefield,
pub(crate) y: $basefield,
pub(crate) z: $basefield
pub(crate) x: $basefield,
pub(crate) y: $basefield,
pub(crate) z: $basefield,
}
impl ::std::fmt::Display for $projective
{
impl ::std::fmt::Display for $projective {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}", self.into_affine())
}
@@ -89,7 +87,9 @@ macro_rules! curve_impl {
let mut res = $projective::zero();
for i in bits {
res.double();
if i { res.add_assign_mixed(self) }
if i {
res.add_assign_mixed(self)
}
}
res
}
@@ -112,12 +112,8 @@ macro_rules! curve_impl {
$affine {
x: x,
y: if (y < negy) ^ greatest {
y
} else {
negy
},
infinity: false
y: if (y < negy) ^ greatest { y } else { negy },
infinity: false,
}
})
}
@@ -156,7 +152,7 @@ macro_rules! curve_impl {
$affine {
x: $basefield::zero(),
y: $basefield::one(),
infinity: true
infinity: true,
}
}
@@ -182,7 +178,6 @@ macro_rules! curve_impl {
fn into_projective(&self) -> $projective {
(*self).into()
}
}
impl PairingCurveAffine for $affine {
@@ -197,14 +192,18 @@ macro_rules! curve_impl {
fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
self.perform_pairing(other)
}
}
impl Rand for $projective {
fn rand<R: Rng>(rng: &mut R) -> Self {
impl CurveProjective for $projective {
type Engine = Bls12;
type Scalar = $scalarfield;
type Base = $basefield;
type Affine = $affine;
fn random<R: RngCore>(rng: &mut R) -> Self {
loop {
let x = rng.gen();
let greatest = rng.gen();
let x = $basefield::random(rng);
let greatest = rng.next_u32() % 2 != 0;
if let Some(p) = $affine::get_point_from_x(x, greatest) {
let p = p.scale_by_cofactor();
@@ -215,13 +214,6 @@ macro_rules! curve_impl {
}
}
}
}
impl CurveProjective for $projective {
type Engine = Bls12;
type Scalar = $scalarfield;
type Base = $basefield;
type Affine = $affine;
// The point at infinity is always represented by
// Z = 0.
@@ -229,7 +221,7 @@ macro_rules! curve_impl {
$projective {
x: $basefield::zero(),
y: $basefield::one(),
z: $basefield::zero()
z: $basefield::zero(),
}
}
@@ -247,8 +239,7 @@ macro_rules! curve_impl {
self.is_zero() || self.z == $basefield::one()
}
fn batch_normalization(v: &mut [Self])
{
fn batch_normalization(v: &mut [Self]) {
// Montgomery's Trick and Fast Implementation of Masked AES
// Genelle, Prouff and Quisquater
// Section 3.2
@@ -256,9 +247,10 @@ macro_rules! curve_impl {
// First pass: compute [a, ab, abc, ...]
let mut prod = Vec::with_capacity(v.len());
let mut tmp = $basefield::one();
for g in v.iter_mut()
// Ignore normalized elements
.filter(|g| !g.is_normalized())
for g in v
.iter_mut()
// Ignore normalized elements
.filter(|g| !g.is_normalized())
{
tmp.mul_assign(&g.z);
prod.push(tmp);
@@ -268,13 +260,19 @@ macro_rules! curve_impl {
tmp = tmp.inverse().unwrap(); // Guaranteed to be nonzero.
// Second pass: iterate backwards to compute inverses
for (g, s) in v.iter_mut()
// Backwards
.rev()
// Ignore normalized elements
.filter(|g| !g.is_normalized())
// Backwards, skip last element, fill in one for last term.
.zip(prod.into_iter().rev().skip(1).chain(Some($basefield::one())))
for (g, s) in v
.iter_mut()
// Backwards
.rev()
// Ignore normalized elements
.filter(|g| !g.is_normalized())
// Backwards, skip last element, fill in one for last term.
.zip(
prod.into_iter()
.rev()
.skip(1)
.chain(Some($basefield::one())),
)
{
// tmp := tmp * g.z; g.z := tmp * s = 1/z
let mut newtmp = tmp;
@@ -285,9 +283,7 @@ macro_rules! curve_impl {
}
// Perform affine transformations
for g in v.iter_mut()
.filter(|g| !g.is_normalized())
{
for g in v.iter_mut().filter(|g| !g.is_normalized()) {
let mut z = g.z; // 1/z
z.square(); // 1/z^2
g.x.mul_assign(&z); // x/z^2
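
The batch_normalization body reformatted above is an instance of Montgomery's batch-inversion trick: one field inversion amortized over the whole slice via prefix products. A standalone sketch of the trick against the in-tree ff::Field trait (batch_invert is a hypothetical name, not code from this PR):

use ff::Field;

// Invert every nonzero element of `v` in place using a single field inversion.
fn batch_invert<F: Field>(v: &mut [F]) {
    // First pass: running prefix products [a, ab, abc, ...] over nonzero elements.
    let mut prod = Vec::with_capacity(v.len());
    let mut tmp = F::one();
    for f in v.iter().filter(|f| !f.is_zero()) {
        tmp.mul_assign(f);
        prod.push(tmp);
    }

    // One inversion of the product of all nonzero elements (one() if there are none).
    let mut tmp = tmp.inverse().unwrap();

    // Second pass: walk backwards, peeling one factor off at a time.
    for (f, s) in v
        .iter_mut()
        .rev()
        .filter(|f| !f.is_zero())
        // s = prefix product of everything before f, with one() for the first element
        .zip(prod.into_iter().rev().skip(1).chain(Some(F::one())))
    {
        // new_tmp := tmp * f = inverse of the prefix product before f
        let mut new_tmp = tmp;
        new_tmp.mul_assign(f);
        // f := tmp * s = 1/f
        let mut inv_f = tmp;
        inv_f.mul_assign(&s);
        *f = inv_f;
        tmp = new_tmp;
    }
}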
@@ -540,8 +536,7 @@ macro_rules! curve_impl {
let mut found_one = false;
for i in BitIterator::new(other.into())
{
for i in BitIterator::new(other.into()) {
if found_one {
res.double();
} else {
@@ -579,7 +574,7 @@ macro_rules! curve_impl {
$projective {
x: p.x,
y: p.y,
z: $basefield::one()
z: $basefield::one(),
}
}
}
@@ -596,7 +591,7 @@ macro_rules! curve_impl {
$affine {
x: p.x,
y: p.y,
infinity: false
infinity: false,
}
} else {
// Z is nonzero, so it must have an inverse in a field.
@@ -616,12 +611,12 @@ macro_rules! curve_impl {
$affine {
x: x,
y: y,
infinity: false
infinity: false,
}
}
}
}
}
};
}
pub mod g1 {
@@ -629,7 +624,7 @@ pub mod g1 {
use super::g2::G2Affine;
use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr, SqrtField};
use group::{CurveAffine, CurveProjective, EncodedPoint, GroupDecodingError};
use rand::{Rand, Rng};
use rand_core::RngCore;
use std::fmt;
use {Engine, PairingCurveAffine};
@@ -957,7 +952,7 @@ pub mod g1 {
let negyrepr = negy.into_repr();
let p = G1Affine {
x: x,
x,
y: if yrepr < negyrepr { y } else { negy },
infinity: false,
};
@@ -992,7 +987,8 @@ pub mod g1 {
0x9fe83b1b4a5d648d,
0xf583cc5a508f6a40,
0xc3ad2aefde0bb13,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x60aa6f9552f03aae,
0xecd01d5181300d35,
@@ -1000,7 +996,8 @@ pub mod g1 {
0xe760f57922998c9d,
0x953703f5795a39e5,
0xfe3ae0922df702c,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
assert!(!p.is_on_curve());
@@ -1017,7 +1014,8 @@ pub mod g1 {
0xea034ee2928b30a8,
0xbd8833dc7c79a7f7,
0xe45c9f0c0438675,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x3b450eb1ab7b5dad,
0xa65cb81e975e8675,
@@ -1025,7 +1023,8 @@ pub mod g1 {
0x753ddf21a2601d20,
0x532d0b640bd3ff8b,
0x118d2c543f031102,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
assert!(!p.is_on_curve());
@@ -1043,7 +1042,8 @@ pub mod g1 {
0xf35de9ce0d6b4e84,
0x265bddd23d1dec54,
0x12a8778088458308,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x8a22defa0d526256,
0xc57ca55456fcb9ae,
@@ -1051,7 +1051,8 @@ pub mod g1 {
0x921beef89d4f29df,
0x5b6fda44ad85fa78,
0xed74ab9f302cbe0,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
assert!(p.is_on_curve());
@@ -1069,7 +1070,8 @@ pub mod g1 {
0x485e77d50a5df10d,
0x4c6fcac4b55fd479,
0x86ed4d9906fb064,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0xd25ee6461538c65,
0x9f3bbb2ecd3719b9,
@@ -1077,7 +1079,8 @@ pub mod g1 {
0xcefca68333c35288,
0x570c8005f8573fa6,
0x152ca696fe034442,
])).unwrap(),
]))
.unwrap(),
z: Fq::one(),
};
@@ -1089,7 +1092,8 @@ pub mod g1 {
0x5f44314ec5e3fb03,
0x24e8538737c6e675,
0x8abd623a594fba8,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x6b0528f088bb7044,
0x2fdeb5c82917ff9e,
@@ -1097,7 +1101,8 @@ pub mod g1 {
0xd65104c6f95a872a,
0x1f2998a5a9c61253,
0xe74846154a9e44,
])).unwrap(),
]))
.unwrap(),
z: Fq::one(),
});
@@ -1113,7 +1118,8 @@ pub mod g1 {
0xc4f9a52a428e23bb,
0xd178b28dd4f407ef,
0x17fb8905e9183c69
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0xd0de9d65292b7710,
0xf6a05f2bcf1d9ca7,
@@ -1121,7 +1127,8 @@ pub mod g1 {
0xeec8d1a5b7466c58,
0x4bc362649dce6376,
0x430cbdc5455b00a
])).unwrap(),
]))
.unwrap(),
infinity: false,
}
);
@@ -1137,7 +1144,8 @@ pub mod g1 {
0x485e77d50a5df10d,
0x4c6fcac4b55fd479,
0x86ed4d9906fb064,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0xd25ee6461538c65,
0x9f3bbb2ecd3719b9,
@@ -1145,7 +1153,8 @@ pub mod g1 {
0xcefca68333c35288,
0x570c8005f8573fa6,
0x152ca696fe034442,
])).unwrap(),
]))
.unwrap(),
z: Fq::one(),
};
@@ -1163,7 +1172,8 @@ pub mod g1 {
0x4b914c16687dcde0,
0x66c8baf177d20533,
0xaf960cff3d83833
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x3f0675695f5177a8,
0x2b6d82ae178a1ba0,
@@ -1171,7 +1181,8 @@ pub mod g1 {
0x1771a65b60572f4e,
0x8b547c1313b27555,
0x135075589a687b1e
])).unwrap(),
]))
.unwrap(),
infinity: false,
}
);
@@ -1194,7 +1205,8 @@ pub mod g1 {
0x71ffa8021531705,
0x7418d484386d267,
0xd5108d8ff1fbd6,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0xa776ccbfe9981766,
0x255632964ff40f4a,
@@ -1202,7 +1214,8 @@ pub mod g1 {
0x520f74773e74c8c3,
0x484c8fc982008f0,
0xee2c3d922008cc6,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
@@ -1214,7 +1227,8 @@ pub mod g1 {
0xc6e05201e5f83991,
0xf7c75910816f207c,
0x18d4043e78103106,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0xa776ccbfe9981766,
0x255632964ff40f4a,
@@ -1222,7 +1236,8 @@ pub mod g1 {
0x520f74773e74c8c3,
0x484c8fc982008f0,
0xee2c3d922008cc6,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
@@ -1237,7 +1252,8 @@ pub mod g1 {
0x9676ff02ec39c227,
0x4c12c15d7e55b9f3,
0x57fd1e317db9bd,
])).unwrap(),
]))
.unwrap(),
y: Fq::from_repr(FqRepr([
0x1288334016679345,
0xf955cd68615ff0b5,
@@ -1245,7 +1261,8 @@ pub mod g1 {
0x1267d70db51049fb,
0x4696deb9ab2ba3e7,
0xb1e4e11177f59d4,
])).unwrap(),
]))
.unwrap(),
infinity: false,
};
@@ -1276,7 +1293,7 @@ pub mod g2 {
use super::g1::G1Affine;
use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr, SqrtField};
use group::{CurveAffine, CurveProjective, EncodedPoint, GroupDecodingError};
use rand::{Rand, Rng};
use rand_core::RngCore;
use std::fmt;
use {Engine, PairingCurveAffine};
@@ -1639,7 +1656,7 @@ pub mod g2 {
negy.negate();
let p = G2Affine {
x: x,
x,
y: if y < negy { y } else { negy },
infinity: false,
};
@@ -1675,7 +1692,8 @@ pub mod g2 {
0x7a17a004747e3dbe,
0xcc65406a7c2e5a73,
0x10b8c03d64db4d0c,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xd30e70fe2f029778,
0xda30772df0f5212e,
@@ -1683,7 +1701,8 @@ pub mod g2 {
0xfb777e5b9b568608,
0x789bac1fec71a2b9,
0x1342f02e2da54405,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1693,7 +1712,8 @@ pub mod g2 {
0x663015d9410eb608,
0x78e82a79d829a544,
0x40a00545bb3c1e,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x4709802348e79377,
0xb5ac4dc9204bcfbd,
@@ -1701,7 +1721,8 @@ pub mod g2 {
0x15008b1dc399e8df,
0x68128fd0548a3829,
0x16a613db5c873aaa,
])).unwrap(),
]))
.unwrap(),
},
infinity: false,
};
@@ -1720,7 +1741,8 @@ pub mod g2 {
0x41abba710d6c692c,
0xffcc4b2b62ce8484,
0x6993ec01b8934ed,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xb94e92d5f874e26,
0x44516408bc115d95,
@@ -1728,7 +1750,8 @@ pub mod g2 {
0xa5a0c2b7131f3555,
0x83800965822367e7,
0x10cf1d3ad8d90bfa,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1738,7 +1761,8 @@ pub mod g2 {
0x5a9171720e73eb51,
0x38eb4fd8d658adb7,
0xb649051bbc1164d,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x9225814253d7df75,
0xc196c2513477f887,
@@ -1746,7 +1770,8 @@ pub mod g2 {
0x55f2b8efad953e04,
0x7379345eda55265e,
0x377f2e6208fd4cb,
])).unwrap(),
]))
.unwrap(),
},
infinity: false,
};
@@ -1766,7 +1791,8 @@ pub mod g2 {
0x2199bc19c48c393d,
0x4a151b732a6075bf,
0x17762a3b9108c4a7,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x26f461e944bbd3d1,
0x298f3189a9cf6ed6,
@@ -1774,7 +1800,8 @@ pub mod g2 {
0x7e147f3f9e6e241,
0x72a9b63583963fff,
0x158b0083c000462,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1784,7 +1811,8 @@ pub mod g2 {
0x68cad19430706b4d,
0x3ccfb97b924dcea8,
0x1660f93434588f8d,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xaaed3985b6dcb9c7,
0xc1e985d6d898d9f4,
@@ -1792,7 +1820,8 @@ pub mod g2 {
0x3940a2dbb914b529,
0xbeb88137cf34f3e7,
0x1699ee577c61b694,
])).unwrap(),
]))
.unwrap(),
},
infinity: false,
};
@@ -1812,7 +1841,8 @@ pub mod g2 {
0x72556c999f3707ac,
0x4617f2e6774e9711,
0x100b2fe5bffe030b,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x7a33555977ec608,
0xe23039d1fe9c0881,
@@ -1820,7 +1850,8 @@ pub mod g2 {
0x4637c4f417667e2e,
0x93ebe7c3e41f6acc,
0xde884f89a9a371b,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1830,7 +1861,8 @@ pub mod g2 {
0x25fd427b4122f231,
0xd83112aace35cae,
0x191b2432407cbb7f,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xf68ae82fe97662f5,
0xe986057068b50b7d,
@@ -1838,7 +1870,8 @@ pub mod g2 {
0x9eaa6d19de569196,
0xf6a03d31e2ec2183,
0x3bdafaf7ca9b39b,
])).unwrap(),
]))
.unwrap(),
},
z: Fq2::one(),
};
@@ -1852,7 +1885,8 @@ pub mod g2 {
0x8e73a96b329ad190,
0x27c546f75ee1f3ab,
0xa33d27add5e7e82,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x93b1ebcd54870dfe,
0xf1578300e1342e11,
@@ -1860,7 +1894,8 @@ pub mod g2 {
0x2089faf462438296,
0x828e5848cd48ea66,
0x141ecbac1deb038b,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1870,7 +1905,8 @@ pub mod g2 {
0x2767032fc37cc31d,
0xd5ee2aba84fd10fe,
0x16576ccd3dd0a4e8,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x4da9b6f6a96d1dd2,
0x9657f7da77f1650e,
@@ -1878,7 +1914,8 @@ pub mod g2 {
0x31898db63f87363a,
0xabab040ddbd097cc,
0x11ad236b9ba02990,
])).unwrap(),
]))
.unwrap(),
},
z: Fq2::one(),
});
@@ -1896,7 +1933,8 @@ pub mod g2 {
0xf1273e6406eef9cc,
0xababd760ff05cb92,
0xd7c20456617e89
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xd1a50b8572cbd2b8,
0x238f0ac6119d07df,
@@ -1904,7 +1942,8 @@ pub mod g2 {
0x8b203284c51edf6b,
0xc8a0b730bbb21f5e,
0x1a3b59d29a31274
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1914,7 +1953,8 @@ pub mod g2 {
0x64528ab3863633dc,
0x159384333d7cba97,
0x4cb84741f3cafe8
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x242af0dc3640e1a4,
0xe90a73ad65c66919,
@@ -1922,7 +1962,8 @@ pub mod g2 {
0x38528f92b689644d,
0xb6884deec59fb21f,
0x3c075d3ec52ba90
])).unwrap(),
]))
.unwrap(),
},
infinity: false,
}
@@ -1940,7 +1981,8 @@ pub mod g2 {
0x72556c999f3707ac,
0x4617f2e6774e9711,
0x100b2fe5bffe030b,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x7a33555977ec608,
0xe23039d1fe9c0881,
@@ -1948,7 +1990,8 @@ pub mod g2 {
0x4637c4f417667e2e,
0x93ebe7c3e41f6acc,
0xde884f89a9a371b,
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -1958,7 +2001,8 @@ pub mod g2 {
0x25fd427b4122f231,
0xd83112aace35cae,
0x191b2432407cbb7f,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xf68ae82fe97662f5,
0xe986057068b50b7d,
@@ -1966,7 +2010,8 @@ pub mod g2 {
0x9eaa6d19de569196,
0xf6a03d31e2ec2183,
0x3bdafaf7ca9b39b,
])).unwrap(),
]))
.unwrap(),
},
z: Fq2::one(),
};
@@ -1986,7 +2031,8 @@ pub mod g2 {
0xbcedcfce1e52d986,
0x9755d4a3926e9862,
0x18bab73760fd8024
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x4e7c5e0a2ae5b99e,
0x96e582a27f028961,
@@ -1994,7 +2040,8 @@ pub mod g2 {
0xeb0cf5e610ef4fe7,
0x7b4c2bae8db6e70b,
0xf136e43909fca0
])).unwrap(),
]))
.unwrap(),
},
y: Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -2004,7 +2051,8 @@ pub mod g2 {
0xa5a2a51f7fde787b,
0x8b92866bc6384188,
0x81a53fe531d64ef
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x4c5d607666239b34,
0xeddb5f48304d14b3,
@@ -2012,7 +2060,8 @@ pub mod g2 {
0xb271f52f12ead742,
0x244e6c2015c83348,
0x19e2deae6eb9b441
])).unwrap(),
]))
.unwrap(),
},
infinity: false,
}

View File

@@ -1173,7 +1173,9 @@ fn test_neg_one() {
}
#[cfg(test)]
use rand::{Rand, SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
#[cfg(test)]
use rand_xorshift::XorShiftRng;
#[test]
fn test_fq_repr_ordering() {
@@ -1396,7 +1398,10 @@ fn test_fq_repr_num_bits() {
#[test]
fn test_fq_repr_sub_noborrow() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let mut t = FqRepr([
0x827a4a08041ebd9,
@@ -1426,7 +1431,7 @@ fn test_fq_repr_sub_noborrow() {
);
for _ in 0..1000 {
let mut a = FqRepr::rand(&mut rng);
let mut a = Fq::random(&mut rng).into_repr();
a.0[5] >>= 30;
let mut b = a;
for _ in 0..10 {
@@ -1483,7 +1488,10 @@ fn test_fq_repr_sub_noborrow() {
#[test]
fn test_fq_repr_add_nocarry() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let mut t = FqRepr([
0x827a4a08041ebd9,
@@ -1514,9 +1522,9 @@ fn test_fq_repr_add_nocarry() {
// Test for the associativity of addition.
for _ in 0..1000 {
let mut a = FqRepr::rand(&mut rng);
let mut b = FqRepr::rand(&mut rng);
let mut c = FqRepr::rand(&mut rng);
let mut a = Fq::random(&mut rng).into_repr();
let mut b = Fq::random(&mut rng).into_repr();
let mut c = Fq::random(&mut rng).into_repr();
// Unset the first few bits, so that overflow won't occur.
a.0[5] >>= 3;
@@ -1574,31 +1582,32 @@ fn test_fq_is_valid() {
a.0.sub_noborrow(&FqRepr::from(1));
assert!(a.is_valid());
assert!(Fq(FqRepr::from(0)).is_valid());
assert!(
Fq(FqRepr([
0xdf4671abd14dab3e,
0xe2dc0c9f534fbd33,
0x31ca6c880cc444a6,
0x257a67e70ef33359,
0xf9b29e493f899b36,
0x17c8be1800b9f059
])).is_valid()
);
assert!(
!Fq(FqRepr([
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
])).is_valid()
);
assert!(Fq(FqRepr([
0xdf4671abd14dab3e,
0xe2dc0c9f534fbd33,
0x31ca6c880cc444a6,
0x257a67e70ef33359,
0xf9b29e493f899b36,
0x17c8be1800b9f059
]))
.is_valid());
assert!(!Fq(FqRepr([
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
]))
.is_valid());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
assert!(a.is_valid());
}
}
@@ -1708,13 +1717,16 @@ fn test_fq_add_assign() {
// Test associativity
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Generate a, b, c and ensure (a + b) + c == a + (b + c).
let a = Fq::rand(&mut rng);
let b = Fq::rand(&mut rng);
let c = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let b = Fq::random(&mut rng);
let c = Fq::random(&mut rng);
let mut tmp1 = a;
tmp1.add_assign(&b);
@@ -1818,12 +1830,15 @@ fn test_fq_sub_assign() {
);
}
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure that (a - b) + (b - a) = 0.
let a = Fq::rand(&mut rng);
let b = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let b = Fq::random(&mut rng);
let mut tmp1 = a;
tmp1.sub_assign(&b);
@@ -1865,13 +1880,16 @@ fn test_fq_mul_assign() {
]))
);
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000000 {
// Ensure that (a * b) * c = a * (b * c)
let a = Fq::rand(&mut rng);
let b = Fq::rand(&mut rng);
let c = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let b = Fq::random(&mut rng);
let c = Fq::random(&mut rng);
let mut tmp1 = a;
tmp1.mul_assign(&b);
@@ -1887,10 +1905,10 @@ fn test_fq_mul_assign() {
for _ in 0..1000000 {
// Ensure that r * (a + b + c) = r*a + r*b + r*c
let r = Fq::rand(&mut rng);
let mut a = Fq::rand(&mut rng);
let mut b = Fq::rand(&mut rng);
let mut c = Fq::rand(&mut rng);
let r = Fq::random(&mut rng);
let mut a = Fq::random(&mut rng);
let mut b = Fq::random(&mut rng);
let mut c = Fq::random(&mut rng);
let mut tmp1 = a;
tmp1.add_assign(&b);
@@ -1929,14 +1947,18 @@ fn test_fq_squaring() {
0xdc05c659b4e15b27,
0x79361e5a802c6a23,
0x24bcbe5d51b9a6f
])).unwrap()
]))
.unwrap()
);
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000000 {
// Ensure that (a * a) = a^2
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let mut tmp = a;
tmp.square();
@@ -1952,13 +1974,16 @@ fn test_fq_squaring() {
fn test_fq_inverse() {
assert!(Fq::zero().inverse().is_none());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let one = Fq::one();
for _ in 0..1000 {
// Ensure that a * a^-1 = 1
let mut a = Fq::rand(&mut rng);
let mut a = Fq::random(&mut rng);
let ainv = a.inverse().unwrap();
a.mul_assign(&ainv);
assert_eq!(a, one);
@@ -1967,11 +1992,14 @@ fn test_fq_inverse() {
#[test]
fn test_fq_double() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure doubling a is equivalent to adding a to itself.
let mut a = Fq::rand(&mut rng);
let mut a = Fq::random(&mut rng);
let mut b = a;
b.add_assign(&a);
a.double();
@@ -1988,11 +2016,14 @@ fn test_fq_negate() {
assert!(a.is_zero());
}
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure (a - (-a)) = 0.
let mut a = Fq::rand(&mut rng);
let mut a = Fq::random(&mut rng);
let mut b = a;
b.negate();
a.add_assign(&b);
@@ -2003,12 +2034,15 @@ fn test_fq_negate() {
#[test]
fn test_fq_pow() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for i in 0..1000 {
// Exponentiate by various small numbers and ensure it is consistent with repeated
// multiplication.
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let target = a.pow(&[i]);
let mut c = Fq::one();
for _ in 0..i {
@@ -2019,7 +2053,7 @@ fn test_fq_pow() {
for _ in 0..1000 {
// Exponentiating by the modulus should have no effect in a prime field.
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
assert_eq!(a, a.pow(Fq::char()));
}
@@ -2029,13 +2063,16 @@ fn test_fq_pow() {
fn test_fq_sqrt() {
use ff::SqrtField;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
assert_eq!(Fq::zero().sqrt().unwrap(), Fq::zero());
for _ in 0..1000 {
// Ensure sqrt(a^2) = a or -a
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let mut nega = a;
nega.negate();
let mut b = a;
@@ -2048,7 +2085,7 @@ fn test_fq_sqrt() {
for _ in 0..1000 {
// Ensure sqrt(a)^2 = a for random a
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
if let Some(mut tmp) = a.sqrt() {
tmp.square();
@@ -2061,16 +2098,15 @@ fn test_fq_sqrt() {
#[test]
fn test_fq_from_into_repr() {
// q + 1 should not be in the field
assert!(
Fq::from_repr(FqRepr([
0xb9feffffffffaaac,
0x1eabfffeb153ffff,
0x6730d2a0f6b0f624,
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
])).is_err()
);
assert!(Fq::from_repr(FqRepr([
0xb9feffffffffaaac,
0x1eabfffeb153ffff,
0x6730d2a0f6b0f624,
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
]))
.is_err());
// q should not be in the field
assert!(Fq::from_repr(Fq::char()).is_err());
@@ -2108,11 +2144,14 @@ fn test_fq_from_into_repr() {
// Zero should be in the field.
assert!(Fq::from_repr(FqRepr::from(0)).unwrap().is_zero());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Try to turn Fq elements into representations and back again, and compare.
let a = Fq::rand(&mut rng);
let a = Fq::random(&mut rng);
let a_repr = a.into_repr();
let b_repr = FqRepr::from(a);
assert_eq!(a_repr, b_repr);
@@ -2205,7 +2244,7 @@ fn test_fq_ordering() {
#[test]
fn fq_repr_tests() {
::tests::repr::random_repr_tests::<FqRepr>();
::tests::repr::random_repr_tests::<Fq>();
}
#[test]
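
The Fq tests above now draw elements with Fq::random over a rand_core RNG. The associativity loops they run generalize to any field; a hypothetical generic form of the same check:

use ff::Field;
use rand_core::RngCore;

// Checks (a * b) * c == a * (b * c) for randomly sampled field elements,
// mirroring the loops in test_fq_mul_assign above.
fn check_mul_associativity<F: Field, R: RngCore>(rng: &mut R, iterations: usize) {
    for _ in 0..iterations {
        let a = F::random(rng);
        let b = F::random(rng);
        let c = F::random(rng);

        // (a * b) * c
        let mut lhs = a;
        lhs.mul_assign(&b);
        lhs.mul_assign(&c);

        // a * (b * c), written as (b * c) * a since multiplication is commutative
        let mut rhs = b;
        rhs.mul_assign(&c);
        rhs.mul_assign(&a);

        assert_eq!(lhs, rhs);
    }
}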

View File

@@ -2,7 +2,7 @@ use super::fq::FROBENIUS_COEFF_FQ12_C1;
use super::fq2::Fq2;
use super::fq6::Fq6;
use ff::Field;
use rand::{Rand, Rng};
use rand_core::RngCore;
/// An element of Fq12, represented by c0 + c1 * w.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -17,15 +17,6 @@ impl ::std::fmt::Display for Fq12 {
}
}
impl Rand for Fq12 {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fq12 {
c0: rng.gen(),
c1: rng.gen(),
}
}
}
impl Fq12 {
pub fn conjugate(&mut self) {
self.c1.negate();
@@ -49,6 +40,13 @@ impl Fq12 {
}
impl Field for Fq12 {
fn random<R: RngCore>(rng: &mut R) -> Self {
Fq12 {
c0: Fq6::random(rng),
c1: Fq6::random(rng),
}
}
fn zero() -> Self {
Fq12 {
c0: Fq6::zero(),
@@ -149,24 +147,29 @@ impl Field for Fq12 {
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
#[cfg(test)]
use rand_xorshift::XorShiftRng;
#[test]
fn test_fq12_mul_by_014() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let c0 = Fq2::rand(&mut rng);
let c1 = Fq2::rand(&mut rng);
let c5 = Fq2::rand(&mut rng);
let mut a = Fq12::rand(&mut rng);
let c0 = Fq2::random(&mut rng);
let c1 = Fq2::random(&mut rng);
let c5 = Fq2::random(&mut rng);
let mut a = Fq12::random(&mut rng);
let mut b = a;
a.mul_by_014(&c0, &c1, &c5);
b.mul_assign(&Fq12 {
c0: Fq6 {
c0: c0,
c1: c1,
c0,
c1,
c2: Fq2::zero(),
},
c1: Fq6 {

View File

@@ -1,6 +1,6 @@
use super::fq::{FROBENIUS_COEFF_FQ2_C1, Fq, NEGATIVE_ONE};
use super::fq::{Fq, FROBENIUS_COEFF_FQ2_C1, NEGATIVE_ONE};
use ff::{Field, SqrtField};
use rand::{Rand, Rng};
use rand_core::RngCore;
use std::cmp::Ordering;
@@ -56,16 +56,14 @@ impl Fq2 {
}
}
impl Rand for Fq2 {
fn rand<R: Rng>(rng: &mut R) -> Self {
impl Field for Fq2 {
fn random<R: RngCore>(rng: &mut R) -> Self {
Fq2 {
c0: rng.gen(),
c1: rng.gen(),
c0: Fq::random(rng),
c1: Fq::random(rng),
}
}
}
impl Field for Fq2 {
fn zero() -> Self {
Fq2 {
c0: Fq::zero(),
@@ -263,12 +261,11 @@ fn test_fq2_basics() {
);
assert!(Fq2::zero().is_zero());
assert!(!Fq2::one().is_zero());
assert!(
!Fq2 {
c0: Fq::zero(),
c1: Fq::one(),
}.is_zero()
);
assert!(!Fq2 {
c0: Fq::zero(),
c1: Fq::one(),
}
.is_zero());
}
#[test]
@@ -311,7 +308,8 @@ fn test_fq2_squaring() {
0xf7f295a94e58ae7c,
0x41b76dcc1c3fbe5e,
0x7080c5fa1d8e042,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x38f473b3c870a4ab,
0x6ad3291177c8c7e5,
@@ -319,7 +317,8 @@ fn test_fq2_squaring() {
0xbfb99020604137a0,
0xfc58a7b7be815407,
0x10d1615e75250a21,
])).unwrap(),
]))
.unwrap(),
};
a.square();
assert_eq!(
@@ -332,7 +331,8 @@ fn test_fq2_squaring() {
0xcb674157618da176,
0x4cf17b5893c3d327,
0x7eac81369c43361
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xc1579cf58e980cf8,
0xa23eb7e12dd54d98,
@@ -340,7 +340,8 @@ fn test_fq2_squaring() {
0x38d0d7275a9689e1,
0x739c983042779a65,
0x1542a61c8a8db994
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -358,7 +359,8 @@ fn test_fq2_mul() {
0x9ee53e7e84d7532e,
0x1c202d8ed97afb45,
0x51d3f9253e2516f,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xa7348a8b511aedcf,
0x143c215d8176b319,
@@ -366,7 +368,8 @@ fn test_fq2_mul() {
0x9533e4a9a5158be,
0x7a5e1ecb676d65f9,
0x180c3ee46656b008,
])).unwrap(),
]))
.unwrap(),
};
a.mul_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -376,7 +379,8 @@ fn test_fq2_mul() {
0xcd460f9f0c23e430,
0x6c9110292bfa409,
0x2c93a72eb8af83e,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x4b1c3f936d8992d4,
0x1d2a72916dba4c8a,
@@ -384,7 +388,8 @@ fn test_fq2_mul() {
0x57a06d3135a752ae,
0x634cd3c6c565096d,
0x19e17334d4e93558,
])).unwrap(),
]))
.unwrap(),
});
assert_eq!(
a,
@@ -396,7 +401,8 @@ fn test_fq2_mul() {
0x5511fe4d84ee5f78,
0x5310a202d92f9963,
0x1751afbe166e5399
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x84af0e1bd630117a,
0x6c63cd4da2c2aa7,
@@ -404,7 +410,8 @@ fn test_fq2_mul() {
0xc975106579c275ee,
0x33a9ac82ce4c5083,
0x1ef1a36c201589d
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -424,7 +431,8 @@ fn test_fq2_inverse() {
0x9ee53e7e84d7532e,
0x1c202d8ed97afb45,
0x51d3f9253e2516f,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xa7348a8b511aedcf,
0x143c215d8176b319,
@@ -432,7 +440,8 @@ fn test_fq2_inverse() {
0x9533e4a9a5158be,
0x7a5e1ecb676d65f9,
0x180c3ee46656b008,
])).unwrap(),
]))
.unwrap(),
};
let a = a.inverse().unwrap();
assert_eq!(
@@ -445,7 +454,8 @@ fn test_fq2_inverse() {
0xdfba703293941c30,
0xa6c3d8f9586f2636,
0x1351ef01941b70c4
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x8c39fd76a8312cb4,
0x15d7b6b95defbff0,
@@ -453,7 +463,8 @@ fn test_fq2_inverse() {
0xcbf651a0f367afb2,
0xdf4e54f0d3ef15a6,
0x103bdf241afb0019
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -471,7 +482,8 @@ fn test_fq2_addition() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -479,7 +491,8 @@ fn test_fq2_addition() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
]))
.unwrap(),
};
a.add_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -489,7 +502,8 @@ fn test_fq2_addition() {
0x3b88899a42a6318f,
0x986a4a62fa82a49d,
0x13ce433fa26027f5,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x66323bf80b58b9b9,
0xa1379b6facf6e596,
@@ -497,7 +511,8 @@ fn test_fq2_addition() {
0x2236f55246d0d44d,
0x4c8c1800eb104566,
0x11d6e20e986c2085,
])).unwrap(),
]))
.unwrap(),
});
assert_eq!(
a,
@@ -509,7 +524,8 @@ fn test_fq2_addition() {
0xf4ef57d604b6bca2,
0x65309427b3d5d090,
0x14c715d5553f01d2
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xfdb032e7d9079a94,
0x35a2809d15468d83,
@@ -517,7 +533,8 @@ fn test_fq2_addition() {
0xd62fa51334f560fa,
0x9ad265eb46e01984,
0x1303f3465112c8bc
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -535,7 +552,8 @@ fn test_fq2_subtraction() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -543,7 +561,8 @@ fn test_fq2_subtraction() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
]))
.unwrap(),
};
a.sub_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
@@ -553,7 +572,8 @@ fn test_fq2_subtraction() {
0x3b88899a42a6318f,
0x986a4a62fa82a49d,
0x13ce433fa26027f5,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x66323bf80b58b9b9,
0xa1379b6facf6e596,
@@ -561,7 +581,8 @@ fn test_fq2_subtraction() {
0x2236f55246d0d44d,
0x4c8c1800eb104566,
0x11d6e20e986c2085,
])).unwrap(),
]))
.unwrap(),
});
assert_eq!(
a,
@@ -573,7 +594,8 @@ fn test_fq2_subtraction() {
0xe255902672ef6c43,
0x7f77a718021c342d,
0x72ba14049fe9881
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xeb4abaf7c255d1cd,
0x11df49bc6cacc256,
@@ -581,7 +603,8 @@ fn test_fq2_subtraction() {
0xf63905f39ad8cb1f,
0x4cd5dd9fb40b3b8f,
0x957411359ba6e4c
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -599,7 +622,8 @@ fn test_fq2_negation() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -607,7 +631,8 @@ fn test_fq2_negation() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
]))
.unwrap(),
};
a.negate();
assert_eq!(
@@ -620,7 +645,8 @@ fn test_fq2_negation() {
0xab107d49317487ab,
0x7e555df189f880e3,
0x19083f5486a10cbd
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x228109103250c9d0,
0x8a411ad149045812,
@@ -628,7 +654,8 @@ fn test_fq2_negation() {
0xb07e9bc405608611,
0xfcd559cbe77bd8b8,
0x18d400b280d93e62
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -646,7 +673,8 @@ fn test_fq2_doubling() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -654,7 +682,8 @@ fn test_fq2_doubling() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
]))
.unwrap(),
};
a.double();
assert_eq!(
@@ -667,7 +696,8 @@ fn test_fq2_doubling() {
0x72cd9c7784211627,
0x998c938972a657e7,
0x1f1a52b65bdb3b9
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x2efbeddf9b5dc1b6,
0x28d5ca5ad09f4fdb,
@@ -675,7 +705,8 @@ fn test_fq2_doubling() {
0x67f15f81dc49195b,
0x9c8c9bd4b79fa83d,
0x25a226f714d506e
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -693,7 +724,8 @@ fn test_fq2_frobenius_map() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -701,7 +733,8 @@ fn test_fq2_frobenius_map() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
]))
.unwrap(),
};
a.frobenius_map(0);
assert_eq!(
@@ -714,7 +747,8 @@ fn test_fq2_frobenius_map() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -722,7 +756,8 @@ fn test_fq2_frobenius_map() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
]))
.unwrap(),
}
);
a.frobenius_map(1);
@@ -736,7 +771,8 @@ fn test_fq2_frobenius_map() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x228109103250c9d0,
0x8a411ad149045812,
@@ -744,7 +780,8 @@ fn test_fq2_frobenius_map() {
0xb07e9bc405608611,
0xfcd559cbe77bd8b8,
0x18d400b280d93e62
])).unwrap(),
]))
.unwrap(),
}
);
a.frobenius_map(1);
@@ -758,7 +795,8 @@ fn test_fq2_frobenius_map() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -766,7 +804,8 @@ fn test_fq2_frobenius_map() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
]))
.unwrap(),
}
);
a.frobenius_map(2);
@@ -780,7 +819,8 @@ fn test_fq2_frobenius_map() {
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
@@ -788,7 +828,8 @@ fn test_fq2_frobenius_map() {
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -807,7 +848,8 @@ fn test_fq2_sqrt() {
0xdb4a116b5bf74aa1,
0x1e58b2159dfe10e2,
0x7ca7da1f13606ac
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xfa8de88b7516d2c3,
0x371a75ed14f41629,
@@ -815,9 +857,11 @@ fn test_fq2_sqrt() {
0x212611bca4e99121,
0x8ee5394d77afb3d,
0xec92336650e49d5
])).unwrap(),
}.sqrt()
]))
.unwrap(),
}
.sqrt()
.unwrap(),
Fq2 {
c0: Fq::from_repr(FqRepr([
0x40b299b2704258c5,
@@ -826,7 +870,8 @@ fn test_fq2_sqrt() {
0x8d7f1f723d02c1d3,
0x881b3e01b611c070,
0x10f6963bbad2ebc5
])).unwrap(),
]))
.unwrap(),
c1: Fq::from_repr(FqRepr([
0xc099534fc209e752,
0x7670594665676447,
@@ -834,7 +879,8 @@ fn test_fq2_sqrt() {
0x6b852aeaf2afcb1b,
0xa4c93b08105d71a9,
0x8d7cfff94216330
])).unwrap(),
]))
.unwrap(),
}
);
@@ -847,10 +893,12 @@ fn test_fq2_sqrt() {
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
])).unwrap(),
c1: Fq::zero(),
}.sqrt()
]))
.unwrap(),
c1: Fq::zero(),
}
.sqrt()
.unwrap(),
Fq2 {
c0: Fq::zero(),
c1: Fq::from_repr(FqRepr([
@@ -860,7 +908,8 @@ fn test_fq2_sqrt() {
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
])).unwrap(),
]))
.unwrap(),
}
);
}
@@ -879,11 +928,16 @@ fn test_fq2_legendre() {
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
#[cfg(test)]
use rand_xorshift::XorShiftRng;
#[test]
fn test_fq2_mul_nonresidue() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let nqr = Fq2 {
c0: Fq::one(),
@@ -891,7 +945,7 @@ fn test_fq2_mul_nonresidue() {
};
for _ in 0..1000 {
let mut a = Fq2::rand(&mut rng);
let mut a = Fq2::random(&mut rng);
let mut b = a;
a.mul_by_nonresidue();
b.mul_assign(&nqr);
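The rand 0.7 migration in this hunk replaces the old [u32; 4] XorShiftRng seed with a [u8; 16] seed; comparing the two literals above, the new bytes are simply the old words serialized in little-endian order. A minimal standalone check of that correspondence (illustrative only, not part of the diff):

// Illustrative only: the new [u8; 16] seed is the old [u32; 4] seed written
// out word-by-word in little-endian byte order.
fn seed_bytes(old: [u32; 4]) -> [u8; 16] {
    let mut out = [0u8; 16];
    for (chunk, word) in out.chunks_mut(4).zip(old.iter()) {
        chunk.copy_from_slice(&word.to_le_bytes());
    }
    out
}

fn main() {
    assert_eq!(
        seed_bytes([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]),
        [
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]
    );
}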

View File

@@ -1,7 +1,7 @@
use super::fq::{FROBENIUS_COEFF_FQ6_C1, FROBENIUS_COEFF_FQ6_C2};
use super::fq2::Fq2;
use ff::Field;
use rand::{Rand, Rng};
use rand_core::RngCore;
/// An element of Fq6, represented by c0 + c1 * v + c2 * v^(2).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -17,16 +17,6 @@ impl ::std::fmt::Display for Fq6 {
}
}
impl Rand for Fq6 {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fq6 {
c0: rng.gen(),
c1: rng.gen(),
c2: rng.gen(),
}
}
}
impl Fq6 {
/// Multiply by quadratic nonresidue v.
pub fn mul_by_nonresidue(&mut self) {
@@ -110,6 +100,14 @@ impl Fq6 {
}
impl Field for Fq6 {
fn random<R: RngCore>(rng: &mut R) -> Self {
Fq6 {
c0: Fq2::random(rng),
c1: Fq2::random(rng),
c2: Fq2::random(rng),
}
}
fn zero() -> Self {
Fq6 {
c0: Fq2::zero(),
@@ -302,11 +300,16 @@ impl Field for Fq6 {
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
#[cfg(test)]
use rand_xorshift::XorShiftRng;
#[test]
fn test_fq6_mul_nonresidue() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let nqr = Fq6 {
c0: Fq2::zero(),
@@ -315,7 +318,7 @@ fn test_fq6_mul_nonresidue() {
};
for _ in 0..1000 {
let mut a = Fq6::rand(&mut rng);
let mut a = Fq6::random(&mut rng);
let mut b = a;
a.mul_by_nonresidue();
b.mul_assign(&nqr);
@@ -326,17 +329,20 @@ fn test_fq6_mul_nonresidue() {
#[test]
fn test_fq6_mul_by_1() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let c1 = Fq2::rand(&mut rng);
let mut a = Fq6::rand(&mut rng);
let c1 = Fq2::random(&mut rng);
let mut a = Fq6::random(&mut rng);
let mut b = a;
a.mul_by_1(&c1);
b.mul_assign(&Fq6 {
c0: Fq2::zero(),
c1: c1,
c1,
c2: Fq2::zero(),
});
@@ -346,18 +352,21 @@ fn test_fq6_mul_by_1() {
#[test]
fn test_fq6_mul_by_01() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let c0 = Fq2::rand(&mut rng);
let c1 = Fq2::rand(&mut rng);
let mut a = Fq6::rand(&mut rng);
let c0 = Fq2::random(&mut rng);
let c1 = Fq2::random(&mut rng);
let mut a = Fq6::random(&mut rng);
let mut b = a;
a.mul_by_01(&c0, &c1);
b.mul_assign(&Fq6 {
c0: c0,
c1: c1,
c0,
c1,
c2: Fq2::zero(),
});

View File

@@ -6,7 +6,9 @@ use ff::{Field, PrimeField, PrimeFieldDecodingError, PrimeFieldRepr};
pub struct Fr(FrRepr);
#[cfg(test)]
use rand::{Rand, SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
#[cfg(test)]
use rand_xorshift::XorShiftRng;
#[test]
fn test_fr_repr_ordering() {
@@ -197,7 +199,10 @@ fn test_fr_repr_num_bits() {
#[test]
fn test_fr_repr_sub_noborrow() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let mut t = FrRepr([
0x8e62a7e85264e2c3,
@@ -221,7 +226,7 @@ fn test_fr_repr_sub_noborrow() {
);
for _ in 0..1000 {
let mut a = FrRepr::rand(&mut rng);
let mut a = Fr::random(&mut rng).into_repr();
a.0[3] >>= 30;
let mut b = a;
for _ in 0..10 {
@@ -296,7 +301,10 @@ fn test_fr_legendre() {
#[test]
fn test_fr_repr_add_nocarry() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let mut t = FrRepr([
0xd64f669809cbc6a4,
@@ -322,9 +330,9 @@ fn test_fr_repr_add_nocarry() {
// Test for the associativity of addition.
for _ in 0..1000 {
let mut a = FrRepr::rand(&mut rng);
let mut b = FrRepr::rand(&mut rng);
let mut c = FrRepr::rand(&mut rng);
let mut a = Fr::random(&mut rng).into_repr();
let mut b = Fr::random(&mut rng).into_repr();
let mut c = Fr::random(&mut rng).into_repr();
// Unset the first few bits, so that overflow won't occur.
a.0[3] >>= 3;
@@ -380,27 +388,28 @@ fn test_fr_is_valid() {
a.0.sub_noborrow(&FrRepr::from(1));
assert!(a.is_valid());
assert!(Fr(FrRepr::from(0)).is_valid());
assert!(
Fr(FrRepr([
0xffffffff00000000,
0x53bda402fffe5bfe,
0x3339d80809a1d805,
0x73eda753299d7d48
])).is_valid()
);
assert!(
!Fr(FrRepr([
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
])).is_valid()
);
assert!(Fr(FrRepr([
0xffffffff00000000,
0x53bda402fffe5bfe,
0x3339d80809a1d805,
0x73eda753299d7d48
]))
.is_valid());
assert!(!Fr(FrRepr([
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff
]))
.is_valid());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
assert!(a.is_valid());
}
}
@@ -492,13 +501,16 @@ fn test_fr_add_assign() {
// Test associativity
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Generate a, b, c and ensure (a + b) + c == a + (b + c).
let a = Fr::rand(&mut rng);
let b = Fr::rand(&mut rng);
let c = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let b = Fr::random(&mut rng);
let c = Fr::random(&mut rng);
let mut tmp1 = a;
tmp1.add_assign(&b);
@@ -586,12 +598,15 @@ fn test_fr_sub_assign() {
);
}
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure that (a - b) + (b - a) = 0.
let a = Fr::rand(&mut rng);
let b = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let b = Fr::random(&mut rng);
let mut tmp1 = a;
tmp1.sub_assign(&b);
@@ -627,13 +642,16 @@ fn test_fr_mul_assign() {
]))
);
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000000 {
// Ensure that (a * b) * c = a * (b * c)
let a = Fr::rand(&mut rng);
let b = Fr::rand(&mut rng);
let c = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let b = Fr::random(&mut rng);
let c = Fr::random(&mut rng);
let mut tmp1 = a;
tmp1.mul_assign(&b);
@@ -649,10 +667,10 @@ fn test_fr_mul_assign() {
for _ in 0..1000000 {
// Ensure that r * (a + b + c) = r*a + r*b + r*c
let r = Fr::rand(&mut rng);
let mut a = Fr::rand(&mut rng);
let mut b = Fr::rand(&mut rng);
let mut c = Fr::rand(&mut rng);
let r = Fr::random(&mut rng);
let mut a = Fr::random(&mut rng);
let mut b = Fr::random(&mut rng);
let mut c = Fr::random(&mut rng);
let mut tmp1 = a;
tmp1.add_assign(&b);
@@ -687,14 +705,18 @@ fn test_fr_squaring() {
0xb79a310579e76ec2,
0xac1da8d0a9af4e5f,
0x13f629c49bf23e97
])).unwrap()
]))
.unwrap()
);
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000000 {
// Ensure that (a * a) = a^2
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let mut tmp = a;
tmp.square();
@@ -710,13 +732,16 @@ fn test_fr_squaring() {
fn test_fr_inverse() {
assert!(Fr::zero().inverse().is_none());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let one = Fr::one();
for _ in 0..1000 {
// Ensure that a * a^-1 = 1
let mut a = Fr::rand(&mut rng);
let mut a = Fr::random(&mut rng);
let ainv = a.inverse().unwrap();
a.mul_assign(&ainv);
assert_eq!(a, one);
@@ -725,11 +750,14 @@ fn test_fr_inverse() {
#[test]
fn test_fr_double() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure doubling a is equivalent to adding a to itself.
let mut a = Fr::rand(&mut rng);
let mut a = Fr::random(&mut rng);
let mut b = a;
b.add_assign(&a);
a.double();
@@ -746,11 +774,14 @@ fn test_fr_negate() {
assert!(a.is_zero());
}
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Ensure (a - (-a)) = 0.
let mut a = Fr::rand(&mut rng);
let mut a = Fr::random(&mut rng);
let mut b = a;
b.negate();
a.add_assign(&b);
@@ -761,12 +792,15 @@ fn test_fr_negate() {
#[test]
fn test_fr_pow() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for i in 0..1000 {
// Exponentiate by various small numbers and ensure it is consistent with repeated
// multiplication.
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let target = a.pow(&[i]);
let mut c = Fr::one();
for _ in 0..i {
@@ -777,7 +811,7 @@ fn test_fr_pow() {
for _ in 0..1000 {
// Exponentiating by the modulus should have no effect in a prime field.
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
assert_eq!(a, a.pow(Fr::char()));
}
@@ -787,13 +821,16 @@ fn test_fr_pow() {
fn test_fr_sqrt() {
use ff::SqrtField;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
assert_eq!(Fr::zero().sqrt().unwrap(), Fr::zero());
for _ in 0..1000 {
// Ensure sqrt(a^2) = a or -a
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let mut nega = a;
nega.negate();
let mut b = a;
@@ -806,7 +843,7 @@ fn test_fr_sqrt() {
for _ in 0..1000 {
// Ensure sqrt(a)^2 = a for random a
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
if let Some(mut tmp) = a.sqrt() {
tmp.square();
@@ -819,14 +856,13 @@ fn test_fr_sqrt() {
#[test]
fn test_fr_from_into_repr() {
// r + 1 should not be in the field
assert!(
Fr::from_repr(FrRepr([
0xffffffff00000002,
0x53bda402fffe5bfe,
0x3339d80809a1d805,
0x73eda753299d7d48
])).is_err()
);
assert!(Fr::from_repr(FrRepr([
0xffffffff00000002,
0x53bda402fffe5bfe,
0x3339d80809a1d805,
0x73eda753299d7d48
]))
.is_err());
// r should not be in the field
assert!(Fr::from_repr(Fr::char()).is_err());
@@ -858,11 +894,14 @@ fn test_fr_from_into_repr() {
// Zero should be in the field.
assert!(Fr::from_repr(FrRepr::from(0)).unwrap().is_zero());
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
// Try to turn Fr elements into representations and back again, and compare.
let a = Fr::rand(&mut rng);
let a = Fr::random(&mut rng);
let a_repr = a.into_repr();
let b_repr = FrRepr::from(a);
assert_eq!(a_repr, b_repr);
@@ -926,7 +965,8 @@ fn test_fr_display() {
0x185ec8eb3f5b5aee,
0x684499ffe4b9dd99,
0x7c9bba7afb68faa
])).unwrap()
]))
.unwrap()
),
"Fr(0x07c9bba7afb68faa684499ffe4b9dd99185ec8eb3f5b5aeec3cae746a3b5ecc7)".to_string()
);
@@ -938,7 +978,8 @@ fn test_fr_display() {
0xb0ad10817df79b6a,
0xd034a80a2b74132b,
0x41cf9a1336f50719
])).unwrap()
]))
.unwrap()
),
"Fr(0x41cf9a1336f50719d034a80a2b74132bb0ad10817df79b6a44c71298ff198106)".to_string()
);
@@ -982,5 +1023,5 @@ fn fr_field_tests() {
#[test]
fn fr_repr_tests() {
::tests::repr::random_repr_tests::<FrRepr>();
::tests::repr::random_repr_tests::<Fr>();
}

View File

@@ -9,8 +9,8 @@ mod fr;
mod tests;
pub use self::ec::{
G1, G1Affine, G1Compressed, G1Prepared, G1Uncompressed, G2, G2Affine, G2Compressed, G2Prepared,
G2Uncompressed,
G1Affine, G1Compressed, G1Prepared, G1Uncompressed, G2Affine, G2Compressed, G2Prepared,
G2Uncompressed, G1, G2,
};
pub use self::fq::{Fq, FqRepr};
pub use self::fq12::Fq12;

View File

@@ -2,19 +2,22 @@
// common mistakes or strange code patterns. If the `cargo-clippy` feature
// is provided, all compiler warnings are prohibited.
#![cfg_attr(feature = "cargo-clippy", deny(warnings))]
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))]
#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#![cfg_attr(feature = "cargo-clippy", allow(write_literal))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::many_single_char_names))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::new_without_default))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::write_literal))]
// Force public structures to implement Debug
#![deny(missing_debug_implementations)]
extern crate byteorder;
extern crate ff;
extern crate group;
extern crate rand;
extern crate rand_core;
#[cfg(test)]
extern crate rand_xorshift;
#[cfg(test)]
pub mod tests;
@@ -34,8 +37,7 @@ pub trait Engine: ScalarEngine {
Base = Self::Fq,
Scalar = Self::Fr,
Affine = Self::G1Affine,
>
+ From<Self::G1Affine>;
> + From<Self::G1Affine>;
/// The affine representation of an element in G1.
type G1Affine: PairingCurveAffine<
@@ -45,8 +47,7 @@ pub trait Engine: ScalarEngine {
Projective = Self::G1,
Pair = Self::G2Affine,
PairingResult = Self::Fqk,
>
+ From<Self::G1>;
> + From<Self::G1>;
/// The projective representation of an element in G2.
type G2: CurveProjective<
@@ -54,8 +55,7 @@ pub trait Engine: ScalarEngine {
Base = Self::Fqe,
Scalar = Self::Fr,
Affine = Self::G2Affine,
>
+ From<Self::G2Affine>;
> + From<Self::G2Affine>;
/// The affine representation of an element in G2.
type G2Affine: PairingCurveAffine<
@@ -65,8 +65,7 @@ pub trait Engine: ScalarEngine {
Projective = Self::G2,
Pair = Self::G1Affine,
PairingResult = Self::Fqk,
>
+ From<Self::G2>;
> + From<Self::G2>;
/// The base field that hosts G1.
type Fq: PrimeField + SqrtField;
@@ -97,8 +96,9 @@ pub trait Engine: ScalarEngine {
G2: Into<Self::G2Affine>,
{
Self::final_exponentiation(&Self::miller_loop(
[(&(p.into().prepare()), &(q.into().prepare()))].into_iter(),
)).unwrap()
[(&(p.into().prepare()), &(q.into().prepare()))].iter(),
))
.unwrap()
}
}

View File

@@ -1,14 +1,18 @@
use group::{CurveAffine, CurveProjective};
use rand::{Rand, SeedableRng, XorShiftRng};
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;
use {Engine, Field, PairingCurveAffine, PrimeField};
pub fn engine_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..10 {
let a = E::G1::rand(&mut rng).into_affine();
let b = E::G2::rand(&mut rng).into_affine();
let a = E::G1::random(&mut rng).into_affine();
let b = E::G2::random(&mut rng).into_affine();
assert!(a.pairing_with(&b) == b.pairing_with(&a));
assert!(a.pairing_with(&b) == E::pairing(a, b));
@@ -18,10 +22,10 @@ pub fn engine_tests<E: Engine>() {
let z1 = E::G1Affine::zero().prepare();
let z2 = E::G2Affine::zero().prepare();
let a = E::G1::rand(&mut rng).into_affine().prepare();
let b = E::G2::rand(&mut rng).into_affine().prepare();
let c = E::G1::rand(&mut rng).into_affine().prepare();
let d = E::G2::rand(&mut rng).into_affine().prepare();
let a = E::G1::random(&mut rng).into_affine().prepare();
let b = E::G2::random(&mut rng).into_affine().prepare();
let c = E::G1::random(&mut rng).into_affine().prepare();
let d = E::G2::random(&mut rng).into_affine().prepare();
assert_eq!(
E::Fqk::one(),
@@ -49,12 +53,15 @@ pub fn engine_tests<E: Engine>() {
}
fn random_miller_loop_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
// Exercise the miller loop for a reduced pairing
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let a = E::G1::random(&mut rng);
let b = E::G2::random(&mut rng);
let p2 = E::pairing(a, b);
@@ -68,10 +75,10 @@ fn random_miller_loop_tests<E: Engine>() {
// Exercise a double miller loop
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let c = E::G1::rand(&mut rng);
let d = E::G2::rand(&mut rng);
let a = E::G1::random(&mut rng);
let b = E::G2::random(&mut rng);
let c = E::G1::random(&mut rng);
let d = E::G2::random(&mut rng);
let ab = E::pairing(a, b);
let cd = E::pairing(c, d);
@@ -92,14 +99,17 @@ fn random_miller_loop_tests<E: Engine>() {
}
fn random_bilinearity_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let a = E::G1::random(&mut rng);
let b = E::G2::random(&mut rng);
let c = E::Fr::rand(&mut rng);
let d = E::Fr::rand(&mut rng);
let c = E::Fr::random(&mut rng);
let d = E::Fr::random(&mut rng);
let mut ac = a;
ac.mul_assign(c);

View File

@@ -1,12 +1,16 @@
use ff::{Field, LegendreSymbol, PrimeField, SqrtField};
use rand::{Rng, SeedableRng, XorShiftRng};
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
pub fn random_frobenius_tests<F: Field, C: AsRef<[u64]>>(characteristic: C, maxpower: usize) {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..100 {
for i in 0..(maxpower + 1) {
let mut a = F::rand(&mut rng);
let mut a = F::random(&mut rng);
let mut b = a;
for _ in 0..i {
@@ -20,10 +24,13 @@ pub fn random_frobenius_tests<F: Field, C: AsRef<[u64]>>(characteristic: C, maxp
}
pub fn random_sqrt_tests<F: SqrtField>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..10000 {
let a = F::rand(&mut rng);
let a = F::random(&mut rng);
let mut b = a;
b.square();
assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue);
@@ -54,7 +61,10 @@ pub fn random_sqrt_tests<F: SqrtField>() {
}
pub fn random_field_tests<F: Field>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
random_multiplication_tests::<F, _>(&mut rng);
random_addition_tests::<F, _>(&mut rng);
@@ -76,14 +86,14 @@ pub fn random_field_tests<F: Field>() {
// Multiplication by zero
{
let mut a = F::rand(&mut rng);
let mut a = F::random(&mut rng);
a.mul_assign(&F::zero());
assert!(a.is_zero());
}
// Addition by zero
{
let mut a = F::rand(&mut rng);
let mut a = F::random(&mut rng);
let copy = a;
a.add_assign(&F::zero());
assert_eq!(a, copy);
@@ -106,10 +116,13 @@ pub fn from_str_tests<F: PrimeField>() {
}
{
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..1000 {
let n: u64 = rng.gen();
let n = rng.next_u64();
let a = F::from_str(&format!("{}", n)).unwrap();
let b = F::from_repr(n.into()).unwrap();
@@ -124,11 +137,11 @@ pub fn from_str_tests<F: PrimeField>() {
assert!(F::from_str("00000000000").is_none());
}
fn random_multiplication_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_multiplication_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let a = F::random(rng);
let b = F::random(rng);
let c = F::random(rng);
let mut t0 = a; // (a * b) * c
t0.mul_assign(&b);
@@ -147,11 +160,11 @@ fn random_multiplication_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_addition_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_addition_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let a = F::random(rng);
let b = F::random(rng);
let c = F::random(rng);
let mut t0 = a; // (a + b) + c
t0.add_assign(&b);
@@ -170,10 +183,10 @@ fn random_addition_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_subtraction_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_subtraction_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let b = F::random(rng);
let a = F::random(rng);
let mut t0 = a; // (a - b)
t0.sub_assign(&b);
@@ -188,9 +201,9 @@ fn random_subtraction_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_negation_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_negation_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let a = F::random(rng);
let mut b = a;
b.negate();
b.add_assign(&a);
@@ -199,9 +212,9 @@ fn random_negation_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_doubling_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_doubling_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let mut a = F::rand(rng);
let mut a = F::random(rng);
let mut b = a;
a.add_assign(&b);
b.double();
@@ -210,9 +223,9 @@ fn random_doubling_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_squaring_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_squaring_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
let mut a = F::rand(rng);
let mut a = F::random(rng);
let mut b = a;
a.mul_assign(&b);
b.square();
@@ -221,11 +234,11 @@ fn random_squaring_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_inversion_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_inversion_tests<F: Field, R: RngCore>(rng: &mut R) {
assert!(F::zero().inverse().is_none());
for _ in 0..10000 {
let mut a = F::rand(rng);
let mut a = F::random(rng);
let b = a.inverse().unwrap(); // probabilistically nonzero
a.mul_assign(&b);
@@ -233,14 +246,14 @@ fn random_inversion_tests<F: Field, R: Rng>(rng: &mut R) {
}
}
fn random_expansion_tests<F: Field, R: Rng>(rng: &mut R) {
fn random_expansion_tests<F: Field, R: RngCore>(rng: &mut R) {
for _ in 0..10000 {
// Compare (a + b)(c + d) and (a*c + b*c + a*d + b*d)
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let d = F::rand(rng);
let a = F::random(rng);
let b = F::random(rng);
let c = F::random(rng);
let d = F::random(rng);
let mut t0 = a;
t0.add_assign(&b);

View File

@@ -1,21 +1,25 @@
use ff::PrimeFieldRepr;
use rand::{SeedableRng, XorShiftRng};
use ff::{PrimeField, PrimeFieldRepr};
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;
pub fn random_repr_tests<R: PrimeFieldRepr>() {
random_encoding_tests::<R>();
random_shl_tests::<R>();
random_shr_tests::<R>();
pub fn random_repr_tests<P: PrimeField>() {
random_encoding_tests::<P>();
random_shl_tests::<P>();
random_shr_tests::<P>();
}
fn random_encoding_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
fn random_encoding_tests<P: PrimeField>() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let r = R::rand(&mut rng);
let r = P::random(&mut rng).into_repr();
// Big endian
{
let mut rdecoded = R::default();
let mut rdecoded = <P as PrimeField>::Repr::default();
let mut v: Vec<u8> = vec![];
r.write_be(&mut v).unwrap();
@@ -26,7 +30,7 @@ fn random_encoding_tests<R: PrimeFieldRepr>() {
// Little endian
{
let mut rdecoded = R::default();
let mut rdecoded = <P as PrimeField>::Repr::default();
let mut v: Vec<u8> = vec![];
r.write_le(&mut v).unwrap();
@@ -36,8 +40,8 @@ fn random_encoding_tests<R: PrimeFieldRepr>() {
}
{
let mut rdecoded_le = R::default();
let mut rdecoded_be_flip = R::default();
let mut rdecoded_le = <P as PrimeField>::Repr::default();
let mut rdecoded_be_flip = <P as PrimeField>::Repr::default();
let mut v: Vec<u8> = vec![];
r.write_le(&mut v).unwrap();
@@ -55,11 +59,14 @@ fn random_encoding_tests<R: PrimeFieldRepr>() {
}
}
fn random_shl_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
fn random_shl_tests<P: PrimeField>() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..100 {
let r = R::rand(&mut rng);
let r = P::random(&mut rng).into_repr();
for shift in 0..(r.num_bits() + 1) {
let mut r1 = r;
@@ -76,11 +83,14 @@ fn random_shl_tests<R: PrimeFieldRepr>() {
}
}
fn random_shr_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
fn random_shr_tests<P: PrimeField>() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..100 {
let r = R::rand(&mut rng);
let r = P::random(&mut rng).into_repr();
for shift in 0..(r.num_bits() + 1) {
let mut r1 = r;
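With PrimeFieldRepr::rand gone, the tests above obtain representations via P::random(rng).into_repr() and then exercise the byte encodings. A short sketch of the same big-endian round trip from a downstream crate's point of view (the helper is illustrative, not part of the diff):

use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Fr, FrRepr};

// Encode a field element's representation as big-endian bytes and read it back.
fn roundtrip_be(x: Fr) -> FrRepr {
    let repr = x.into_repr();
    let mut buf = Vec::new();
    repr.write_be(&mut buf).expect("writing to a Vec cannot fail");
    let mut decoded = FrRepr::default();
    decoded.read_be(&buf[..]).expect("buffer has the expected length");
    assert_eq!(decoded, repr);
    decoded
}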

View File

@@ -1,3 +0,0 @@
/target/
**/*.rs.bk
Cargo.lock

View File

@@ -1,14 +0,0 @@
Copyrights in the "sapling-crypto" library are retained by their contributors. No
copyright assignment is required to contribute to the "sapling-crypto" library.
The "sapling-crypto" library is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

View File

@@ -1,28 +0,0 @@
[package]
authors = ["Sean Bowe <sean@z.cash>"]
description = "Cryptographic library for Zcash Sapling"
documentation = "https://github.com/zcash-hackworks/sapling"
homepage = "https://github.com/zcash-hackworks/sapling"
license = "MIT/Apache-2.0"
name = "sapling-crypto"
repository = "https://github.com/zcash-hackworks/sapling"
version = "0.0.1"
[dependencies.pairing]
path = "../pairing"
features = ["expose-arith"]
[dependencies]
bellman = { path = "../bellman" }
ff = { path = "../ff" }
rand = "0.4"
digest = "0.7"
byteorder = "1"
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
[dev-dependencies]
hex-literal = "0.1"
rust-crypto = "0.2"

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -1,23 +0,0 @@
# sapling-crypto
This repository contains a (work-in-progress) implementation of Zcash's "Sapling" cryptography.
## Security Warnings
This library is currently under development and has not been reviewed.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

View File

@@ -1,23 +0,0 @@
#![feature(test)]
extern crate rand;
extern crate test;
extern crate pairing;
extern crate sapling_crypto;
use rand::{Rand, thread_rng};
use pairing::bls12_381::Bls12;
use sapling_crypto::jubjub::JubjubBls12;
use sapling_crypto::pedersen_hash::{pedersen_hash, Personalization};
#[bench]
fn bench_pedersen_hash(b: &mut test::Bencher) {
let params = JubjubBls12::new();
let rng = &mut thread_rng();
let bits = (0..510).map(|_| bool::rand(rng)).collect::<Vec<_>>();
let personalization = Personalization::MerkleTree(31);
b.iter(|| {
pedersen_hash::<Bls12, _>(personalization, bits.clone(), &params)
});
}

View File

@@ -1,102 +0,0 @@
extern crate sapling_crypto;
extern crate bellman;
extern crate rand;
extern crate pairing;
use std::time::{Duration, Instant};
use sapling_crypto::jubjub::{
JubjubBls12,
edwards,
fs,
};
use sapling_crypto::circuit::sapling::{
Spend
};
use sapling_crypto::primitives::{
Diversifier,
ProofGenerationKey,
ValueCommitment
};
use bellman::groth16::*;
use rand::{XorShiftRng, SeedableRng, Rng};
use pairing::bls12_381::{Bls12, Fr};
const TREE_DEPTH: usize = 32;
fn main() {
let jubjub_params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
println!("Creating sample parameters...");
let groth_params = generate_random_parameters::<Bls12, _, _>(
Spend {
params: jubjub_params,
value_commitment: None,
proof_generation_key: None,
payment_address: None,
commitment_randomness: None,
ar: None,
auth_path: vec![None; TREE_DEPTH],
anchor: None
},
rng
).unwrap();
const SAMPLES: u32 = 50;
let mut total_time = Duration::new(0, 0);
for _ in 0..SAMPLES {
let value_commitment = ValueCommitment {
value: 1,
randomness: rng.gen()
};
let nsk: fs::Fs = rng.gen();
let ak = edwards::Point::rand(rng, jubjub_params).mul_by_cofactor(jubjub_params);
let proof_generation_key = ProofGenerationKey {
ak: ak.clone(),
nsk: nsk.clone()
};
let viewing_key = proof_generation_key.into_viewing_key(jubjub_params);
let payment_address;
loop {
let diversifier = Diversifier(rng.gen());
if let Some(p) = viewing_key.into_payment_address(
diversifier,
jubjub_params
)
{
payment_address = p;
break;
}
}
let commitment_randomness: fs::Fs = rng.gen();
let auth_path = vec![Some((rng.gen(), rng.gen())); TREE_DEPTH];
let ar: fs::Fs = rng.gen();
let anchor: Fr = rng.gen();
let start = Instant::now();
let _ = create_random_proof(Spend {
params: jubjub_params,
value_commitment: Some(value_commitment),
proof_generation_key: Some(proof_generation_key),
payment_address: Some(payment_address),
commitment_randomness: Some(commitment_randomness),
ar: Some(ar),
auth_path: auth_path,
anchor: Some(anchor)
}, &groth_params, rng).unwrap();
total_time += start.elapsed();
}
let avg = total_time / SAMPLES;
let avg = avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (avg.as_secs() as f64);
println!("Average proving time (in seconds): {}", avg);
}

View File

@@ -1,114 +0,0 @@
use ff::{Field, PrimeField};
use pairing::Engine;
use bellman::{ConstraintSystem, SynthesisError};
use super::boolean::{Boolean};
use super::num::Num;
use super::Assignment;
/// Takes a sequence of booleans and exposes them as compact
/// public inputs
pub fn pack_into_inputs<E, CS>(
mut cs: CS,
bits: &[Boolean]
) -> Result<(), SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
for (i, bits) in bits.chunks(E::Fr::CAPACITY as usize).enumerate()
{
let mut num = Num::<E>::zero();
let mut coeff = E::Fr::one();
for bit in bits {
num = num.add_bool_with_coeff(CS::one(), bit, coeff);
coeff.double();
}
let input = cs.alloc_input(|| format!("input {}", i), || {
Ok(*num.get_value().get()?)
})?;
// num * 1 = input
cs.enforce(
|| format!("packing constraint {}", i),
|_| num.lc(E::Fr::one()),
|lc| lc + CS::one(),
|lc| lc + input
);
}
Ok(())
}
pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool>
{
bytes.iter()
.flat_map(|&v| (0..8).rev().map(move |i| (v >> i) & 1 == 1))
.collect()
}
pub fn bytes_to_bits_le(bytes: &[u8]) -> Vec<bool>
{
bytes.iter()
.flat_map(|&v| (0..8).map(move |i| (v >> i) & 1 == 1))
.collect()
}
pub fn compute_multipacking<E: Engine>(
bits: &[bool]
) -> Vec<E::Fr>
{
let mut result = vec![];
for bits in bits.chunks(E::Fr::CAPACITY as usize)
{
let mut cur = E::Fr::zero();
let mut coeff = E::Fr::one();
for bit in bits {
if *bit {
cur.add_assign(&coeff);
}
coeff.double();
}
result.push(cur);
}
result
}
#[test]
fn test_multipacking() {
use rand::{SeedableRng, Rng, XorShiftRng};
use bellman::{ConstraintSystem};
use pairing::bls12_381::{Bls12};
use ::circuit::test::*;
use super::boolean::{AllocatedBit, Boolean};
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for num_bits in 0..1500 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let bits: Vec<bool> = (0..num_bits).map(|_| rng.gen()).collect();
let circuit_bits = bits.iter().enumerate()
.map(|(i, &b)| {
Boolean::from(
AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
Some(b)
).unwrap()
)
})
.collect::<Vec<_>>();
let expected_inputs = compute_multipacking::<Bls12>(&bits);
pack_into_inputs(cs.namespace(|| "pack"), &circuit_bits).unwrap();
assert!(cs.is_satisfied());
assert!(cs.verify(&expected_inputs));
}
}
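The two helpers in the removed module above differ only in bit order: bytes_to_bits walks each byte most-significant bit first, while bytes_to_bits_le walks it least-significant bit first. A standalone copy with a concrete check (illustrative only, not part of the diff):

// Expand bytes into bits, most-significant bit of each byte first.
fn bytes_to_bits(bytes: &[u8]) -> Vec<bool> {
    bytes
        .iter()
        .flat_map(|&v| (0..8).rev().map(move |i| (v >> i) & 1 == 1))
        .collect()
}

// Expand bytes into bits, least-significant bit of each byte first.
fn bytes_to_bits_le(bytes: &[u8]) -> Vec<bool> {
    bytes
        .iter()
        .flat_map(|&v| (0..8).map(move |i| (v >> i) & 1 == 1))
        .collect()
}

fn main() {
    // 0x80 = 0b1000_0000: the set bit comes first in MSB order, last in LSB order.
    assert_eq!(bytes_to_bits(&[0x80])[0], true);
    assert_eq!(bytes_to_bits_le(&[0x80])[7], true);
    assert!(bytes_to_bits(&[0x80])[1..].iter().all(|&b| !b));
    assert!(bytes_to_bits_le(&[0x80])[..7].iter().all(|&b| !b));
}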

View File

@@ -1,194 +0,0 @@
use super::*;
use super::ecc::{
MontgomeryPoint,
EdwardsPoint
};
use super::boolean::Boolean;
use ::jubjub::*;
use bellman::{
ConstraintSystem
};
use super::lookup::*;
pub use pedersen_hash::Personalization;
impl Personalization {
fn get_constant_bools(&self) -> Vec<Boolean> {
self.get_bits()
.into_iter()
.map(|e| Boolean::constant(e))
.collect()
}
}
pub fn pedersen_hash<E: JubjubEngine, CS>(
mut cs: CS,
personalization: Personalization,
bits: &[Boolean],
params: &E::Params
) -> Result<EdwardsPoint<E>, SynthesisError>
where CS: ConstraintSystem<E>
{
let personalization = personalization.get_constant_bools();
assert_eq!(personalization.len(), 6);
let mut edwards_result = None;
let mut bits = personalization.iter().chain(bits.iter());
let mut segment_generators = params.pedersen_circuit_generators().iter();
let boolean_false = Boolean::constant(false);
let mut segment_i = 0;
loop {
let mut segment_result = None;
let mut segment_windows = &segment_generators.next()
.expect("enough segments")[..];
let mut window_i = 0;
while let Some(a) = bits.next() {
let b = bits.next().unwrap_or(&boolean_false);
let c = bits.next().unwrap_or(&boolean_false);
let tmp = lookup3_xy_with_conditional_negation(
cs.namespace(|| format!("segment {}, window {}", segment_i, window_i)),
&[a.clone(), b.clone(), c.clone()],
&segment_windows[0]
)?;
let tmp = MontgomeryPoint::interpret_unchecked(tmp.0, tmp.1);
match segment_result {
None => {
segment_result = Some(tmp);
},
Some(ref mut segment_result) => {
*segment_result = tmp.add(
cs.namespace(|| format!("addition of segment {}, window {}", segment_i, window_i)),
segment_result,
params
)?;
}
}
segment_windows = &segment_windows[1..];
if segment_windows.len() == 0 {
break;
}
window_i += 1;
}
match segment_result {
Some(segment_result) => {
// Convert this segment into twisted Edwards form.
let segment_result = segment_result.into_edwards(
cs.namespace(|| format!("conversion of segment {} into edwards", segment_i)),
params
)?;
match edwards_result {
Some(ref mut edwards_result) => {
*edwards_result = segment_result.add(
cs.namespace(|| format!("addition of segment {} to accumulator", segment_i)),
edwards_result,
params
)?;
},
None => {
edwards_result = Some(segment_result);
}
}
},
None => {
// We didn't process any new bits.
break;
}
}
segment_i += 1;
}
Ok(edwards_result.unwrap())
}
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rng, XorShiftRng};
use super::*;
use ::circuit::test::*;
use ::circuit::boolean::{Boolean, AllocatedBit};
use ff::PrimeField;
use pairing::bls12_381::{Bls12, Fr};
#[test]
fn test_pedersen_hash_constraints() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let params = &JubjubBls12::new();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input: Vec<bool> = (0..(Fr::NUM_BITS * 2)).map(|_| rng.gen()).collect();
let input_bools: Vec<Boolean> = input.iter().enumerate().map(|(i, b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("input {}", i)), Some(*b)).unwrap()
)
}).collect();
pedersen_hash(
cs.namespace(|| "pedersen hash"),
Personalization::NoteCommitment,
&input_bools,
params
).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 1377);
}
#[test]
fn test_pedersen_hash() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let params = &JubjubBls12::new();
for length in 0..751 {
for _ in 0..5 {
let mut input: Vec<bool> = (0..length).map(|_| rng.gen()).collect();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bools: Vec<Boolean> = input.iter().enumerate().map(|(i, b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("input {}", i)), Some(*b)).unwrap()
)
}).collect();
let res = pedersen_hash(
cs.namespace(|| "pedersen hash"),
Personalization::MerkleTree(1),
&input_bools,
params
).unwrap();
assert!(cs.is_satisfied());
let expected = ::pedersen_hash::pedersen_hash::<Bls12, _>(
Personalization::MerkleTree(1),
input.clone().into_iter(),
params
).into_xy();
assert_eq!(res.get_x().get_value().unwrap(), expected.0);
assert_eq!(res.get_y().get_value().unwrap(), expected.1);
// Test against the output of a different personalization
let unexpected = ::pedersen_hash::pedersen_hash::<Bls12, _>(
Personalization::MerkleTree(0),
input.into_iter(),
params
).into_xy();
assert!(res.get_x().get_value().unwrap() != unexpected.0);
assert!(res.get_y().get_value().unwrap() != unexpected.1);
}
}
}
}

View File

@@ -1,23 +0,0 @@
extern crate pairing;
extern crate bellman;
extern crate blake2_rfc;
extern crate digest;
extern crate ff;
extern crate rand;
extern crate byteorder;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate crypto;
pub mod jubjub;
pub mod group_hash;
pub mod circuit;
pub mod pedersen_hash;
pub mod primitives;
pub mod constants;
pub mod redjubjub;
pub mod util;

zcash_client_backend/.gitignore (new file)
View File

@@ -0,0 +1,2 @@
# Protobufs
src/proto/

View File

@@ -7,10 +7,19 @@ authors = [
edition = "2018"
[dependencies]
bech32 = "0.6"
bech32 = "0.7"
bs58 = { version = "0.2", features = ["check"] }
ff = { path = "../ff" }
hex = "0.3"
pairing = { path = "../pairing" }
sapling-crypto = { path = "../sapling-crypto" }
protobuf = "2"
subtle = "2"
zcash_primitives = { path = "../zcash_primitives" }
[build-dependencies]
protobuf-codegen-pure = "2"
[dev-dependencies]
rand = "0.4"
rand_core = "0.5"
rand_os = "0.2"
rand_xorshift = "0.2"

View File

@@ -0,0 +1,11 @@
use protobuf_codegen_pure;
fn main() {
protobuf_codegen_pure::run(protobuf_codegen_pure::Args {
out_dir: "src/proto",
input: &["proto/compact_formats.proto"],
includes: &["proto"],
customize: Default::default(),
})
.expect("protoc");
}

View File

@@ -0,0 +1,48 @@
syntax = "proto3";
package cash.z.wallet.sdk.rpc;
option go_package = "walletrpc";
// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value.
// bytes fields of hashes are in canonical little-endian format.
// CompactBlock is a packaging of ONLY the data from a block that's needed to:
// 1. Detect a payment to your shielded Sapling address
// 2. Detect a spend of your shielded Sapling notes
// 3. Update your witnesses to generate new Sapling spend proofs.
message CompactBlock {
uint32 protoVersion = 1; // the version of this wire format, for storage
uint64 height = 2; // the height of this block
bytes hash = 3;
bytes prevHash = 4;
uint32 time = 5;
bytes header = 6; // (hash, prevHash, and time) OR (full header)
repeated CompactTx vtx = 7; // compact transactions from this block
}
message CompactTx {
// Index and hash will allow the receiver to call out to chain
// explorers or other data structures to retrieve more information
// about this transaction.
uint64 index = 1;
bytes hash = 2;
// The transaction fee: present if server can provide. In the case of a
// stateless server and a transaction with transparent inputs, this will be
// unset because the calculation requires reference to prior transactions.
// In a pure-Sapling context, the fee will be calculable as:
// valueBalance + (sum(vPubNew) - sum(vPubOld) - sum(tOut))
uint32 fee = 3;
repeated CompactSpend spends = 4;
repeated CompactOutput outputs = 5;
}
message CompactSpend {
bytes nf = 1;
}
message CompactOutput {
bytes cmu = 1;
bytes epk = 2;
bytes ciphertext = 3;
}
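Together these messages carry just enough of each block for the detection and witness-update steps listed in the header comment. A hedged sketch of decoding one with the rust-protobuf 2 accessors generated by the build script above; the zcash_client_backend::proto::compact_formats module path is an assumption, and the byte source is left to the caller:

use protobuf::parse_from_bytes;
use zcash_client_backend::proto::compact_formats::CompactBlock;

// Decode a serialized CompactBlock and walk the per-output data a light client
// needs for trial decryption.
fn scan_compact_block(bytes: &[u8]) -> protobuf::ProtobufResult<()> {
    let block: CompactBlock = parse_from_bytes(bytes)?;
    println!("block {} has {} compact txs", block.get_height(), block.get_vtx().len());
    for tx in block.get_vtx() {
        for output in tx.get_outputs() {
            // cmu, epk and ciphertext are the minimum needed to detect a payment.
            let (_cmu, _epk, _ct) = (output.get_cmu(), output.get_epk(), output.get_ciphertext());
        }
    }
    Ok(())
}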

View File

@@ -2,3 +2,7 @@
pub mod mainnet;
pub mod testnet;
pub const SPROUT_CONSENSUS_BRANCH_ID: u32 = 0;
pub const OVERWINTER_CONSENSUS_BRANCH_ID: u32 = 0x5ba8_1b19;
pub const SAPLING_CONSENSUS_BRANCH_ID: u32 = 0x76b8_09bb;
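Each of these constants identifies a network upgrade by its consensus branch ID. An illustrative lookup over the values above (the helper itself is not part of the diff):

// Map a consensus branch ID from the constants above to its upgrade name.
fn upgrade_name(branch_id: u32) -> Option<&'static str> {
    match branch_id {
        SPROUT_CONSENSUS_BRANCH_ID => Some("Sprout"),
        OVERWINTER_CONSENSUS_BRANCH_ID => Some("Overwinter"),
        SAPLING_CONSENSUS_BRANCH_ID => Some("Sapling"),
        _ => None,
    }
}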

View File

@@ -26,3 +26,13 @@ pub const HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY: &str = "zxviews";
/// [`PaymentAddress`]: sapling_crypto::primitives::PaymentAddress
/// [Zcash Protocol Specification]: https://github.com/zcash/zips/blob/master/protocol/protocol.pdf
pub const HRP_SAPLING_PAYMENT_ADDRESS: &str = "zs";
/// The prefix for a Base58Check-encoded mainnet [`TransparentAddress::PublicKey`].
///
/// [`TransparentAddress::PublicKey`]: zcash_primitives::legacy::TransparentAddress::PublicKey
pub const B58_PUBKEY_ADDRESS_PREFIX: [u8; 2] = [0x1c, 0xb8];
/// The prefix for a Base58Check-encoded mainnet [`TransparentAddress::Script`].
///
/// [`TransparentAddress::Script`]: zcash_primitives::legacy::TransparentAddress::Script
pub const B58_SCRIPT_ADDRESS_PREFIX: [u8; 2] = [0x1c, 0xbd];
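A transparent address is the 2-byte prefix above followed by a 20-byte hash160, Base58Check-encoded. A hedged sketch assuming the bs58 builder methods enabled by the "check" feature in this crate's Cargo.toml (the helper name and inputs are illustrative):

// Illustrative: prepend the network prefix and Base58Check-encode a 20-byte hash.
fn encode_transparent_pubkey_address(hash160: &[u8; 20]) -> String {
    let mut bytes = B58_PUBKEY_ADDRESS_PREFIX.to_vec();
    bytes.extend_from_slice(hash160);
    bs58::encode(bytes).with_check().into_string()
}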

View File

@@ -26,3 +26,13 @@ pub const HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY: &str = "zxviewtestsapling";
/// [`PaymentAddress`]: sapling_crypto::primitives::PaymentAddress
/// [Zcash Protocol Specification]: https://github.com/zcash/zips/blob/master/protocol/protocol.pdf
pub const HRP_SAPLING_PAYMENT_ADDRESS: &str = "ztestsapling";
/// The prefix for a Base58Check-encoded testnet [`TransparentAddress::PublicKey`].
///
/// [`TransparentAddress::PublicKey`]: zcash_primitives::legacy::TransparentAddress::PublicKey
pub const B58_PUBKEY_ADDRESS_PREFIX: [u8; 2] = [0x1d, 0x25];
/// The prefix for a Base58Check-encoded testnet [`TransparentAddress::Script`].
///
/// [`TransparentAddress::Script`]: zcash_primitives::legacy::TransparentAddress::Script
pub const B58_SCRIPT_ADDRESS_PREFIX: [u8; 2] = [0x1c, 0xba];

View File

@@ -3,14 +3,16 @@
//! Human-Readable Prefixes (HRPs) for Bech32 encodings are located in the [`constants`]
//! module.
use bech32::{convert_bits, Bech32, Error};
use bech32::{self, Error, FromBase32, ToBase32};
use bs58::{self, decode::DecodeError};
use pairing::bls12_381::Bls12;
use sapling_crypto::{
use std::io::{self, Write};
use zcash_primitives::{
jubjub::edwards,
primitives::{Diversifier, PaymentAddress},
};
use std::io::{self, Write};
use zcash_primitives::{
legacy::TransparentAddress,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
JUBJUB,
};
@@ -21,21 +23,16 @@ where
{
let mut data: Vec<u8> = vec![];
write(&mut data).expect("Should be able to write to a Vec");
let converted =
convert_bits(&data, 8, 5, true).expect("Should be able to convert Vec<u8> to Vec<u5>");
let encoded = Bech32::new_check_data(hrp.into(), converted).expect("hrp is not empty");
encoded.to_string()
bech32::encode(hrp, data.to_base32()).expect("hrp is invalid")
}
fn bech32_decode<T, F>(hrp: &str, s: &str, read: F) -> Result<Option<T>, Error>
where
F: Fn(Vec<u8>) -> Option<T>,
{
let decoded = s.parse::<Bech32>()?;
if decoded.hrp() == hrp {
convert_bits(decoded.data(), 5, 8, false).map(|data| read(data))
let (decoded_hrp, data) = bech32::decode(s)?;
if decoded_hrp == hrp {
Vec::<u8>::from_base32(&data).map(|data| read(data))
} else {
Ok(None)
}
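For reference, a standalone round-trip sketch of the bech32 0.7 API that the rewritten helpers above rely on (assuming only the bech32 = "0.7" dependency added in this crate's Cargo.toml):

use bech32::{self, FromBase32, ToBase32};

fn bech32_roundtrip(hrp: &str, payload: &[u8]) -> Result<Vec<u8>, bech32::Error> {
    let encoded = bech32::encode(hrp, payload.to_base32())?;
    let (decoded_hrp, data) = bech32::decode(&encoded)?;
    assert_eq!(decoded_hrp, hrp);
    Vec::<u8>::from_base32(&data)
}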
@@ -101,18 +98,22 @@ pub fn decode_extended_full_viewing_key(
///
/// ```
/// use pairing::bls12_381::Bls12;
/// use rand::{SeedableRng, XorShiftRng};
/// use sapling_crypto::{
/// jubjub::edwards,
/// primitives::{Diversifier, PaymentAddress},
/// };
/// use rand_core::SeedableRng;
/// use rand_xorshift::XorShiftRng;
/// use zcash_client_backend::{
/// constants::testnet::HRP_SAPLING_PAYMENT_ADDRESS,
/// encoding::encode_payment_address,
/// };
/// use zcash_primitives::JUBJUB;
/// use zcash_primitives::{
/// jubjub::edwards,
/// primitives::{Diversifier, PaymentAddress},
/// JUBJUB,
/// };
///
/// let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
/// let rng = &mut XorShiftRng::from_seed([
/// 0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
/// 0xbc, 0xe5,
/// ]);
///
/// let pa = PaymentAddress {
/// diversifier: Diversifier([0u8; 11]),
@@ -121,7 +122,7 @@ pub fn decode_extended_full_viewing_key(
///
/// assert_eq!(
/// encode_payment_address(HRP_SAPLING_PAYMENT_ADDRESS, &pa),
/// "ztestsapling1qqqqqqqqqqqqqqqqqqxrrfaccydp867g6zg7ne5ht37z38jtfyw0ygmp0ja6hhf07twjq6awtaj",
/// "ztestsapling1qqqqqqqqqqqqqqqqqrjq05nyfku05msvu49mawhg6kr0wwljahypwyk2h88z6975u563j0ym7pe",
/// );
/// ```
pub fn encode_payment_address(hrp: &str, addr: &PaymentAddress<Bls12>) -> String {
@@ -137,18 +138,22 @@ pub fn encode_payment_address(hrp: &str, addr: &PaymentAddress<Bls12>) -> String
///
/// ```
/// use pairing::bls12_381::Bls12;
/// use rand::{SeedableRng, XorShiftRng};
/// use sapling_crypto::{
/// jubjub::edwards,
/// primitives::{Diversifier, PaymentAddress},
/// };
/// use rand_core::SeedableRng;
/// use rand_xorshift::XorShiftRng;
/// use zcash_client_backend::{
/// constants::testnet::HRP_SAPLING_PAYMENT_ADDRESS,
/// encoding::decode_payment_address,
/// };
/// use zcash_primitives::JUBJUB;
/// use zcash_primitives::{
/// jubjub::edwards,
/// primitives::{Diversifier, PaymentAddress},
/// JUBJUB,
/// };
///
/// let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
/// let rng = &mut XorShiftRng::from_seed([
/// 0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
/// 0xbc, 0xe5,
/// ]);
///
/// let pa = PaymentAddress {
/// diversifier: Diversifier([0u8; 11]),
@@ -158,7 +163,7 @@ pub fn encode_payment_address(hrp: &str, addr: &PaymentAddress<Bls12>) -> String
/// assert_eq!(
/// decode_payment_address(
/// HRP_SAPLING_PAYMENT_ADDRESS,
/// "ztestsapling1qqqqqqqqqqqqqqqqqqxrrfaccydp867g6zg7ne5ht37z38jtfyw0ygmp0ja6hhf07twjq6awtaj",
/// "ztestsapling1qqqqqqqqqqqqqqqqqrjq05nyfku05msvu49mawhg6kr0wwljahypwyk2h88z6975u563j0ym7pe",
/// ),
/// Ok(Some(pa)),
/// );
@@ -179,22 +184,133 @@ pub fn decode_payment_address(hrp: &str, s: &str) -> Result<Option<PaymentAddres
})
}
/// Writes a [`TransparentAddress`] as a Base58Check-encoded string.
///
/// # Examples
///
/// ```
/// use zcash_client_backend::{
/// constants::testnet::{B58_PUBKEY_ADDRESS_PREFIX, B58_SCRIPT_ADDRESS_PREFIX},
/// encoding::encode_transparent_address,
/// };
/// use zcash_primitives::legacy::TransparentAddress;
///
/// assert_eq!(
/// encode_transparent_address(
/// &B58_PUBKEY_ADDRESS_PREFIX,
/// &B58_SCRIPT_ADDRESS_PREFIX,
/// &TransparentAddress::PublicKey([0; 20]),
/// ),
/// "tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma",
/// );
///
/// assert_eq!(
/// encode_transparent_address(
/// &B58_PUBKEY_ADDRESS_PREFIX,
/// &B58_SCRIPT_ADDRESS_PREFIX,
/// &TransparentAddress::Script([0; 20]),
/// ),
/// "t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2",
/// );
/// ```
pub fn encode_transparent_address(
pubkey_version: &[u8],
script_version: &[u8],
addr: &TransparentAddress,
) -> String {
let decoded = match addr {
TransparentAddress::PublicKey(key_id) => {
let mut decoded = vec![0; pubkey_version.len() + 20];
decoded[..pubkey_version.len()].copy_from_slice(pubkey_version);
decoded[pubkey_version.len()..].copy_from_slice(key_id);
decoded
}
TransparentAddress::Script(script_id) => {
let mut decoded = vec![0; script_version.len() + 20];
decoded[..script_version.len()].copy_from_slice(script_version);
decoded[script_version.len()..].copy_from_slice(script_id);
decoded
}
};
bs58::encode(decoded).with_check().into_string()
}
/// Decodes a [`TransparentAddress`] from a Base58Check-encoded string.
///
/// # Examples
///
/// ```
/// use zcash_client_backend::{
/// constants::testnet::{B58_PUBKEY_ADDRESS_PREFIX, B58_SCRIPT_ADDRESS_PREFIX},
/// encoding::decode_transparent_address,
/// };
/// use zcash_primitives::legacy::TransparentAddress;
///
/// assert_eq!(
/// decode_transparent_address(
/// &B58_PUBKEY_ADDRESS_PREFIX,
/// &B58_SCRIPT_ADDRESS_PREFIX,
/// "tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma",
/// ),
/// Ok(Some(TransparentAddress::PublicKey([0; 20]))),
/// );
///
/// assert_eq!(
/// decode_transparent_address(
/// &B58_PUBKEY_ADDRESS_PREFIX,
/// &B58_SCRIPT_ADDRESS_PREFIX,
/// "t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2",
/// ),
/// Ok(Some(TransparentAddress::Script([0; 20]))),
/// );
/// ```
pub fn decode_transparent_address(
pubkey_version: &[u8],
script_version: &[u8],
s: &str,
) -> Result<Option<TransparentAddress>, DecodeError> {
let decoded = bs58::decode(s).with_check(None).into_vec()?;
    if decoded.len() >= pubkey_version.len() && &decoded[..pubkey_version.len()] == pubkey_version {
if decoded.len() == pubkey_version.len() + 20 {
let mut data = [0; 20];
data.copy_from_slice(&decoded[pubkey_version.len()..]);
Ok(Some(TransparentAddress::PublicKey(data)))
} else {
Ok(None)
}
    } else if decoded.len() >= script_version.len() && &decoded[..script_version.len()] == script_version {
if decoded.len() == script_version.len() + 20 {
let mut data = [0; 20];
data.copy_from_slice(&decoded[script_version.len()..]);
Ok(Some(TransparentAddress::Script(data)))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
#[cfg(test)]
mod tests {
use pairing::bls12_381::Bls12;
use rand::{SeedableRng, XorShiftRng};
use sapling_crypto::{
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;
use zcash_primitives::JUBJUB;
use zcash_primitives::{
jubjub::edwards,
primitives::{Diversifier, PaymentAddress},
};
use zcash_primitives::JUBJUB;
use super::{decode_payment_address, encode_payment_address};
use crate::constants;
#[test]
fn payment_address() {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let addr = PaymentAddress {
diversifier: Diversifier([0u8; 11]),
@@ -202,9 +318,9 @@ mod tests {
};
let encoded_main =
"zs1qqqqqqqqqqqqqqqqqqxrrfaccydp867g6zg7ne5ht37z38jtfyw0ygmp0ja6hhf07twjqj2ug6x";
"zs1qqqqqqqqqqqqqqqqqrjq05nyfku05msvu49mawhg6kr0wwljahypwyk2h88z6975u563j8nfaxd";
let encoded_test =
"ztestsapling1qqqqqqqqqqqqqqqqqqxrrfaccydp867g6zg7ne5ht37z38jtfyw0ygmp0ja6hhf07twjq6awtaj";
"ztestsapling1qqqqqqqqqqqqqqqqqrjq05nyfku05msvu49mawhg6kr0wwljahypwyk2h88z6975u563j0ym7pe";
assert_eq!(
encode_payment_address(constants::mainnet::HRP_SAPLING_PAYMENT_ADDRESS, &addr),
@@ -235,7 +351,10 @@ mod tests {
#[test]
fn invalid_diversifier() {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let addr = PaymentAddress {
diversifier: Diversifier([1u8; 11]),

View File

@@ -6,3 +6,6 @@
pub mod constants;
pub mod encoding;
pub mod keys;
pub mod proto;
pub mod wallet;
pub mod welding_rig;

View File

@@ -0,0 +1,83 @@
//! Generated code for handling light client protobuf structs.
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr, FrRepr};
use zcash_primitives::{
block::{BlockHash, BlockHeader},
jubjub::{edwards, PrimeOrder},
JUBJUB,
};
pub mod compact_formats;
impl compact_formats::CompactBlock {
/// Returns the [`BlockHash`] for this block.
///
/// # Panics
///
/// This function will panic if [`CompactBlock.header`] is not set and
/// [`CompactBlock.hash`] is not exactly 32 bytes.
///
/// [`CompactBlock.header`]: #structfield.header
/// [`CompactBlock.hash`]: #structfield.hash
pub fn hash(&self) -> BlockHash {
if let Some(header) = self.header() {
header.hash()
} else {
BlockHash::from_slice(&self.hash)
}
}
/// Returns the [`BlockHash`] for this block's parent.
///
/// # Panics
///
/// This function will panic if [`CompactBlock.header`] is not set and
/// [`CompactBlock.prevHash`] is not exactly 32 bytes.
///
/// [`CompactBlock.header`]: #structfield.header
/// [`CompactBlock.prevHash`]: #structfield.prevHash
pub fn prev_hash(&self) -> BlockHash {
if let Some(header) = self.header() {
header.prev_block
} else {
BlockHash::from_slice(&self.prevHash)
}
}
/// Returns the [`BlockHeader`] for this block if present.
///
/// A convenience method that parses [`CompactBlock.header`] if present.
///
/// [`CompactBlock.header`]: #structfield.header
pub fn header(&self) -> Option<BlockHeader> {
if self.header.is_empty() {
None
} else {
BlockHeader::read(&self.header[..]).ok()
}
}
}
impl compact_formats::CompactOutput {
/// Returns the note commitment for this output.
///
/// A convenience method that parses [`CompactOutput.cmu`].
///
/// [`CompactOutput.cmu`]: #structfield.cmu
pub fn cmu(&self) -> Result<Fr, ()> {
let mut repr = FrRepr::default();
repr.read_le(&self.cmu[..]).map_err(|_| ())?;
Fr::from_repr(repr).map_err(|_| ())
}
/// Returns the ephemeral public key for this output.
///
/// A convenience method that parses [`CompactOutput.epk`].
///
/// [`CompactOutput.epk`]: #structfield.epk
pub fn epk(&self) -> Result<edwards::Point<Bls12, PrimeOrder>, ()> {
let p = edwards::Point::<Bls12, _>::read(&self.epk[..], &JUBJUB).map_err(|_| ())?;
p.as_prime_order(&JUBJUB).ok_or(())
}
}
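A hedged sketch of how a caller might combine these helpers when walking a block; the function below is hypothetical and simply skips outputs whose cmu fails to parse.

use pairing::bls12_381::Fr;
use zcash_client_backend::proto::compact_formats::CompactBlock;

fn block_commitments(block: &CompactBlock) -> Vec<Fr> {
    let _hash = block.hash(); // header hash if present, otherwise the `hash` field
    block
        .vtx
        .iter()
        .flat_map(|tx| tx.outputs.iter())
        .filter_map(|output| output.cmu().ok())
        .collect()
}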

View File

@@ -0,0 +1,46 @@
//! Structs representing transaction data scanned from the block chain by a wallet or
//! light client.
use pairing::bls12_381::{Bls12, Fr};
use zcash_primitives::{
jubjub::{edwards, PrimeOrder},
merkle_tree::IncrementalWitness,
primitives::{Note, PaymentAddress},
sapling::Node,
transaction::TxId,
};
/// A subset of a [`Transaction`] relevant to wallets and light clients.
///
/// [`Transaction`]: zcash_primitives::transaction::Transaction
pub struct WalletTx {
pub txid: TxId,
pub index: usize,
pub num_spends: usize,
pub num_outputs: usize,
pub shielded_spends: Vec<WalletShieldedSpend>,
pub shielded_outputs: Vec<WalletShieldedOutput>,
}
/// A subset of a [`SpendDescription`] relevant to wallets and light clients.
///
/// [`SpendDescription`]: zcash_primitives::transaction::components::SpendDescription
pub struct WalletShieldedSpend {
pub index: usize,
pub nf: Vec<u8>,
pub account: usize,
}
/// A subset of an [`OutputDescription`] relevant to wallets and light clients.
///
/// [`OutputDescription`]: zcash_primitives::transaction::components::OutputDescription
pub struct WalletShieldedOutput {
pub index: usize,
pub cmu: Fr,
pub epk: edwards::Point<Bls12, PrimeOrder>,
pub account: usize,
pub note: Note<Bls12>,
pub to: PaymentAddress<Bls12>,
pub is_change: bool,
pub witness: IncrementalWitness<Node>,
}

View File

@@ -0,0 +1,399 @@
//! Tools for scanning a compact representation of the Zcash block chain.
use ff::PrimeField;
use std::collections::HashSet;
use subtle::{ConditionallySelectable, ConstantTimeEq, CtOption};
use zcash_primitives::{
jubjub::fs::Fs,
merkle_tree::{CommitmentTree, IncrementalWitness},
note_encryption::try_sapling_compact_note_decryption,
sapling::Node,
transaction::TxId,
zip32::ExtendedFullViewingKey,
};
use crate::proto::compact_formats::{CompactBlock, CompactOutput};
use crate::wallet::{WalletShieldedOutput, WalletShieldedSpend, WalletTx};
/// Scans a [`CompactOutput`] with a set of [`ExtendedFullViewingKey`]s.
///
/// Returns a [`WalletShieldedOutput`] and corresponding [`IncrementalWitness`] if this
/// output belongs to any of the given [`ExtendedFullViewingKey`]s.
///
/// The given [`CommitmentTree`] and existing [`IncrementalWitness`]es are incremented
/// with this output's commitment.
fn scan_output(
(index, output): (usize, CompactOutput),
ivks: &[Fs],
spent_from_accounts: &HashSet<usize>,
tree: &mut CommitmentTree<Node>,
existing_witnesses: &mut [&mut IncrementalWitness<Node>],
block_witnesses: &mut [&mut IncrementalWitness<Node>],
new_witnesses: &mut [&mut IncrementalWitness<Node>],
) -> Option<WalletShieldedOutput> {
let cmu = output.cmu().ok()?;
let epk = output.epk().ok()?;
let ct = output.ciphertext;
// Increment tree and witnesses
let node = Node::new(cmu.into_repr());
for witness in existing_witnesses {
witness.append(node).unwrap();
}
for witness in block_witnesses {
witness.append(node).unwrap();
}
for witness in new_witnesses {
witness.append(node).unwrap();
}
tree.append(node).unwrap();
for (account, ivk) in ivks.iter().enumerate() {
let (note, to) = match try_sapling_compact_note_decryption(ivk, &epk, &cmu, &ct) {
Some(ret) => ret,
None => continue,
};
// A note is marked as "change" if the account that received it
// also spent notes in the same transaction. This will catch,
// for instance:
// - Change created by spending fractions of notes.
// - Notes created by consolidation transactions.
// - Notes sent from one account to itself.
let is_change = spent_from_accounts.contains(&account);
return Some(WalletShieldedOutput {
index,
cmu,
epk,
account,
note,
to,
is_change,
witness: IncrementalWitness::from_tree(tree),
});
}
None
}
/// Scans a [`CompactBlock`] with a set of [`ExtendedFullViewingKey`]s.
///
/// Returns a vector of [`WalletTx`]s belonging to any of the given
/// [`ExtendedFullViewingKey`]s, and the corresponding new [`IncrementalWitness`]es.
///
/// The given [`CommitmentTree`] and existing [`IncrementalWitness`]es are
/// incremented appropriately.
pub fn scan_block(
block: CompactBlock,
extfvks: &[ExtendedFullViewingKey],
nullifiers: &[(&[u8], usize)],
tree: &mut CommitmentTree<Node>,
existing_witnesses: &mut [&mut IncrementalWitness<Node>],
) -> Vec<WalletTx> {
let mut wtxs: Vec<WalletTx> = vec![];
let ivks: Vec<_> = extfvks.iter().map(|extfvk| extfvk.fvk.vk.ivk()).collect();
for tx in block.vtx.into_iter() {
let num_spends = tx.spends.len();
let num_outputs = tx.outputs.len();
// Check for spent notes
// The only step that is not constant-time is the filter() at the end.
let shielded_spends: Vec<_> = tx
.spends
.into_iter()
.enumerate()
.map(|(index, spend)| {
// Find the first tracked nullifier that matches this spend, and produce
// a WalletShieldedSpend if there is a match, in constant time.
nullifiers
.iter()
.map(|&(nf, account)| CtOption::new(account as u64, nf.ct_eq(&spend.nf[..])))
.fold(CtOption::new(0, 0.into()), |first, next| {
CtOption::conditional_select(&next, &first, first.is_some())
})
.map(|account| WalletShieldedSpend {
index,
nf: spend.nf,
account: account as usize,
})
})
.filter(|spend| spend.is_some().into())
.map(|spend| spend.unwrap())
.collect();
// Collect the set of accounts that were spent from in this transaction
let spent_from_accounts: HashSet<_> =
shielded_spends.iter().map(|spend| spend.account).collect();
// Check for incoming notes while incrementing tree and witnesses
let mut shielded_outputs: Vec<WalletShieldedOutput> = vec![];
{
// Grab mutable references to new witnesses from previous transactions
// in this block so that we can update them. Scoped so we don't hold
// mutable references to wtxs for too long.
let mut block_witnesses: Vec<_> = wtxs
.iter_mut()
.map(|tx| {
tx.shielded_outputs
.iter_mut()
.map(|output| &mut output.witness)
})
.flatten()
.collect();
for to_scan in tx.outputs.into_iter().enumerate() {
// Grab mutable references to new witnesses from previous outputs
// in this transaction so that we can update them. Scoped so we
// don't hold mutable references to shielded_outputs for too long.
let mut new_witnesses: Vec<_> = shielded_outputs
.iter_mut()
.map(|output| &mut output.witness)
.collect();
if let Some(output) = scan_output(
to_scan,
&ivks,
&spent_from_accounts,
tree,
existing_witnesses,
&mut block_witnesses,
&mut new_witnesses,
) {
shielded_outputs.push(output);
}
}
}
if !(shielded_spends.is_empty() && shielded_outputs.is_empty()) {
let mut txid = TxId([0u8; 32]);
txid.0.copy_from_slice(&tx.hash);
wtxs.push(WalletTx {
txid,
index: tx.index as usize,
num_spends,
num_outputs,
shielded_spends,
shielded_outputs,
});
}
}
wtxs
}
#[cfg(test)]
mod tests {
use ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr};
use rand_core::RngCore;
use rand_os::OsRng;
use zcash_primitives::{
jubjub::{fs::Fs, FixedGenerators, JubjubParams, ToUniform},
merkle_tree::CommitmentTree,
note_encryption::{Memo, SaplingNoteEncryption},
primitives::Note,
transaction::components::Amount,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
JUBJUB,
};
use super::scan_block;
use crate::proto::compact_formats::{CompactBlock, CompactOutput, CompactSpend, CompactTx};
fn random_compact_tx<R: RngCore>(rng: &mut R) -> CompactTx {
let fake_nf = {
let mut nf = vec![0; 32];
rng.fill_bytes(&mut nf);
nf
};
let fake_cmu = {
let fake_cmu = Fr::random(rng);
let mut bytes = vec![];
fake_cmu.into_repr().write_le(&mut bytes).unwrap();
bytes
};
let fake_epk = {
let mut buffer = vec![0; 64];
rng.fill_bytes(&mut buffer);
let fake_esk = Fs::to_uniform(&buffer[..]);
let fake_epk = JUBJUB
.generator(FixedGenerators::SpendingKeyGenerator)
.mul(fake_esk, &JUBJUB);
let mut bytes = vec![];
fake_epk.write(&mut bytes).unwrap();
bytes
};
let mut cspend = CompactSpend::new();
cspend.set_nf(fake_nf);
let mut cout = CompactOutput::new();
cout.set_cmu(fake_cmu);
cout.set_epk(fake_epk);
cout.set_ciphertext(vec![0; 52]);
let mut ctx = CompactTx::new();
let mut txid = vec![0; 32];
rng.fill_bytes(&mut txid);
ctx.set_hash(txid);
ctx.spends.push(cspend);
ctx.outputs.push(cout);
ctx
}
/// Create a fake CompactBlock at the given height, with a transaction containing a
/// single spend of the given nullifier and a single output paying the given address.
/// Returns the CompactBlock.
fn fake_compact_block(
height: i32,
nf: [u8; 32],
extfvk: ExtendedFullViewingKey,
value: Amount,
tx_after: bool,
) -> CompactBlock {
let to = extfvk.default_address().unwrap().1;
// Create a fake Note for the account
let mut rng = OsRng;
let note = Note {
g_d: to.diversifier.g_d::<Bls12>(&JUBJUB).unwrap(),
pk_d: to.pk_d.clone(),
value: value.into(),
r: Fs::random(&mut rng),
};
let encryptor = SaplingNoteEncryption::new(
extfvk.fvk.ovk,
note.clone(),
to.clone(),
Memo::default(),
&mut rng,
);
let mut cmu = vec![];
note.cm(&JUBJUB).into_repr().write_le(&mut cmu).unwrap();
let mut epk = vec![];
encryptor.epk().write(&mut epk).unwrap();
let enc_ciphertext = encryptor.encrypt_note_plaintext();
// Create a fake CompactBlock containing the note
let mut cb = CompactBlock::new();
cb.set_height(height as u64);
// Add a random Sapling tx before ours
{
let mut tx = random_compact_tx(&mut rng);
tx.index = cb.vtx.len() as u64;
cb.vtx.push(tx);
}
let mut cspend = CompactSpend::new();
cspend.set_nf(nf.to_vec());
let mut cout = CompactOutput::new();
cout.set_cmu(cmu);
cout.set_epk(epk);
cout.set_ciphertext(enc_ciphertext[..52].to_vec());
let mut ctx = CompactTx::new();
let mut txid = vec![0; 32];
rng.fill_bytes(&mut txid);
ctx.set_hash(txid);
ctx.spends.push(cspend);
ctx.outputs.push(cout);
ctx.index = cb.vtx.len() as u64;
cb.vtx.push(ctx);
// Optionally add another random Sapling tx after ours
if tx_after {
let mut tx = random_compact_tx(&mut rng);
tx.index = cb.vtx.len() as u64;
cb.vtx.push(tx);
}
cb
}
#[test]
fn scan_block_with_my_tx() {
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
let cb = fake_compact_block(
1,
[0; 32],
extfvk.clone(),
Amount::from_u64(5).unwrap(),
false,
);
assert_eq!(cb.vtx.len(), 2);
let mut tree = CommitmentTree::new();
let txs = scan_block(cb, &[extfvk], &[], &mut tree, &mut []);
assert_eq!(txs.len(), 1);
let tx = &txs[0];
assert_eq!(tx.index, 1);
assert_eq!(tx.num_spends, 1);
assert_eq!(tx.num_outputs, 1);
assert_eq!(tx.shielded_spends.len(), 0);
assert_eq!(tx.shielded_outputs.len(), 1);
assert_eq!(tx.shielded_outputs[0].index, 0);
assert_eq!(tx.shielded_outputs[0].account, 0);
assert_eq!(tx.shielded_outputs[0].note.value, 5);
// Check that the witness root matches
assert_eq!(tx.shielded_outputs[0].witness.root(), tree.root());
}
#[test]
fn scan_block_with_txs_after_my_tx() {
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
let cb = fake_compact_block(
1,
[0; 32],
extfvk.clone(),
Amount::from_u64(5).unwrap(),
true,
);
assert_eq!(cb.vtx.len(), 3);
let mut tree = CommitmentTree::new();
let txs = scan_block(cb, &[extfvk], &[], &mut tree, &mut []);
assert_eq!(txs.len(), 1);
let tx = &txs[0];
assert_eq!(tx.index, 1);
assert_eq!(tx.num_spends, 1);
assert_eq!(tx.num_outputs, 1);
assert_eq!(tx.shielded_spends.len(), 0);
assert_eq!(tx.shielded_outputs.len(), 1);
assert_eq!(tx.shielded_outputs[0].index, 0);
assert_eq!(tx.shielded_outputs[0].account, 0);
assert_eq!(tx.shielded_outputs[0].note.value, 5);
// Check that the witness root matches
assert_eq!(tx.shielded_outputs[0].witness.root(), tree.root());
}
#[test]
fn scan_block_with_my_spend() {
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
let nf = [7; 32];
let account = 12;
let cb = fake_compact_block(1, nf, extfvk, Amount::from_u64(5).unwrap(), false);
assert_eq!(cb.vtx.len(), 2);
let mut tree = CommitmentTree::new();
let txs = scan_block(cb, &[], &[(&nf, account)], &mut tree, &mut []);
assert_eq!(txs.len(), 1);
let tx = &txs[0];
assert_eq!(tx.index, 1);
assert_eq!(tx.num_spends, 1);
assert_eq!(tx.num_outputs, 1);
assert_eq!(tx.shielded_spends.len(), 1);
assert_eq!(tx.shielded_outputs.len(), 0);
assert_eq!(tx.shielded_spends[0].index, 0);
assert_eq!(tx.shielded_spends[0].nf, nf);
assert_eq!(tx.shielded_spends[0].account, account);
}
}
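For context, a minimal sketch of how a caller might chain scan_block across consecutive blocks, carrying the commitment tree forward and advancing the witnesses of previously-found outputs; the scan_chain driver is hypothetical and nullifier tracking is elided.

use zcash_client_backend::{
    proto::compact_formats::CompactBlock, wallet::WalletTx, welding_rig::scan_block,
};
use zcash_primitives::{merkle_tree::CommitmentTree, zip32::ExtendedFullViewingKey};

fn scan_chain(blocks: Vec<CompactBlock>, extfvks: &[ExtendedFullViewingKey]) -> Vec<WalletTx> {
    let mut tree = CommitmentTree::new();
    let mut all_txs: Vec<WalletTx> = vec![];
    for block in blocks {
        let txs = {
            // Witnesses of outputs found in earlier blocks must be advanced
            // past every commitment in this block.
            let mut witness_refs: Vec<_> = all_txs
                .iter_mut()
                .flat_map(|tx| tx.shielded_outputs.iter_mut().map(|o| &mut o.witness))
                .collect();
            scan_block(block, extfvks, &[], &mut tree, &mut witness_refs)
        };
        all_txs.extend(txs);
    }
    all_txs
}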

View File

@@ -0,0 +1,27 @@
[package]
name = "zcash_client_sqlite"
version = "0.0.0"
authors = [
"Jack Grigg <jack@z.cash>",
]
edition = "2018"
[dependencies]
bech32 = "0.7"
bs58 = { version = "0.2", features = ["check"] }
ff = { path = "../ff" }
pairing = { path = "../pairing" }
protobuf = "2"
rusqlite = { version = "0.20", features = ["bundled"] }
time = "0.1"
zcash_client_backend = { path = "../zcash_client_backend" }
zcash_primitives = { path = "../zcash_primitives" }
[dev-dependencies]
rand_core = "0.5"
rand_os = "0.2"
tempfile = "3"
zcash_proofs = { path = "../zcash_proofs" }
[features]
mainnet = []

View File

@@ -0,0 +1,60 @@
# Security Disclaimer
#### :warning: WARNING: This is an *early preview*
----
In the spirit of transparency, we provide this as a window into what we are actively
developing. This is an alpha build, not yet intended for 3rd party use. Please be advised
of the following:
* 🛑 This code currently is not audited. 🛑
* ❌ This is a public, active branch with **no support**.
* ❌ The code **does not have** documentation that is reviewed and approved by our Documentation team.
* ❌ The code **does not have** adequate unit tests, acceptance tests and stress tests.
* ❌ The code **does not have** automated tests that use the officially supported CI system.
* ❌ The code **has not been subjected to thorough review** by engineers at the Electric Coin Company.
* :warning: This library **is** compatible with the latest version of zcashd, but there **is no** automated testing of this.
* :heavy_check_mark: The library **is not** known to be broken in any major way.
* :heavy_check_mark: The library **does run** on mainnet and testnet.
* ❌ We **are actively rebasing** this branch and adding features where/when needed.
* ❌ We **do not** undertake appropriate security coverage (threat models, review, response, etc.).
* :heavy_check_mark: There is a product manager for this library.
* :heavy_check_mark: Electric Coin Company maintains the library as we discover bugs and do network upgrades/minor releases.
* :heavy_check_mark: Users can expect to get a response within a few weeks after submitting an issue.
* ❌ The User Support team **has not yet been briefed** on the features provided to users and the functionality of the associated test-framework.
* ❌ The code is **not fully-documented**.
### 🛑 Use of this code may lead to a loss of funds 🛑
Use of this code in its current form or with modifications may lead to loss of funds or loss
of "expected" privacy, or to denial of service for a large portion of users; the code may
also contain a bug that could be leveraged for any of those kinds of attacks (especially a
"0 day", where we suspect few people know about the vulnerability).
### :eyes: At this time, this is for preview purposes only. :eyes:
----
# zcash_client_sqlite
This library contains APIs that collectively implement a Zcash light client in
an SQLite database.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

View File

@@ -0,0 +1,63 @@
//! Structs for handling supported address types.
use pairing::bls12_381::Bls12;
use zcash_client_backend::encoding::{
decode_payment_address, decode_transparent_address, encode_payment_address,
encode_transparent_address,
};
use zcash_primitives::{legacy::TransparentAddress, primitives::PaymentAddress};
#[cfg(feature = "mainnet")]
use zcash_client_backend::constants::mainnet::{
B58_PUBKEY_ADDRESS_PREFIX, B58_SCRIPT_ADDRESS_PREFIX, HRP_SAPLING_PAYMENT_ADDRESS,
};
#[cfg(not(feature = "mainnet"))]
use zcash_client_backend::constants::testnet::{
B58_PUBKEY_ADDRESS_PREFIX, B58_SCRIPT_ADDRESS_PREFIX, HRP_SAPLING_PAYMENT_ADDRESS,
};
/// An address that funds can be sent to.
pub enum RecipientAddress {
Shielded(PaymentAddress<Bls12>),
Transparent(TransparentAddress),
}
impl From<PaymentAddress<Bls12>> for RecipientAddress {
fn from(addr: PaymentAddress<Bls12>) -> Self {
RecipientAddress::Shielded(addr)
}
}
impl From<TransparentAddress> for RecipientAddress {
fn from(addr: TransparentAddress) -> Self {
RecipientAddress::Transparent(addr)
}
}
impl RecipientAddress {
pub fn from_str(s: &str) -> Option<Self> {
if let Ok(Some(pa)) = decode_payment_address(HRP_SAPLING_PAYMENT_ADDRESS, s) {
Some(pa.into())
} else if let Ok(Some(addr)) =
decode_transparent_address(&B58_PUBKEY_ADDRESS_PREFIX, &B58_SCRIPT_ADDRESS_PREFIX, s)
{
Some(addr.into())
} else {
None
}
}
pub fn to_string(&self) -> String {
match self {
RecipientAddress::Shielded(pa) => {
encode_payment_address(HRP_SAPLING_PAYMENT_ADDRESS, pa)
}
RecipientAddress::Transparent(addr) => encode_transparent_address(
&B58_PUBKEY_ADDRESS_PREFIX,
&B58_SCRIPT_ADDRESS_PREFIX,
addr,
),
}
}
}
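A small usage sketch; the address strings are the testnet vectors used elsewhere in this PR, so this assumes the default, non-mainnet feature configuration.

use zcash_client_sqlite::address::RecipientAddress;

fn classify(s: &str) -> &'static str {
    match RecipientAddress::from_str(s) {
        Some(RecipientAddress::Shielded(_)) => "sapling",
        Some(RecipientAddress::Transparent(_)) => "transparent",
        None => "unrecognized",
    }
}

// classify("ztestsapling1qqqqqqqqqqqqqqqqqrjq05nyfku05msvu49mawhg6kr0wwljahypwyk2h88z6975u563j0ym7pe") == "sapling"
// classify("tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma") == "transparent"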

View File

@@ -0,0 +1,469 @@
//! Functions for enforcing chain validity and handling chain reorgs.
//!
//! # Examples
//!
//! ```
//! use zcash_client_sqlite::{
//! chain::{rewind_to_height, validate_combined_chain},
//! error::ErrorKind,
//! scan::scan_cached_blocks,
//! };
//!
//! let db_cache = "/path/to/cache.db";
//! let db_data = "/path/to/data.db";
//!
//! // 1) Download new CompactBlocks into db_cache.
//!
//! // 2) Run the chain validator on the received blocks.
//! //
//! // Given that we assume the server always gives us correct-at-the-time blocks, any
//! // errors are in the blocks we have previously cached or scanned.
//! if let Err(e) = validate_combined_chain(&db_cache, &db_data) {
//! match e.kind() {
//! ErrorKind::InvalidChain(upper_bound, _) => {
//! // a) Pick a height to rewind to.
//! //
//! // This might be informed by some external chain reorg information, or
//! // heuristics such as the platform, available bandwidth, size of recent
//! // CompactBlocks, etc.
//! let rewind_height = upper_bound - 10;
//!
//! // b) Rewind scanned block information.
//! rewind_to_height(&db_data, rewind_height);
//!
//! // c) Delete cached blocks from rewind_height onwards.
//! //
//! // This does imply that assumed-valid blocks will be re-downloaded, but it
//! // is also possible that in the intervening time, a chain reorg has
//! // occurred that orphaned some of those blocks.
//!
//! // d) If there is some separate thread or service downloading
//! // CompactBlocks, tell it to go back and download from rewind_height
//! // onwards.
//! }
//! _ => {
//! // Handle other errors.
//! }
//! }
//! }
//!
//! // 3) Scan (any remaining) cached blocks.
//! //
//! // At this point, the cache and scanned data are locally consistent (though not
//! // necessarily consistent with the latest chain tip - this would be discovered the
//! // next time this codepath is executed after new blocks are received).
//! scan_cached_blocks(&db_cache, &db_data);
//! ```
use protobuf::parse_from_bytes;
use rusqlite::{Connection, NO_PARAMS};
use std::path::Path;
use zcash_client_backend::proto::compact_formats::CompactBlock;
use crate::{
error::{Error, ErrorKind},
SAPLING_ACTIVATION_HEIGHT,
};
#[derive(Debug)]
pub enum ChainInvalidCause {
PrevHashMismatch,
}
struct CompactBlockRow {
height: i32,
data: Vec<u8>,
}
/// Checks that the scanned blocks in the data database, when combined with the recent
/// `CompactBlock`s in the cache database, form a valid chain.
///
/// This function is built on the core assumption that the information provided in the
/// cache database is more likely to be accurate than the previously-scanned information.
/// This follows from the design (and trust) assumption that the `lightwalletd` server
/// provides accurate block information as of the time it was requested.
///
/// Returns:
/// - `Ok(())` if the combined chain is valid.
/// - `Err(ErrorKind::InvalidChain(upper_bound, cause))` if the combined chain is invalid.
/// `upper_bound` is the height of the highest invalid block (on the assumption that the
/// highest block in the cache database is correct).
/// - `Err(e)` if there was an error during validation unrelated to chain validity.
///
/// This function does not mutate either of the databases.
pub fn validate_combined_chain<P: AsRef<Path>, Q: AsRef<Path>>(
db_cache: P,
db_data: Q,
) -> Result<(), Error> {
let cache = Connection::open(db_cache)?;
let data = Connection::open(db_data)?;
// Recall where we synced up to previously.
// If we have never synced, use Sapling activation height to select all cached CompactBlocks.
let (have_scanned, last_scanned_height) =
data.query_row("SELECT MAX(height) FROM blocks", NO_PARAMS, |row| {
row.get(0)
.map(|h| (true, h))
.or(Ok((false, SAPLING_ACTIVATION_HEIGHT - 1)))
})?;
// Fetch the CompactBlocks we need to validate
let mut stmt_blocks = cache
.prepare("SELECT height, data FROM compactblocks WHERE height > ? ORDER BY height DESC")?;
let mut rows = stmt_blocks.query_map(&[last_scanned_height], |row| {
Ok(CompactBlockRow {
height: row.get(0)?,
data: row.get(1)?,
})
})?;
// Take the highest cached block as accurate.
let (mut last_height, mut last_prev_hash) = {
let assumed_correct = match rows.next() {
Some(row) => row?,
None => {
// No cached blocks, and we've already validated the blocks we've scanned,
// so there's nothing to validate.
// TODO: Maybe we still want to check if there are cached blocks that are
// at heights we previously scanned? Check scanning flow again.
return Ok(());
}
};
let block: CompactBlock = parse_from_bytes(&assumed_correct.data)?;
(block.height as i32, block.prev_hash())
};
for row in rows {
let row = row?;
// Scanned blocks MUST be height-sequential.
if row.height != (last_height - 1) {
return Err(Error(ErrorKind::InvalidHeight(last_height - 1, row.height)));
}
last_height = row.height;
let block: CompactBlock = parse_from_bytes(&row.data)?;
// Cached blocks MUST be hash-chained.
if block.hash() != last_prev_hash {
return Err(Error(ErrorKind::InvalidChain(
last_height,
ChainInvalidCause::PrevHashMismatch,
)));
}
last_prev_hash = block.prev_hash();
}
if have_scanned {
// Cached blocks MUST hash-chain to the last scanned block.
let last_scanned_hash = data.query_row(
"SELECT hash FROM blocks WHERE height = ?",
&[last_scanned_height],
|row| row.get::<_, Vec<_>>(0),
)?;
if &last_scanned_hash[..] != &last_prev_hash.0[..] {
return Err(Error(ErrorKind::InvalidChain(
last_scanned_height,
ChainInvalidCause::PrevHashMismatch,
)));
}
}
// All good!
Ok(())
}
/// Rewinds the data database to the given height.
///
/// If the requested height is greater than or equal to the height of the last scanned
/// block, this function does nothing.
pub fn rewind_to_height<P: AsRef<Path>>(db_data: P, height: i32) -> Result<(), Error> {
let data = Connection::open(db_data)?;
// Recall where we synced up to previously.
// If we have never synced, use Sapling activation height.
let last_scanned_height =
data.query_row("SELECT MAX(height) FROM blocks", NO_PARAMS, |row| {
row.get(0).or(Ok(SAPLING_ACTIVATION_HEIGHT - 1))
})?;
if height >= last_scanned_height {
// Nothing to do.
return Ok(());
}
// Start an SQL transaction for rewinding.
data.execute("BEGIN IMMEDIATE", NO_PARAMS)?;
// Decrement witnesses.
data.execute("DELETE FROM sapling_witnesses WHERE block > ?", &[height])?;
// Un-mine transactions.
data.execute(
"UPDATE transactions SET block = NULL, tx_index = NULL WHERE block > ?",
&[height],
)?;
// Now that they aren't depended on, delete scanned blocks.
data.execute("DELETE FROM blocks WHERE height > ?", &[height])?;
// Commit the SQL transaction, rewinding atomically.
data.execute("COMMIT", NO_PARAMS)?;
Ok(())
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use zcash_primitives::{
block::BlockHash,
transaction::components::Amount,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use super::{rewind_to_height, validate_combined_chain};
use crate::{
error::ErrorKind,
init::{init_accounts_table, init_cache_database, init_data_database},
query::get_balance,
scan::scan_cached_blocks,
tests::{fake_compact_block, insert_into_cache},
SAPLING_ACTIVATION_HEIGHT,
};
#[test]
fn valid_chain_states() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Empty chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Create a fake CompactBlock sending value to the address
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
Amount::from_u64(5).unwrap(),
);
insert_into_cache(db_cache, &cb);
// Cache-only chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Data-only chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Create a second fake CompactBlock sending more value to the address
let (cb2, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 1,
cb.hash(),
extfvk,
Amount::from_u64(7).unwrap(),
);
insert_into_cache(db_cache, &cb2);
// Data+cache chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Scan the cache again
scan_cached_blocks(db_cache, db_data).unwrap();
// Data-only chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
}
#[test]
fn invalid_chain_cache_disconnected() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Create some fake CompactBlocks
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
Amount::from_u64(5).unwrap(),
);
let (cb2, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 1,
cb.hash(),
extfvk.clone(),
Amount::from_u64(7).unwrap(),
);
insert_into_cache(db_cache, &cb);
insert_into_cache(db_cache, &cb2);
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Data-only chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Create more fake CompactBlocks that don't connect to the scanned ones
let (cb3, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 2,
BlockHash([1; 32]),
extfvk.clone(),
Amount::from_u64(8).unwrap(),
);
let (cb4, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 3,
cb3.hash(),
extfvk.clone(),
Amount::from_u64(3).unwrap(),
);
insert_into_cache(db_cache, &cb3);
insert_into_cache(db_cache, &cb4);
// Data+cache chain should be invalid at the data/cache boundary
match validate_combined_chain(db_cache, db_data) {
Err(e) => match e.kind() {
ErrorKind::InvalidChain(upper_bound, _) => {
assert_eq!(*upper_bound, SAPLING_ACTIVATION_HEIGHT + 1)
}
_ => panic!(),
},
_ => panic!(),
}
}
#[test]
fn invalid_chain_cache_reorg() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Create some fake CompactBlocks
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
Amount::from_u64(5).unwrap(),
);
let (cb2, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 1,
cb.hash(),
extfvk.clone(),
Amount::from_u64(7).unwrap(),
);
insert_into_cache(db_cache, &cb);
insert_into_cache(db_cache, &cb2);
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Data-only chain should be valid
validate_combined_chain(db_cache, db_data).unwrap();
// Create more fake CompactBlocks that contain a reorg
let (cb3, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 2,
cb2.hash(),
extfvk.clone(),
Amount::from_u64(8).unwrap(),
);
let (cb4, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 3,
BlockHash([1; 32]),
extfvk.clone(),
Amount::from_u64(3).unwrap(),
);
insert_into_cache(db_cache, &cb3);
insert_into_cache(db_cache, &cb4);
// Data+cache chain should be invalid inside the cache
match validate_combined_chain(db_cache, db_data) {
Err(e) => match e.kind() {
ErrorKind::InvalidChain(upper_bound, _) => {
assert_eq!(*upper_bound, SAPLING_ACTIVATION_HEIGHT + 2)
}
_ => panic!(),
},
_ => panic!(),
}
}
#[test]
fn data_db_rewinding() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Account balance should be zero
assert_eq!(get_balance(db_data, 0).unwrap(), Amount::zero());
// Create fake CompactBlocks sending value to the address
let value = Amount::from_u64(5).unwrap();
let value2 = Amount::from_u64(7).unwrap();
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
let (cb2, _) = fake_compact_block(SAPLING_ACTIVATION_HEIGHT + 1, cb.hash(), extfvk, value2);
insert_into_cache(db_cache, &cb);
insert_into_cache(db_cache, &cb2);
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Account balance should reflect both received notes
assert_eq!(get_balance(db_data, 0).unwrap(), value + value2);
// "Rewind" to height of last scanned block
rewind_to_height(db_data, SAPLING_ACTIVATION_HEIGHT + 1).unwrap();
// Account balance should be unaltered
assert_eq!(get_balance(db_data, 0).unwrap(), value + value2);
// Rewind so that one block is dropped
rewind_to_height(db_data, SAPLING_ACTIVATION_HEIGHT).unwrap();
// Account balance should only contain the first received note
assert_eq!(get_balance(db_data, 0).unwrap(), value);
}
}

View File

@@ -0,0 +1,132 @@
use std::error;
use std::fmt;
use zcash_primitives::{
sapling::Node,
transaction::{builder, TxId},
};
#[derive(Debug)]
pub enum ErrorKind {
CorruptedData(&'static str),
IncorrectHRPExtFVK,
InsufficientBalance(u64, u64),
InvalidChain(i32, crate::chain::ChainInvalidCause),
InvalidExtSK(u32),
InvalidHeight(i32, i32),
InvalidMemo(std::str::Utf8Error),
InvalidNewWitnessAnchor(usize, TxId, i32, Node),
InvalidNote,
InvalidWitnessAnchor(i64, i32),
ScanRequired,
TableNotEmpty,
Bech32(bech32::Error),
Base58(bs58::decode::DecodeError),
Builder(builder::Error),
Database(rusqlite::Error),
Io(std::io::Error),
Protobuf(protobuf::ProtobufError),
}
#[derive(Debug)]
pub struct Error(pub(crate) ErrorKind);
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
ErrorKind::CorruptedData(reason) => write!(f, "Data DB is corrupted: {}", reason),
ErrorKind::IncorrectHRPExtFVK => write!(f, "Incorrect HRP for extfvk"),
ErrorKind::InsufficientBalance(have, need) => write!(
f,
"Insufficient balance (have {}, need {} including fee)",
have, need
),
ErrorKind::InvalidChain(upper_bound, cause) => {
write!(f, "Invalid chain (upper bound: {}): {:?}", upper_bound, cause)
}
ErrorKind::InvalidExtSK(account) => {
write!(f, "Incorrect ExtendedSpendingKey for account {}", account)
}
ErrorKind::InvalidHeight(expected, actual) => write!(
f,
"Expected height of next CompactBlock to be {}, but was {}",
expected, actual
),
ErrorKind::InvalidMemo(e) => write!(f, "{}", e),
ErrorKind::InvalidNewWitnessAnchor(output, txid, last_height, anchor) => write!(
f,
"New witness for output {} in tx {} has incorrect anchor after scanning block {}: {:?}",
output, txid, last_height, anchor,
),
ErrorKind::InvalidNote => write!(f, "Invalid note"),
ErrorKind::InvalidWitnessAnchor(id_note, last_height) => write!(
f,
"Witness for note {} has incorrect anchor after scanning block {}",
id_note, last_height
),
ErrorKind::ScanRequired => write!(f, "Must scan blocks first"),
ErrorKind::TableNotEmpty => write!(f, "Table is not empty"),
ErrorKind::Bech32(e) => write!(f, "{}", e),
ErrorKind::Base58(e) => write!(f, "{}", e),
ErrorKind::Builder(e) => write!(f, "{:?}", e),
ErrorKind::Database(e) => write!(f, "{}", e),
ErrorKind::Io(e) => write!(f, "{}", e),
ErrorKind::Protobuf(e) => write!(f, "{}", e),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self.0 {
ErrorKind::InvalidMemo(e) => Some(e),
ErrorKind::Bech32(e) => Some(e),
ErrorKind::Builder(e) => Some(e),
ErrorKind::Database(e) => Some(e),
ErrorKind::Io(e) => Some(e),
ErrorKind::Protobuf(e) => Some(e),
_ => None,
}
}
}
impl From<bech32::Error> for Error {
fn from(e: bech32::Error) -> Self {
Error(ErrorKind::Bech32(e))
}
}
impl From<bs58::decode::DecodeError> for Error {
fn from(e: bs58::decode::DecodeError) -> Self {
Error(ErrorKind::Base58(e))
}
}
impl From<builder::Error> for Error {
fn from(e: builder::Error) -> Self {
Error(ErrorKind::Builder(e))
}
}
impl From<rusqlite::Error> for Error {
fn from(e: rusqlite::Error) -> Self {
Error(ErrorKind::Database(e))
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Error(ErrorKind::Io(e))
}
}
impl From<protobuf::ProtobufError> for Error {
fn from(e: protobuf::ProtobufError) -> Self {
Error(ErrorKind::Protobuf(e))
}
}
impl Error {
pub fn kind(&self) -> &ErrorKind {
&self.0
}
}

View File

@@ -0,0 +1,303 @@
//! Functions for initializing the various databases.
use rusqlite::{types::ToSql, Connection, NO_PARAMS};
use std::path::Path;
use zcash_client_backend::encoding::encode_extended_full_viewing_key;
use zcash_primitives::{block::BlockHash, zip32::ExtendedFullViewingKey};
use crate::{
address_from_extfvk,
error::{Error, ErrorKind},
HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY,
};
/// Sets up the internal structure of the cache database.
///
/// # Examples
///
/// ```
/// use tempfile::NamedTempFile;
/// use zcash_client_sqlite::init::init_cache_database;
///
/// let data_file = NamedTempFile::new().unwrap();
/// let db_cache = data_file.path();
/// init_cache_database(&db_cache).unwrap();
/// ```
pub fn init_cache_database<P: AsRef<Path>>(db_cache: P) -> Result<(), Error> {
let cache = Connection::open(db_cache)?;
cache.execute(
"CREATE TABLE IF NOT EXISTS compactblocks (
height INTEGER PRIMARY KEY,
data BLOB NOT NULL
)",
NO_PARAMS,
)?;
Ok(())
}
/// Sets up the internal structure of the data database.
///
/// # Examples
///
/// ```
/// use tempfile::NamedTempFile;
/// use zcash_client_sqlite::init::init_data_database;
///
/// let data_file = NamedTempFile::new().unwrap();
/// let db_data = data_file.path();
/// init_data_database(&db_data).unwrap();
/// ```
pub fn init_data_database<P: AsRef<Path>>(db_data: P) -> Result<(), Error> {
let data = Connection::open(db_data)?;
data.execute(
"CREATE TABLE IF NOT EXISTS accounts (
account INTEGER PRIMARY KEY,
extfvk TEXT NOT NULL,
address TEXT NOT NULL
)",
NO_PARAMS,
)?;
data.execute(
"CREATE TABLE IF NOT EXISTS blocks (
height INTEGER PRIMARY KEY,
hash BLOB NOT NULL,
time INTEGER NOT NULL,
sapling_tree BLOB NOT NULL
)",
NO_PARAMS,
)?;
data.execute(
"CREATE TABLE IF NOT EXISTS transactions (
id_tx INTEGER PRIMARY KEY,
txid BLOB NOT NULL UNIQUE,
created TEXT,
block INTEGER,
tx_index INTEGER,
expiry_height INTEGER,
raw BLOB,
FOREIGN KEY (block) REFERENCES blocks(height)
)",
NO_PARAMS,
)?;
data.execute(
"CREATE TABLE IF NOT EXISTS received_notes (
id_note INTEGER PRIMARY KEY,
tx INTEGER NOT NULL,
output_index INTEGER NOT NULL,
account INTEGER NOT NULL,
diversifier BLOB NOT NULL,
value INTEGER NOT NULL,
rcm BLOB NOT NULL,
nf BLOB NOT NULL UNIQUE,
is_change BOOLEAN NOT NULL,
memo BLOB,
spent INTEGER,
FOREIGN KEY (tx) REFERENCES transactions(id_tx),
FOREIGN KEY (account) REFERENCES accounts(account),
FOREIGN KEY (spent) REFERENCES transactions(id_tx),
CONSTRAINT tx_output UNIQUE (tx, output_index)
)",
NO_PARAMS,
)?;
data.execute(
"CREATE TABLE IF NOT EXISTS sapling_witnesses (
id_witness INTEGER PRIMARY KEY,
note INTEGER NOT NULL,
block INTEGER NOT NULL,
witness BLOB NOT NULL,
FOREIGN KEY (note) REFERENCES received_notes(id_note),
FOREIGN KEY (block) REFERENCES blocks(height),
CONSTRAINT witness_height UNIQUE (note, block)
)",
NO_PARAMS,
)?;
data.execute(
"CREATE TABLE IF NOT EXISTS sent_notes (
id_note INTEGER PRIMARY KEY,
tx INTEGER NOT NULL,
output_index INTEGER NOT NULL,
from_account INTEGER NOT NULL,
address TEXT NOT NULL,
value INTEGER NOT NULL,
memo BLOB,
FOREIGN KEY (tx) REFERENCES transactions(id_tx),
FOREIGN KEY (from_account) REFERENCES accounts(account),
CONSTRAINT tx_output UNIQUE (tx, output_index)
)",
NO_PARAMS,
)?;
Ok(())
}
/// Initialises the data database with the given [`ExtendedFullViewingKey`]s.
///
/// The [`ExtendedFullViewingKey`]s are stored internally and used by other APIs such as
/// [`get_address`], [`scan_cached_blocks`], and [`create_to_address`]. `extfvks` **MUST**
/// be arranged in account-order; that is, the [`ExtendedFullViewingKey`] for ZIP 32
/// account `i` **MUST** be at `extfvks[i]`.
///
/// # Examples
///
/// ```
/// use tempfile::NamedTempFile;
/// use zcash_client_sqlite::init::{init_accounts_table, init_data_database};
/// use zcash_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey};
///
/// let data_file = NamedTempFile::new().unwrap();
/// let db_data = data_file.path();
/// init_data_database(&db_data).unwrap();
///
/// let extsk = ExtendedSpendingKey::master(&[]);
/// let extfvks = [ExtendedFullViewingKey::from(&extsk)];
/// init_accounts_table(&db_data, &extfvks).unwrap();
/// ```
///
/// [`get_address`]: crate::query::get_address
/// [`scan_cached_blocks`]: crate::scan::scan_cached_blocks
/// [`create_to_address`]: crate::transact::create_to_address
pub fn init_accounts_table<P: AsRef<Path>>(
db_data: P,
extfvks: &[ExtendedFullViewingKey],
) -> Result<(), Error> {
let data = Connection::open(db_data)?;
let mut empty_check = data.prepare("SELECT * FROM accounts LIMIT 1")?;
if empty_check.exists(NO_PARAMS)? {
return Err(Error(ErrorKind::TableNotEmpty));
}
// Insert accounts atomically
data.execute("BEGIN IMMEDIATE", NO_PARAMS)?;
for (account, extfvk) in extfvks.iter().enumerate() {
let address = address_from_extfvk(extfvk);
let extfvk =
encode_extended_full_viewing_key(HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, extfvk);
data.execute(
"INSERT INTO accounts (account, extfvk, address)
VALUES (?, ?, ?)",
&[
(account as u32).to_sql()?,
extfvk.to_sql()?,
address.to_sql()?,
],
)?;
}
data.execute("COMMIT", NO_PARAMS)?;
Ok(())
}
/// Initialises the data database with the given block.
///
/// This enables a newly-created database to be immediately usable, without needing to
/// synchronise historic blocks.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::init::init_blocks_table;
/// use zcash_primitives::block::BlockHash;
///
/// // The block height.
/// let height = 500_000;
/// // The hash of the block header.
/// let hash = BlockHash([0; 32]);
/// // The nTime field from the block header.
/// let time = 1_234_567_890;
/// // The serialized Sapling commitment tree as of this block.
/// // Pre-compute and hard-code, or obtain from a service.
/// let sapling_tree = &[];
///
/// init_blocks_table("/path/to/data.db", height, hash, time, sapling_tree);
/// ```
pub fn init_blocks_table<P: AsRef<Path>>(
db_data: P,
height: i32,
hash: BlockHash,
time: u32,
sapling_tree: &[u8],
) -> Result<(), Error> {
let data = Connection::open(db_data)?;
let mut empty_check = data.prepare("SELECT * FROM blocks LIMIT 1")?;
if empty_check.exists(NO_PARAMS)? {
return Err(Error(ErrorKind::TableNotEmpty));
}
data.execute(
"INSERT INTO blocks (height, hash, time, sapling_tree)
VALUES (?, ?, ?, ?)",
&[
height.to_sql()?,
hash.0.to_sql()?,
time.to_sql()?,
sapling_tree.to_sql()?,
],
)?;
Ok(())
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use zcash_client_backend::encoding::decode_payment_address;
use zcash_primitives::{
block::BlockHash,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use super::{init_accounts_table, init_blocks_table, init_data_database};
use crate::{query::get_address, HRP_SAPLING_PAYMENT_ADDRESS};
#[test]
fn init_accounts_table_only_works_once() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// We can call the function as many times as we want with no data
init_accounts_table(&db_data, &[]).unwrap();
init_accounts_table(&db_data, &[]).unwrap();
// First call with data should initialise the accounts table
let extfvks = [ExtendedFullViewingKey::from(&ExtendedSpendingKey::master(
&[],
))];
init_accounts_table(&db_data, &extfvks).unwrap();
// Subsequent calls should return an error
init_accounts_table(&db_data, &[]).unwrap_err();
init_accounts_table(&db_data, &extfvks).unwrap_err();
}
#[test]
fn init_blocks_table_only_works_once() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// First call with data should initialise the blocks table
init_blocks_table(&db_data, 1, BlockHash([1; 32]), 1, &[]).unwrap();
// Subsequent calls should return an error
init_blocks_table(&db_data, 2, BlockHash([2; 32]), 2, &[]).unwrap_err();
}
#[test]
fn init_accounts_table_stores_correct_address() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvks = [ExtendedFullViewingKey::from(&extsk)];
init_accounts_table(&db_data, &extfvks).unwrap();
// The account's address should be in the data DB
let addr = get_address(&db_data, 0).unwrap();
let pa = decode_payment_address(HRP_SAPLING_PAYMENT_ADDRESS, &addr).unwrap();
assert_eq!(pa.unwrap(), extsk.default_address().unwrap().1);
}
}

View File

@@ -0,0 +1,262 @@
//! *An SQLite-based Zcash light client.*
//!
//! `zcash_client_backend` contains a set of APIs that collectively implement an
//! SQLite-based light client for the Zcash network.
//!
//! # Design
//!
//! The light client is built around two SQLite databases:
//!
//! - A cache database, used to inform the light client about new [`CompactBlock`]s. It is
//! read-only within all light client APIs *except* for [`init_cache_database`] which
//! can be used to initialize the database.
//!
//! - A data database, where the light client's state is stored. It is read-write within
//! the light client APIs, and **assumed to be read-only outside these APIs**. Callers
//! **MUST NOT** write to the database without using these APIs. Callers **MAY** read
//! the database directly in order to extract information for display to users.
//!
//! # Features
//!
//! The `mainnet` feature configures the light client for use with the Zcash mainnet. By
//! default, the light client is configured for use with the Zcash testnet.
//!
//! [`CompactBlock`]: zcash_client_backend::proto::compact_formats::CompactBlock
//! [`init_cache_database`]: crate::init::init_cache_database
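To tie the pieces together, a hedged sketch of the intended flow using APIs from this crate (paths are placeholders, and downloading CompactBlocks into the cache database is out of scope here):

use zcash_client_sqlite::{
    error::Error,
    init::{init_accounts_table, init_cache_database, init_data_database},
    query::get_balance,
    scan::scan_cached_blocks,
};
use zcash_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey};

fn light_client_flow() -> Result<(), Error> {
    let db_cache = "/path/to/cache.db";
    let db_data = "/path/to/data.db";

    // One-time setup.
    init_cache_database(db_cache)?;
    init_data_database(db_data)?;
    let extsk = ExtendedSpendingKey::master(&[]);
    init_accounts_table(db_data, &[ExtendedFullViewingKey::from(&extsk)])?;

    // After CompactBlocks have been downloaded into db_cache:
    scan_cached_blocks(db_cache, db_data)?;
    let _balance = get_balance(db_data, 0)?;
    Ok(())
}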
use rusqlite::{Connection, NO_PARAMS};
use std::cmp;
use zcash_client_backend::encoding::encode_payment_address;
use zcash_primitives::zip32::ExtendedFullViewingKey;
#[cfg(feature = "mainnet")]
use zcash_client_backend::constants::mainnet::{
HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, HRP_SAPLING_PAYMENT_ADDRESS,
};
#[cfg(not(feature = "mainnet"))]
use zcash_client_backend::constants::testnet::{
HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, HRP_SAPLING_PAYMENT_ADDRESS,
};
pub mod address;
pub mod chain;
pub mod error;
pub mod init;
pub mod query;
pub mod scan;
pub mod transact;
const ANCHOR_OFFSET: u32 = 10;
#[cfg(feature = "mainnet")]
const SAPLING_ACTIVATION_HEIGHT: i32 = 419_200;
#[cfg(not(feature = "mainnet"))]
const SAPLING_ACTIVATION_HEIGHT: i32 = 280_000;
fn address_from_extfvk(extfvk: &ExtendedFullViewingKey) -> String {
let addr = extfvk.default_address().unwrap().1;
encode_payment_address(HRP_SAPLING_PAYMENT_ADDRESS, &addr)
}
/// Determines the target height for a transaction, and the height from which to
/// select anchors, based on the current synchronised block chain.
fn get_target_and_anchor_heights(data: &Connection) -> Result<(u32, u32), error::Error> {
data.query_row_and_then(
"SELECT MIN(height), MAX(height) FROM blocks",
NO_PARAMS,
|row| match (row.get::<_, u32>(0), row.get::<_, u32>(1)) {
// If there are no blocks, the query returns NULL.
(Err(rusqlite::Error::InvalidColumnType(_, _, _)), _)
| (_, Err(rusqlite::Error::InvalidColumnType(_, _, _))) => {
Err(error::Error(error::ErrorKind::ScanRequired))
}
(Err(e), _) | (_, Err(e)) => Err(e.into()),
(Ok(min_height), Ok(max_height)) => {
let target_height = max_height + 1;
// Select an anchor ANCHOR_OFFSET back from the target block,
// unless that would be before the earliest block we have.
let anchor_height =
cmp::max(target_height.saturating_sub(ANCHOR_OFFSET), min_height);
Ok((target_height, anchor_height))
}
},
)
}
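
In other words: the target is one block above the newest scanned block, and the anchor trails the target by up to `ANCHOR_OFFSET` blocks but never falls below the oldest scanned block. A small illustration of the same arithmetic with the SQL removed (the function name is ours):

```rust
// Illustration only: the height arithmetic of get_target_and_anchor_heights.
fn target_and_anchor(min_height: u32, max_height: u32, anchor_offset: u32) -> (u32, u32) {
    let target_height = max_height + 1;
    let anchor_height = std::cmp::max(target_height.saturating_sub(anchor_offset), min_height);
    (target_height, anchor_height)
}

// With blocks 419_200..=419_250 scanned and ANCHOR_OFFSET = 10:
// target_and_anchor(419_200, 419_250, 10) == (419_251, 419_241)
```
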
#[cfg(test)]
mod tests {
use ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::bls12_381::Bls12;
use protobuf::Message;
use rand_core::RngCore;
use rand_os::OsRng;
use rusqlite::{types::ToSql, Connection};
use std::path::Path;
use zcash_client_backend::proto::compact_formats::{
CompactBlock, CompactOutput, CompactSpend, CompactTx,
};
use zcash_primitives::{
block::BlockHash,
jubjub::fs::Fs,
note_encryption::{Memo, SaplingNoteEncryption},
primitives::{Note, PaymentAddress},
transaction::components::Amount,
zip32::ExtendedFullViewingKey,
JUBJUB,
};
/// Create a fake CompactBlock at the given height, containing a single output paying
/// the given address. Returns the CompactBlock and the nullifier for the new note.
pub(crate) fn fake_compact_block(
height: i32,
prev_hash: BlockHash,
extfvk: ExtendedFullViewingKey,
value: Amount,
) -> (CompactBlock, Vec<u8>) {
let to = extfvk.default_address().unwrap().1;
// Create a fake Note for the account
let mut rng = OsRng;
let note = Note {
g_d: to.diversifier.g_d::<Bls12>(&JUBJUB).unwrap(),
pk_d: to.pk_d.clone(),
value: value.into(),
r: Fs::random(&mut rng),
};
let encryptor = SaplingNoteEncryption::new(
extfvk.fvk.ovk,
note.clone(),
to.clone(),
Memo::default(),
&mut rng,
);
let mut cmu = vec![];
note.cm(&JUBJUB).into_repr().write_le(&mut cmu).unwrap();
let mut epk = vec![];
encryptor.epk().write(&mut epk).unwrap();
let enc_ciphertext = encryptor.encrypt_note_plaintext();
// Create a fake CompactBlock containing the note
let mut cout = CompactOutput::new();
cout.set_cmu(cmu);
cout.set_epk(epk);
// Compact outputs carry only the first 52 bytes of the ciphertext, which
// cover the compact note plaintext (diversifier, value, rcm) used for trial
// decryption; the memo is dropped.
cout.set_ciphertext(enc_ciphertext[..52].to_vec());
let mut ctx = CompactTx::new();
let mut txid = vec![0; 32];
rng.fill_bytes(&mut txid);
ctx.set_hash(txid);
ctx.outputs.push(cout);
let mut cb = CompactBlock::new();
cb.set_height(height as u64);
cb.hash.resize(32, 0);
rng.fill_bytes(&mut cb.hash);
cb.prevHash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx);
(cb, note.nf(&extfvk.fvk.vk, 0, &JUBJUB))
}
/// Create a fake CompactBlock at the given height, spending a single note from the
/// given address.
pub(crate) fn fake_compact_block_spending(
height: i32,
prev_hash: BlockHash,
(nf, in_value): (Vec<u8>, Amount),
extfvk: ExtendedFullViewingKey,
to: PaymentAddress<Bls12>,
value: Amount,
) -> CompactBlock {
let mut rng = OsRng;
// Create a fake CompactBlock containing the note
let mut cspend = CompactSpend::new();
cspend.set_nf(nf);
let mut ctx = CompactTx::new();
let mut txid = vec![0; 32];
rng.fill_bytes(&mut txid);
ctx.set_hash(txid);
ctx.spends.push(cspend);
// Create a fake Note for the payment
ctx.outputs.push({
let note = Note {
g_d: to.diversifier.g_d::<Bls12>(&JUBJUB).unwrap(),
pk_d: to.pk_d.clone(),
value: value.into(),
r: Fs::random(&mut rng),
};
let encryptor = SaplingNoteEncryption::new(
extfvk.fvk.ovk,
note.clone(),
to,
Memo::default(),
&mut rng,
);
let mut cmu = vec![];
note.cm(&JUBJUB).into_repr().write_le(&mut cmu).unwrap();
let mut epk = vec![];
encryptor.epk().write(&mut epk).unwrap();
let enc_ciphertext = encryptor.encrypt_note_plaintext();
let mut cout = CompactOutput::new();
cout.set_cmu(cmu);
cout.set_epk(epk);
cout.set_ciphertext(enc_ciphertext[..52].to_vec());
cout
});
// Create a fake Note for the change
ctx.outputs.push({
let change_addr = extfvk.default_address().unwrap().1;
let note = Note {
g_d: change_addr.diversifier.g_d::<Bls12>(&JUBJUB).unwrap(),
pk_d: change_addr.pk_d.clone(),
value: (in_value - value).into(),
r: Fs::random(&mut rng),
};
let encryptor = SaplingNoteEncryption::new(
extfvk.fvk.ovk,
note.clone(),
change_addr,
Memo::default(),
&mut rng,
);
let mut cmu = vec![];
note.cm(&JUBJUB).into_repr().write_le(&mut cmu).unwrap();
let mut epk = vec![];
encryptor.epk().write(&mut epk).unwrap();
let enc_ciphertext = encryptor.encrypt_note_plaintext();
let mut cout = CompactOutput::new();
cout.set_cmu(cmu);
cout.set_epk(epk);
cout.set_ciphertext(enc_ciphertext[..52].to_vec());
cout
});
let mut cb = CompactBlock::new();
cb.set_height(height as u64);
cb.hash.resize(32, 0);
rng.fill_bytes(&mut cb.hash);
cb.prevHash.extend_from_slice(&prev_hash.0);
cb.vtx.push(ctx);
cb
}
/// Insert a fake CompactBlock into the cache DB.
pub(crate) fn insert_into_cache<P: AsRef<Path>>(db_cache: P, cb: &CompactBlock) {
let cb_bytes = cb.write_to_bytes().unwrap();
let cache = Connection::open(&db_cache).unwrap();
cache
.prepare("INSERT INTO compactblocks (height, data) VALUES (?, ?)")
.unwrap()
.execute(&[
(cb.height as i32).to_sql().unwrap(),
cb_bytes.to_sql().unwrap(),
])
.unwrap();
}
}

View File

@@ -0,0 +1,201 @@
//! Functions for querying information in the data database.
use rusqlite::Connection;
use std::path::Path;
use zcash_primitives::{note_encryption::Memo, transaction::components::Amount};
use crate::{
error::{Error, ErrorKind},
get_target_and_anchor_heights,
};
/// Returns the address for the account.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::query::get_address;
///
/// let addr = get_address("/path/to/data.db", 0);
/// ```
pub fn get_address<P: AsRef<Path>>(db_data: P, account: u32) -> Result<String, Error> {
let data = Connection::open(db_data)?;
let addr = data.query_row(
"SELECT address FROM accounts
WHERE account = ?",
&[account],
|row| row.get(0),
)?;
Ok(addr)
}
/// Returns the balance for the account, including all mined unspent notes that we know
/// about.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::query::get_balance;
///
/// let addr = get_balance("/path/to/data.db", 0);
/// ```
pub fn get_balance<P: AsRef<Path>>(db_data: P, account: u32) -> Result<Amount, Error> {
let data = Connection::open(db_data)?;
let balance = data.query_row(
"SELECT SUM(value) FROM received_notes
INNER JOIN transactions ON transactions.id_tx = received_notes.tx
WHERE account = ? AND spent IS NULL AND transactions.block IS NOT NULL",
&[account],
|row| row.get(0).or(Ok(0)),
)?;
match Amount::from_i64(balance) {
Ok(amount) if !amount.is_negative() => Ok(amount),
_ => Err(Error(ErrorKind::CorruptedData(
"Sum of values in received_notes is out of range",
))),
}
}
/// Returns the verified balance for the account, which ignores notes that have been
/// received too recently and are not yet deemed spendable.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::query::get_verified_balance;
///
/// let addr = get_verified_balance("/path/to/data.db", 0);
/// ```
pub fn get_verified_balance<P: AsRef<Path>>(db_data: P, account: u32) -> Result<Amount, Error> {
let data = Connection::open(db_data)?;
let (_, anchor_height) = get_target_and_anchor_heights(&data)?;
let balance = data.query_row(
"SELECT SUM(value) FROM received_notes
INNER JOIN transactions ON transactions.id_tx = received_notes.tx
WHERE account = ? AND spent IS NULL AND transactions.block <= ?",
&[account, anchor_height],
|row| row.get(0).or(Ok(0)),
)?;
match Amount::from_i64(balance) {
Ok(amount) if !amount.is_negative() => Ok(amount),
_ => Err(Error(ErrorKind::CorruptedData(
"Sum of values in received_notes is out of range",
))),
}
}
/// Returns the memo for a received note, if it is known and a valid UTF-8 string.
///
/// The note is identified by its row index in the `received_notes` table within the data
/// database.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::query::get_received_memo_as_utf8;
///
/// let memo = get_received_memo_as_utf8("/path/to/data.db", 27);
/// ```
pub fn get_received_memo_as_utf8<P: AsRef<Path>>(
db_data: P,
id_note: i64,
) -> Result<Option<String>, Error> {
let data = Connection::open(db_data)?;
let memo: Vec<_> = data.query_row(
"SELECT memo FROM received_notes
WHERE id_note = ?",
&[id_note],
|row| row.get(0),
)?;
match Memo::from_bytes(&memo) {
Some(memo) => match memo.to_utf8() {
Some(Ok(res)) => Ok(Some(res)),
Some(Err(e)) => Err(Error(ErrorKind::InvalidMemo(e))),
None => Ok(None),
},
None => Ok(None),
}
}
/// Returns the memo for a sent note, if it is known and a valid UTF-8 string.
///
/// The note is identified by its row index in the `sent_notes` table within the data
/// database.
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::query::get_sent_memo_as_utf8;
///
/// let memo = get_sent_memo_as_utf8("/path/to/data.db", 12);
/// ```
pub fn get_sent_memo_as_utf8<P: AsRef<Path>>(
db_data: P,
id_note: i64,
) -> Result<Option<String>, Error> {
let data = Connection::open(db_data)?;
let memo: Vec<_> = data.query_row(
"SELECT memo FROM sent_notes
WHERE id_note = ?",
&[id_note],
|row| row.get(0),
)?;
match Memo::from_bytes(&memo) {
Some(memo) => match memo.to_utf8() {
Some(Ok(res)) => Ok(Some(res)),
Some(Err(e)) => Err(Error(ErrorKind::InvalidMemo(e))),
None => Ok(None),
},
None => Ok(None),
}
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use zcash_primitives::{
transaction::components::Amount,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use super::{get_address, get_balance, get_verified_balance};
use crate::{
error::ErrorKind,
init::{init_accounts_table, init_data_database},
};
#[test]
fn empty_database_has_no_balance() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvks = [ExtendedFullViewingKey::from(&extsk)];
init_accounts_table(&db_data, &extfvks).unwrap();
// The account should be empty
assert_eq!(get_balance(db_data, 0).unwrap(), Amount::zero());
// The account should have no verified balance, as we haven't scanned any blocks
let e = get_verified_balance(db_data, 0).unwrap_err();
match e.kind() {
ErrorKind::ScanRequired => (),
_ => panic!("Unexpected error: {:?}", e),
}
// An invalid account has no address, and is treated as having zero balance
assert!(get_address(db_data, 1).is_err());
assert_eq!(get_balance(db_data, 1).unwrap(), Amount::zero());
}
}

View File

@@ -0,0 +1,500 @@
//! Functions for scanning the chain and extracting relevant information.
use ff::{PrimeField, PrimeFieldRepr};
use protobuf::parse_from_bytes;
use rusqlite::{types::ToSql, Connection, NO_PARAMS};
use std::path::Path;
use zcash_client_backend::{
encoding::decode_extended_full_viewing_key, proto::compact_formats::CompactBlock,
welding_rig::scan_block,
};
use zcash_primitives::{
merkle_tree::{CommitmentTree, IncrementalWitness},
sapling::Node,
JUBJUB,
};
use crate::{
error::{Error, ErrorKind},
HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, SAPLING_ACTIVATION_HEIGHT,
};
struct CompactBlockRow {
height: i32,
data: Vec<u8>,
}
#[derive(Clone)]
struct WitnessRow {
id_note: i64,
witness: IncrementalWitness<Node>,
}
/// Scans new blocks added to the cache for any transactions received by the tracked
/// accounts.
///
/// This function pays attention only to cached blocks with heights greater than the
/// highest scanned block in `db_data`. Cached blocks with lower heights are not verified
/// against previously-scanned blocks. In particular, this function **assumes** that the
/// caller is handling rollbacks.
///
/// For brand-new light client databases, this function starts scanning from the Sapling
/// activation height. This height can be fast-forwarded to a more recent block by calling
/// [`init_blocks_table`] before this function.
///
/// Scanned blocks are required to be height-sequential. If a block is missing from the
/// cache, an error will be returned with kind [`ErrorKind::InvalidHeight`].
///
/// # Examples
///
/// ```
/// use zcash_client_sqlite::scan::scan_cached_blocks;
///
/// scan_cached_blocks("/path/to/cache.db", "/path/to/data.db");
/// ```
///
/// [`init_blocks_table`]: crate::init::init_blocks_table
pub fn scan_cached_blocks<P: AsRef<Path>, Q: AsRef<Path>>(
db_cache: P,
db_data: Q,
) -> Result<(), Error> {
let cache = Connection::open(db_cache)?;
let data = Connection::open(db_data)?;
// Recall where we synced up to previously.
// If we have never synced, use sapling activation height to select all cached CompactBlocks.
let mut last_height = data.query_row("SELECT MAX(height) FROM blocks", NO_PARAMS, |row| {
row.get(0).or(Ok(SAPLING_ACTIVATION_HEIGHT - 1))
})?;
// Fetch the CompactBlocks we need to scan
let mut stmt_blocks = cache
.prepare("SELECT height, data FROM compactblocks WHERE height > ? ORDER BY height ASC")?;
let rows = stmt_blocks.query_map(&[last_height], |row| {
Ok(CompactBlockRow {
height: row.get(0)?,
data: row.get(1)?,
})
})?;
// Fetch the ExtendedFullViewingKeys we are tracking
let mut stmt_fetch_accounts =
data.prepare("SELECT extfvk FROM accounts ORDER BY account ASC")?;
let extfvks = stmt_fetch_accounts.query_map(NO_PARAMS, |row| {
row.get(0).map(|extfvk: String| {
decode_extended_full_viewing_key(HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, &extfvk)
})
})?;
// Raise SQL errors from the query, IO errors from parsing, and incorrect HRP errors.
let extfvks: Vec<_> = extfvks
.collect::<Result<Result<Option<_>, _>, _>>()??
.ok_or(Error(ErrorKind::IncorrectHRPExtFVK))?;
// Get the most recent CommitmentTree
let mut stmt_fetch_tree = data.prepare("SELECT sapling_tree FROM blocks WHERE height = ?")?;
let mut tree = stmt_fetch_tree
.query_row(&[last_height], |row| {
row.get(0).map(|data: Vec<_>| {
CommitmentTree::read(&data[..]).unwrap_or_else(|_| CommitmentTree::new())
})
})
.unwrap_or_else(|_| CommitmentTree::new());
// Get most recent incremental witnesses for the notes we are tracking
let mut stmt_fetch_witnesses =
data.prepare("SELECT note, witness FROM sapling_witnesses WHERE block = ?")?;
let witnesses = stmt_fetch_witnesses.query_map(&[last_height], |row| {
let id_note = row.get(0)?;
let data: Vec<_> = row.get(1)?;
Ok(IncrementalWitness::read(&data[..]).map(|witness| WitnessRow { id_note, witness }))
})?;
let mut witnesses: Vec<_> = witnesses.collect::<Result<Result<_, _>, _>>()??;
// Get the nullifiers for the notes we are tracking
let mut stmt_fetch_nullifiers =
data.prepare("SELECT id_note, nf, account FROM received_notes WHERE spent IS NULL")?;
let nullifiers = stmt_fetch_nullifiers.query_map(NO_PARAMS, |row| {
let nf: Vec<_> = row.get(1)?;
let account: i64 = row.get(2)?;
Ok((nf, account as usize))
})?;
let mut nullifiers: Vec<_> = nullifiers.collect::<Result<_, _>>()?;
// Prepare per-block SQL statements
let mut stmt_insert_block = data.prepare(
"INSERT INTO blocks (height, hash, time, sapling_tree)
VALUES (?, ?, ?, ?)",
)?;
let mut stmt_update_tx = data.prepare(
"UPDATE transactions
SET block = ?, tx_index = ? WHERE txid = ?",
)?;
let mut stmt_insert_tx = data.prepare(
"INSERT INTO transactions (txid, block, tx_index)
VALUES (?, ?, ?)",
)?;
let mut stmt_select_tx = data.prepare("SELECT id_tx FROM transactions WHERE txid = ?")?;
let mut stmt_mark_spent_note =
data.prepare("UPDATE received_notes SET spent = ? WHERE nf = ?")?;
let mut stmt_insert_note = data.prepare(
"INSERT INTO received_notes (tx, output_index, account, diversifier, value, rcm, nf, is_change)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
)?;
let mut stmt_insert_witness = data.prepare(
"INSERT INTO sapling_witnesses (note, block, witness)
VALUES (?, ?, ?)",
)?;
let mut stmt_prune_witnesses = data.prepare("DELETE FROM sapling_witnesses WHERE block < ?")?;
let mut stmt_update_expired = data.prepare(
"UPDATE received_notes SET spent = NULL WHERE EXISTS (
SELECT id_tx FROM transactions
WHERE id_tx = received_notes.spent AND block IS NULL AND expiry_height < ?
)",
)?;
for row in rows {
let row = row?;
// Start an SQL transaction for this block.
data.execute("BEGIN IMMEDIATE", NO_PARAMS)?;
// Scanned blocks MUST be height-sequential.
if row.height != (last_height + 1) {
return Err(Error(ErrorKind::InvalidHeight(last_height + 1, row.height)));
}
last_height = row.height;
let block: CompactBlock = parse_from_bytes(&row.data)?;
let block_hash = block.hash.clone();
let block_time = block.time;
let txs = {
let nf_refs: Vec<_> = nullifiers.iter().map(|(nf, acc)| (&nf[..], *acc)).collect();
let mut witness_refs: Vec<_> = witnesses.iter_mut().map(|w| &mut w.witness).collect();
scan_block(
block,
&extfvks[..],
&nf_refs,
&mut tree,
&mut witness_refs[..],
)
};
// Enforce that all roots match. This is slow, so only include in debug builds.
#[cfg(debug_assertions)]
{
let cur_root = tree.root();
for row in &witnesses {
if row.witness.root() != cur_root {
return Err(Error(ErrorKind::InvalidWitnessAnchor(
row.id_note,
last_height,
)));
}
}
for tx in &txs {
for output in tx.shielded_outputs.iter() {
if output.witness.root() != cur_root {
return Err(Error(ErrorKind::InvalidNewWitnessAnchor(
output.index,
tx.txid,
last_height,
output.witness.root(),
)));
}
}
}
}
// Insert the block into the database.
let mut encoded_tree = Vec::new();
tree.write(&mut encoded_tree)
.expect("Should be able to write to a Vec");
stmt_insert_block.execute(&[
row.height.to_sql()?,
block_hash.to_sql()?,
block_time.to_sql()?,
encoded_tree.to_sql()?,
])?;
for tx in txs {
// First try update an existing transaction in the database.
let txid = tx.txid.0.to_vec();
let tx_row = if stmt_update_tx.execute(&[
row.height.to_sql()?,
(tx.index as i64).to_sql()?,
txid.to_sql()?,
])? == 0
{
// It isn't there, so insert our transaction into the database.
stmt_insert_tx.execute(&[
txid.to_sql()?,
row.height.to_sql()?,
(tx.index as i64).to_sql()?,
])?;
data.last_insert_rowid()
} else {
// It was there, so grab its row number.
stmt_select_tx.query_row(&[txid], |row| row.get(0))?
};
// Mark notes as spent and remove them from the scanning cache
for spend in &tx.shielded_spends {
stmt_mark_spent_note.execute(&[tx_row.to_sql()?, spend.nf.to_sql()?])?;
}
nullifiers = nullifiers
.into_iter()
.filter(|(nf, _acc)| {
tx.shielded_spends
.iter()
.find(|spend| &spend.nf == nf)
.is_none()
})
.collect();
for output in tx.shielded_outputs {
let mut rcm = [0; 32];
output.note.r.into_repr().write_le(&mut rcm[..])?;
let nf = output.note.nf(
&extfvks[output.account].fvk.vk,
output.witness.position() as u64,
&JUBJUB,
);
// Insert received note into the database.
// Assumptions:
// - A transaction will not contain more than 2^63 shielded outputs.
// - A note value will never exceed 2^63 zatoshis.
stmt_insert_note.execute(&[
tx_row.to_sql()?,
(output.index as i64).to_sql()?,
(output.account as i64).to_sql()?,
output.to.diversifier.0.to_sql()?,
(output.note.value as i64).to_sql()?,
rcm.to_sql()?,
nf.to_sql()?,
output.is_change.to_sql()?,
])?;
let note_row = data.last_insert_rowid();
// Save witness for note.
witnesses.push(WitnessRow {
id_note: note_row,
witness: output.witness,
});
// Cache nullifier for note (to detect subsequent spends in this scan).
nullifiers.push((nf, output.account));
}
}
// Insert current witnesses into the database.
let mut encoded = Vec::new();
for witness_row in witnesses.iter() {
encoded.clear();
witness_row
.witness
.write(&mut encoded)
.expect("Should be able to write to a Vec");
stmt_insert_witness.execute(&[
witness_row.id_note.to_sql()?,
last_height.to_sql()?,
encoded.to_sql()?,
])?;
}
// Prune the stored witnesses (we only expect rollbacks of at most 100 blocks).
stmt_prune_witnesses.execute(&[last_height - 100])?;
// Update now-expired transactions that didn't get mined.
stmt_update_expired.execute(&[last_height])?;
// Commit the SQL transaction, writing this block's data atomically.
data.execute("COMMIT", NO_PARAMS)?;
}
Ok(())
}
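
As a usage sketch, a caller that wants to recover from a gap in the cache can match on the error kind described above and re-download from the expected height. This assumes `Error::kind()` is exposed publicly, as this crate's own tests use it; the function name and handling below are placeholders.

```rust
use zcash_client_sqlite::{error::ErrorKind, scan::scan_cached_blocks};

fn scan_tolerating_gaps(db_cache: &str, db_data: &str) {
    match scan_cached_blocks(db_cache, db_data) {
        Ok(()) => (),
        Err(e) => match e.kind() {
            // The cache skipped a height; re-download from `expected` onwards
            // and call scan_cached_blocks again.
            ErrorKind::InvalidHeight(expected, found) => {
                eprintln!("cache gap: expected block {}, found {}", expected, found)
            }
            _ => eprintln!("scan failed: {:?}", e),
        },
    }
}
```
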
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use zcash_primitives::{
block::BlockHash,
transaction::components::Amount,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use super::scan_cached_blocks;
use crate::{
init::{init_accounts_table, init_cache_database, init_data_database},
query::get_balance,
tests::{fake_compact_block, fake_compact_block_spending, insert_into_cache},
SAPLING_ACTIVATION_HEIGHT,
};
#[test]
fn scan_cached_blocks_requires_sequential_blocks() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Create a block with height SAPLING_ACTIVATION_HEIGHT
let value = Amount::from_u64(50000).unwrap();
let (cb1, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb1);
scan_cached_blocks(db_cache, db_data).unwrap();
assert_eq!(get_balance(db_data, 0).unwrap(), value);
// We cannot scan a block of height SAPLING_ACTIVATION_HEIGHT + 2 next
let (cb2, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 1,
cb1.hash(),
extfvk.clone(),
value,
);
let (cb3, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 2,
cb2.hash(),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb3);
match scan_cached_blocks(db_cache, db_data) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
format!(
"Expected height of next CompactBlock to be {}, but was {}",
SAPLING_ACTIVATION_HEIGHT + 1,
SAPLING_ACTIVATION_HEIGHT + 2
)
),
}
// If we add a block of height SAPLING_ACTIVATION_HEIGHT + 1, we can now scan both
insert_into_cache(db_cache, &cb2);
scan_cached_blocks(db_cache, db_data).unwrap();
assert_eq!(
get_balance(db_data, 0).unwrap(),
Amount::from_u64(150_000).unwrap()
);
}
#[test]
fn scan_cached_blocks_finds_received_notes() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Account balance should be zero
assert_eq!(get_balance(db_data, 0).unwrap(), Amount::zero());
// Create a fake CompactBlock sending value to the address
let value = Amount::from_u64(5).unwrap();
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Account balance should reflect the received note
assert_eq!(get_balance(db_data, 0).unwrap(), value);
// Create a second fake CompactBlock sending more value to the address
let value2 = Amount::from_u64(7).unwrap();
let (cb2, _) = fake_compact_block(SAPLING_ACTIVATION_HEIGHT + 1, cb.hash(), extfvk, value2);
insert_into_cache(db_cache, &cb2);
// Scan the cache again
scan_cached_blocks(db_cache, db_data).unwrap();
// Account balance should reflect both received notes
assert_eq!(get_balance(db_data, 0).unwrap(), value + value2);
}
#[test]
fn scan_cached_blocks_finds_change_notes() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Account balance should be zero
assert_eq!(get_balance(db_data, 0).unwrap(), Amount::zero());
// Create a fake CompactBlock sending value to the address
let value = Amount::from_u64(5).unwrap();
let (cb, nf) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
// Scan the cache
scan_cached_blocks(db_cache, db_data).unwrap();
// Account balance should reflect the received note
assert_eq!(get_balance(db_data, 0).unwrap(), value);
// Create a second fake CompactBlock spending value from the address
let extsk2 = ExtendedSpendingKey::master(&[0]);
let to2 = extsk2.default_address().unwrap().1;
let value2 = Amount::from_u64(2).unwrap();
insert_into_cache(
db_cache,
&fake_compact_block_spending(
SAPLING_ACTIVATION_HEIGHT + 1,
cb.hash(),
(nf, value),
extfvk,
to2,
value2,
),
);
// Scan the cache again
scan_cached_blocks(db_cache, db_data).unwrap();
// Account balance should equal the change
assert_eq!(get_balance(db_data, 0).unwrap(), value - value2);
}
}

View File

@@ -0,0 +1,665 @@
//! Functions for creating transactions.
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::Bls12;
use rusqlite::{types::ToSql, Connection, NO_PARAMS};
use std::path::Path;
use zcash_client_backend::encoding::encode_extended_full_viewing_key;
use zcash_primitives::{
jubjub::fs::{Fs, FsRepr},
merkle_tree::IncrementalWitness,
note_encryption::Memo,
primitives::{Diversifier, Note},
prover::TxProver,
sapling::Node,
transaction::{
builder::Builder,
components::{amount::DEFAULT_FEE, Amount},
},
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
JUBJUB,
};
use crate::{
address::RecipientAddress,
error::{Error, ErrorKind},
get_target_and_anchor_heights, HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY,
};
struct SelectedNoteRow {
diversifier: Diversifier,
note: Note<Bls12>,
witness: IncrementalWitness<Node>,
}
/// Creates a transaction paying the specified address from the given account.
///
/// Returns the row index of the newly-created transaction in the `transactions` table
/// within the data database. The caller can read the raw transaction bytes from the `raw`
/// column in order to broadcast the transaction to the network.
///
/// Do not call this multiple times in parallel, or you will generate transactions that
/// double-spend the same notes.
///
/// # Examples
///
/// ```
/// use zcash_client_backend::{
/// constants::{testnet::COIN_TYPE, SAPLING_CONSENSUS_BRANCH_ID},
/// keys::spending_key,
/// };
/// use zcash_client_sqlite::transact::create_to_address;
/// use zcash_primitives::transaction::components::Amount;
/// use zcash_proofs::prover::LocalTxProver;
///
/// let tx_prover = match LocalTxProver::with_default_location() {
/// Some(tx_prover) => tx_prover,
/// None => {
/// panic!("Cannot locate the Zcash parameters. Please run zcash-fetch-params or fetch-params.sh to download the parameters, and then re-run the tests.");
/// }
/// };
///
/// let account = 0;
/// let extsk = spending_key(&[0; 32][..], COIN_TYPE, account);
/// let to = extsk.default_address().unwrap().1.into();
/// match create_to_address(
/// "/path/to/data.db",
/// SAPLING_CONSENSUS_BRANCH_ID,
/// tx_prover,
/// (account, &extsk),
/// &to,
/// Amount::from_u64(1).unwrap(),
/// None,
/// ) {
/// Ok(tx_row) => (),
/// Err(e) => (),
/// }
/// ```
pub fn create_to_address<P: AsRef<Path>>(
db_data: P,
consensus_branch_id: u32,
prover: impl TxProver,
(account, extsk): (u32, &ExtendedSpendingKey),
to: &RecipientAddress,
value: Amount,
memo: Option<Memo>,
) -> Result<i64, Error> {
let data = Connection::open(db_data)?;
// Check that the ExtendedSpendingKey we have been given corresponds to the
// ExtendedFullViewingKey for the account we are spending from.
let extfvk = ExtendedFullViewingKey::from(extsk);
if !data
.prepare("SELECT * FROM accounts WHERE account = ? AND extfvk = ?")?
.exists(&[
account.to_sql()?,
encode_extended_full_viewing_key(HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY, &extfvk)
.to_sql()?,
])?
{
return Err(Error(ErrorKind::InvalidExtSK(account)));
}
let ovk = extfvk.fvk.ovk;
// Target the next block, assuming we are up-to-date.
let (height, anchor_height) = {
let (target_height, anchor_height) = get_target_and_anchor_heights(&data)?;
(target_height, i64::from(anchor_height))
};
// The goal of this SQL statement is to select the oldest notes until the required
// value has been reached, and then fetch the witnesses at the desired height for the
// selected notes. This is achieved in several steps:
//
// 1) Use a window function to create a view of all notes, ordered from oldest to
// newest, with an additional column containing a running sum:
// - Unspent notes accumulate the values of all unspent notes in that note's
// account, up to itself.
// - Spent notes accumulate the values of all notes in the transaction they were
// spent in, up to itself.
//
// 2) Select all unspent notes in the desired account, along with their running sum.
//
// 3) Select all notes for which the running sum was less than the required value, as
// well as a single note for which the sum was greater than or equal to the
// required value, bringing the sum of all selected notes across the threshold.
//
// 4) Match the selected notes against the witnesses at the desired height.
let target_value = i64::from(value + DEFAULT_FEE);
let mut stmt_select_notes = data.prepare(
"WITH selected AS (
WITH eligible AS (
SELECT id_note, diversifier, value, rcm,
SUM(value) OVER
(PARTITION BY account, spent ORDER BY id_note) AS so_far
FROM received_notes
INNER JOIN transactions ON transactions.id_tx = received_notes.tx
WHERE account = ? AND spent IS NULL AND transactions.block <= ?
)
SELECT * FROM eligible WHERE so_far < ?
UNION
SELECT * FROM (SELECT * FROM eligible WHERE so_far >= ? LIMIT 1)
), witnesses AS (
SELECT note, witness FROM sapling_witnesses
WHERE block = ?
)
SELECT selected.diversifier, selected.value, selected.rcm, witnesses.witness
FROM selected
INNER JOIN witnesses ON selected.id_note = witnesses.note",
)?;
// Select notes
let notes = stmt_select_notes.query_and_then::<_, Error, _, _>(
&[
i64::from(account),
anchor_height,
target_value,
target_value,
anchor_height,
],
|row| {
let diversifier = {
let d: Vec<_> = row.get(0)?;
if d.len() != 11 {
return Err(Error(ErrorKind::CorruptedData(
"Invalid diversifier length",
)));
}
let mut tmp = [0; 11];
tmp.copy_from_slice(&d);
Diversifier(tmp)
};
let note_value: i64 = row.get(1)?;
let rcm = {
let d: Vec<_> = row.get(2)?;
let mut tmp = FsRepr::default();
tmp.read_le(&d[..])?;
Fs::from_repr(tmp).map_err(|_| Error(ErrorKind::InvalidNote))?
};
let from = extfvk
.fvk
.vk
.into_payment_address(diversifier, &JUBJUB)
.unwrap();
let note = from.create_note(note_value as u64, rcm, &JUBJUB).unwrap();
let witness = {
let d: Vec<_> = row.get(3)?;
IncrementalWitness::read(&d[..])?
};
Ok(SelectedNoteRow {
diversifier,
note,
witness,
})
},
)?;
let notes: Vec<SelectedNoteRow> = notes.collect::<Result<_, _>>()?;
// Confirm we were able to select sufficient value
let selected_value = notes
.iter()
.fold(0, |acc, selected| acc + selected.note.value);
if selected_value < target_value as u64 {
return Err(Error(ErrorKind::InsufficientBalance(
selected_value,
target_value as u64,
)));
}
// Create the transaction
let mut builder = Builder::new(height);
for selected in notes {
builder.add_sapling_spend(
extsk.clone(),
selected.diversifier,
selected.note,
selected.witness,
)?;
}
match to {
RecipientAddress::Shielded(to) => {
builder.add_sapling_output(ovk, to.clone(), value, memo.clone())
}
RecipientAddress::Transparent(to) => builder.add_transparent_output(&to, value),
}?;
let (tx, tx_metadata) = builder.build(consensus_branch_id, prover)?;
// We only called add_sapling_output() once.
let output_index = match tx_metadata.output_index(0) {
Some(idx) => idx as i64,
None => panic!("Output 0 should exist in the transaction"),
};
let created = time::get_time();
// Update the database atomically, to ensure the result is internally consistent.
data.execute("BEGIN IMMEDIATE", NO_PARAMS)?;
// Save the transaction in the database.
let mut raw_tx = vec![];
tx.write(&mut raw_tx)?;
let mut stmt_insert_tx = data.prepare(
"INSERT INTO transactions (txid, created, expiry_height, raw)
VALUES (?, ?, ?, ?)",
)?;
stmt_insert_tx.execute(&[
tx.txid().0.to_sql()?,
created.to_sql()?,
tx.expiry_height.to_sql()?,
raw_tx.to_sql()?,
])?;
let id_tx = data.last_insert_rowid();
// Mark notes as spent.
//
// This locks the notes so they aren't selected again by a subsequent call to
// create_to_address() before this transaction has been mined (at which point the notes
// get re-marked as spent).
//
// Assumes that create_to_address() will never be called in parallel, which is a
// reasonable assumption for a light client such as a mobile phone.
let mut stmt_mark_spent_note =
data.prepare("UPDATE received_notes SET spent = ? WHERE nf = ?")?;
for spend in &tx.shielded_spends {
stmt_mark_spent_note.execute(&[id_tx.to_sql()?, spend.nullifier.to_sql()?])?;
}
// Save the sent note in the database.
// TODO: Decide how to save transparent output information.
let to_str = to.to_string();
if let Some(memo) = memo {
let mut stmt_insert_sent_note = data.prepare(
"INSERT INTO sent_notes (tx, output_index, from_account, address, value, memo)
VALUES (?, ?, ?, ?, ?, ?)",
)?;
stmt_insert_sent_note.execute(&[
id_tx.to_sql()?,
output_index.to_sql()?,
account.to_sql()?,
to_str.to_sql()?,
i64::from(value).to_sql()?,
memo.as_bytes().to_sql()?,
])?;
} else {
let mut stmt_insert_sent_note = data.prepare(
"INSERT INTO sent_notes (tx, output_index, from_account, address, value)
VALUES (?, ?, ?, ?, ?)",
)?;
stmt_insert_sent_note.execute(&[
id_tx.to_sql()?,
output_index.to_sql()?,
account.to_sql()?,
to_str.to_sql()?,
i64::from(value).to_sql()?,
])?;
}
data.execute("COMMIT", NO_PARAMS)?;
// Return the row number of the transaction, so the caller can fetch it for sending.
Ok(id_tx)
}
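
The note-selection rule described in the numbered comment inside `create_to_address` amounts to taking unspent notes oldest-first until the running total first reaches the target. A plain-Rust illustration of that rule, independent of the SQL (names are ours):

```rust
// Illustration only: oldest-first running-sum selection over in-memory values.
fn select_note_values(unspent_oldest_first: &[u64], target_value: u64) -> Vec<u64> {
    let mut selected = Vec::new();
    let mut running_sum = 0u64;
    for &value in unspent_oldest_first {
        selected.push(value);
        running_sum += value;
        // Include the single note that carries the sum across the target, then
        // stop. The caller must still check that enough value was selected.
        if running_sum >= target_value {
            break;
        }
    }
    selected
}
```
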
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use zcash_primitives::{
block::BlockHash,
prover::TxProver,
transaction::components::Amount,
zip32::{ExtendedFullViewingKey, ExtendedSpendingKey},
};
use zcash_proofs::prover::LocalTxProver;
use super::create_to_address;
use crate::{
init::{init_accounts_table, init_blocks_table, init_cache_database, init_data_database},
query::{get_balance, get_verified_balance},
scan::scan_cached_blocks,
tests::{fake_compact_block, insert_into_cache},
SAPLING_ACTIVATION_HEIGHT,
};
fn test_prover() -> impl TxProver {
match LocalTxProver::with_default_location() {
Some(tx_prover) => tx_prover,
None => {
panic!("Cannot locate the Zcash parameters. Please run zcash-fetch-params or fetch-params.sh to download the parameters, and then re-run the tests.");
}
}
}
#[test]
fn create_to_address_fails_on_incorrect_extsk() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add two accounts to the wallet
let extsk0 = ExtendedSpendingKey::master(&[]);
let extsk1 = ExtendedSpendingKey::master(&[0]);
let extfvks = [
ExtendedFullViewingKey::from(&extsk0),
ExtendedFullViewingKey::from(&extsk1),
];
init_accounts_table(&db_data, &extfvks).unwrap();
let to = extsk0.default_address().unwrap().1.into();
// Invalid extsk for the given account should cause an error
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk1),
&to,
Amount::from_u64(1).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(e.to_string(), "Incorrect ExtendedSpendingKey for account 0"),
}
match create_to_address(
db_data,
1,
test_prover(),
(1, &extsk0),
&to,
Amount::from_u64(1).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(e.to_string(), "Incorrect ExtendedSpendingKey for account 1"),
}
}
#[test]
fn create_to_address_fails_with_no_blocks() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvks = [ExtendedFullViewingKey::from(&extsk)];
init_accounts_table(&db_data, &extfvks).unwrap();
let to = extsk.default_address().unwrap().1.into();
// We cannot do anything if we aren't synchronised
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(1).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(e.to_string(), "Must scan blocks first"),
}
}
#[test]
fn create_to_address_fails_on_insufficient_balance() {
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
init_blocks_table(&db_data, 1, BlockHash([1; 32]), 1, &[]).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvks = [ExtendedFullViewingKey::from(&extsk)];
init_accounts_table(&db_data, &extfvks).unwrap();
let to = extsk.default_address().unwrap().1.into();
// Account balance should be zero
assert_eq!(get_balance(db_data, 0).unwrap(), Amount::zero());
// We cannot spend anything
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(1).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
"Insufficient balance (have 0, need 10001 including fee)"
),
}
}
#[test]
fn create_to_address_fails_on_unverified_notes() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
scan_cached_blocks(db_cache, db_data).unwrap();
// Verified balance matches total balance
assert_eq!(get_balance(db_data, 0).unwrap(), value);
assert_eq!(get_verified_balance(db_data, 0).unwrap(), value);
// Add more funds to the wallet in a second note
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 1,
cb.hash(),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
scan_cached_blocks(db_cache, db_data).unwrap();
// Verified balance does not include the second note
assert_eq!(get_balance(db_data, 0).unwrap(), value + value);
assert_eq!(get_verified_balance(db_data, 0).unwrap(), value);
// Spend fails because there are insufficient verified notes
let extsk2 = ExtendedSpendingKey::master(&[]);
let to = extsk2.default_address().unwrap().1.into();
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(70000).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
"Insufficient balance (have 50000, need 80000 including fee)"
),
}
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 2 to 9 until just before the second
// note is verified
for i in 2..10 {
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + i,
cb.hash(),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
}
scan_cached_blocks(db_cache, db_data).unwrap();
// Second spend still fails
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(70000).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
"Insufficient balance (have 50000, need 80000 including fee)"
),
}
// Mine block SAPLING_ACTIVATION_HEIGHT + 10 so that the second note becomes verified
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 10,
cb.hash(),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
scan_cached_blocks(db_cache, db_data).unwrap();
// Second spend should now succeed
create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(70000).unwrap(),
None,
)
.unwrap();
}
#[test]
fn create_to_address_fails_on_locked_notes() {
let cache_file = NamedTempFile::new().unwrap();
let db_cache = cache_file.path();
init_cache_database(&db_cache).unwrap();
let data_file = NamedTempFile::new().unwrap();
let db_data = data_file.path();
init_data_database(&db_data).unwrap();
// Add an account to the wallet
let extsk = ExtendedSpendingKey::master(&[]);
let extfvk = ExtendedFullViewingKey::from(&extsk);
init_accounts_table(&db_data, &[extfvk.clone()]).unwrap();
// Add funds to the wallet in a single note
let value = Amount::from_u64(50000).unwrap();
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT,
BlockHash([0; 32]),
extfvk.clone(),
value,
);
insert_into_cache(db_cache, &cb);
scan_cached_blocks(db_cache, db_data).unwrap();
assert_eq!(get_balance(db_data, 0).unwrap(), value);
// Send some of the funds to another address
let extsk2 = ExtendedSpendingKey::master(&[]);
let to = extsk2.default_address().unwrap().1.into();
create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(15000).unwrap(),
None,
)
.unwrap();
// A second spend fails because there are no usable notes
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(2000).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
"Insufficient balance (have 0, need 12000 including fee)"
),
}
// Mine blocks SAPLING_ACTIVATION_HEIGHT + 1 to 21 (that don't send us funds)
// until just before the first transaction expires
for i in 1..22 {
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + i,
cb.hash(),
ExtendedFullViewingKey::from(&ExtendedSpendingKey::master(&[i as u8])),
value,
);
insert_into_cache(db_cache, &cb);
}
scan_cached_blocks(db_cache, db_data).unwrap();
// Second spend still fails
match create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(2000).unwrap(),
None,
) {
Ok(_) => panic!("Should have failed"),
Err(e) => assert_eq!(
e.to_string(),
"Insufficient balance (have 0, need 12000 including fee)"
),
}
// Mine block SAPLING_ACTIVATION_HEIGHT + 22 so that the first transaction expires
let (cb, _) = fake_compact_block(
SAPLING_ACTIVATION_HEIGHT + 22,
cb.hash(),
ExtendedFullViewingKey::from(&ExtendedSpendingKey::master(&[22])),
value,
);
insert_into_cache(db_cache, &cb);
scan_cached_blocks(db_cache, db_data).unwrap();
// Second spend should now succeed
create_to_address(
db_data,
1,
test_prover(),
(0, &extsk),
&to,
Amount::from_u64(2000).unwrap(),
None,
)
.unwrap();
}
}

View File

@@ -6,18 +6,26 @@ authors = [
]
[dependencies]
aes = "0.2"
aes = "0.3"
blake2b_simd = "0.5"
blake2s_simd = "0.5"
byteorder = "1"
crypto_api_chachapoly = "0.1"
crypto_api_chachapoly = "0.2.1"
ff = { path = "../ff" }
fpe = "0.1"
fpe = "0.2"
hex = "0.3"
lazy_static = "1"
pairing = { path = "../pairing" }
rand = "0.4"
sapling-crypto = { path = "../sapling-crypto" }
rand = "0.7"
rand_core = "0.5"
rand_os = "0.2"
ripemd160 = { version = "0.8", optional = true }
secp256k1 = { version = "=0.15.0", optional = true }
sha2 = "0.8"
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
[dev-dependencies]
hex-literal = "0.1"
rand_xorshift = "0.2"
[features]
transparent-inputs = ["ripemd160", "secp256k1"]

View File

@@ -0,0 +1,25 @@
#![feature(test)]
extern crate pairing;
extern crate rand_core;
extern crate rand_os;
extern crate test;
extern crate zcash_primitives;
use pairing::bls12_381::Bls12;
use rand_core::RngCore;
use rand_os::OsRng;
use zcash_primitives::jubjub::JubjubBls12;
use zcash_primitives::pedersen_hash::{pedersen_hash, Personalization};
#[bench]
fn bench_pedersen_hash(b: &mut test::Bencher) {
let params = JubjubBls12::new();
let rng = &mut OsRng;
let bits = (0..510)
.map(|_| (rng.next_u32() % 2) != 0)
.collect::<Vec<_>>();
let personalization = Personalization::MerkleTree(31);
b.iter(|| pedersen_hash::<Bls12, _>(personalization, bits.clone(), &params));
}

View File

@@ -1,5 +1,6 @@
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use hex;
use sha2::{Digest, Sha256};
use std::fmt;
use std::io::{self, Read, Write};
use std::ops::Deref;
@@ -17,14 +18,31 @@ impl fmt::Display for BlockHash {
}
}
impl BlockHash {
/// Constructs a [`BlockHash`] from the given slice.
///
/// # Panics
///
/// This function will panic if the slice is not exactly 32 bytes.
pub fn from_slice(bytes: &[u8]) -> Self {
assert_eq!(bytes.len(), 32);
let mut hash = [0; 32];
hash.copy_from_slice(&bytes);
BlockHash(hash)
}
}
/// A Zcash block header.
pub struct BlockHeader(BlockHeaderData);
pub struct BlockHeader {
hash: BlockHash,
data: BlockHeaderData,
}
impl Deref for BlockHeader {
type Target = BlockHeaderData;
fn deref(&self) -> &BlockHeaderData {
&self.0
&self.data
}
}
@@ -40,12 +58,31 @@ pub struct BlockHeaderData {
}
impl BlockHeaderData {
pub fn freeze(self) -> BlockHeader {
BlockHeader(self)
pub fn freeze(self) -> io::Result<BlockHeader> {
BlockHeader::from_data(self)
}
}
impl BlockHeader {
fn from_data(data: BlockHeaderData) -> io::Result<Self> {
let mut header = BlockHeader {
hash: BlockHash([0; 32]),
data,
};
let mut raw = vec![];
header.write(&mut raw)?;
header
.hash
.0
.copy_from_slice(&Sha256::digest(&Sha256::digest(&raw)));
Ok(header)
}
/// Returns the hash of this header.
pub fn hash(&self) -> BlockHash {
self.hash
}
pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
let version = reader.read_i32::<LittleEndian>()?;
@@ -66,7 +103,7 @@ impl BlockHeader {
let solution = Vector::read(&mut reader, |r| r.read_u8())?;
Ok(BlockHeader(BlockHeaderData {
BlockHeader::from_data(BlockHeaderData {
version,
prev_block,
merkle_root,
@@ -75,7 +112,7 @@ impl BlockHeader {
bits,
nonce,
solution,
}))
})
}
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
@@ -202,6 +239,10 @@ mod tests {
#[test]
fn header_read_write() {
let header = BlockHeader::read(&HEADER_MAINNET_415000[..]).unwrap();
assert_eq!(
format!("{}", header.hash()),
"0000000001ab37793ce771262b2ffa082519aa3fe891250a1adb43baaf856168"
);
let mut encoded = Vec::with_capacity(HEADER_MAINNET_415000.len());
header.write(&mut encoded).unwrap();
assert_eq!(&HEADER_MAINNET_415000[..], &encoded[..]);
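
The hex string asserted here is the header's double SHA-256 hash as rendered by `BlockHash`'s `Display` impl. A standalone sketch of the hashing step performed by `BlockHeader::from_data` above (the helper name is ours):

```rust
use sha2::{Digest, Sha256};

// SHA-256 applied twice to the serialized header bytes, as in from_data.
fn header_hash(raw_header: &[u8]) -> [u8; 32] {
    let mut hash = [0u8; 32];
    hash.copy_from_slice(&Sha256::digest(&Sha256::digest(raw_header)));
    hash
}
```
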

View File

@@ -2,39 +2,31 @@
/// This is chosen to be some random string that we couldn't have anticipated when we designed
/// the algorithm, for rigidity purposes.
/// We deliberately use an ASCII hex string of 32 bytes here.
pub const GH_FIRST_BLOCK: &'static [u8; 64]
= b"096b36a5804bfacef1691e173c366a47ff5ba84a44f26ddd7e8d9f79d5b42df0";
pub const GH_FIRST_BLOCK: &'static [u8; 64] =
b"096b36a5804bfacef1691e173c366a47ff5ba84a44f26ddd7e8d9f79d5b42df0";
// BLAKE2s invocation personalizations
/// BLAKE2s Personalization for CRH^ivk = BLAKE2s(ak | nk)
pub const CRH_IVK_PERSONALIZATION: &'static [u8; 8]
= b"Zcashivk";
pub const CRH_IVK_PERSONALIZATION: &'static [u8; 8] = b"Zcashivk";
/// BLAKE2s Personalization for PRF^nf = BLAKE2s(nk | rho)
pub const PRF_NF_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_nf";
pub const PRF_NF_PERSONALIZATION: &'static [u8; 8] = b"Zcash_nf";
// Group hash personalizations
/// BLAKE2s Personalization for Pedersen hash generators.
pub const PEDERSEN_HASH_GENERATORS_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_PH";
pub const PEDERSEN_HASH_GENERATORS_PERSONALIZATION: &'static [u8; 8] = b"Zcash_PH";
/// BLAKE2s Personalization for the group hash for key diversification
pub const KEY_DIVERSIFICATION_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_gd";
pub const KEY_DIVERSIFICATION_PERSONALIZATION: &'static [u8; 8] = b"Zcash_gd";
/// BLAKE2s Personalization for the spending key base point
pub const SPENDING_KEY_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_G_";
pub const SPENDING_KEY_GENERATOR_PERSONALIZATION: &'static [u8; 8] = b"Zcash_G_";
/// BLAKE2s Personalization for the proof generation key base point
pub const PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_H_";
pub const PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION: &'static [u8; 8] = b"Zcash_H_";
/// BLAKE2s Personalization for the value commitment generator for the value
pub const VALUE_COMMITMENT_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_cv";
pub const VALUE_COMMITMENT_GENERATOR_PERSONALIZATION: &'static [u8; 8] = b"Zcash_cv";
/// BLAKE2s Personalization for the nullifier position generator (for computing rho)
pub const NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_J_";
pub const NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION: &'static [u8; 8] = b"Zcash_J_";

View File

@@ -1,14 +1,8 @@
use jubjub::{
JubjubEngine,
PrimeOrder,
edwards
};
use jubjub::{edwards, JubjubEngine, PrimeOrder};
use ff::{
PrimeField
};
use ff::PrimeField;
use blake2_rfc::blake2s::Blake2s;
use blake2s_simd::Params;
use constants;
/// Produces a random point in the Jubjub curve.
@@ -17,21 +11,22 @@ use constants;
pub fn group_hash<E: JubjubEngine>(
tag: &[u8],
personalization: &[u8],
params: &E::Params
) -> Option<edwards::Point<E, PrimeOrder>>
{
params: &E::Params,
) -> Option<edwards::Point<E, PrimeOrder>> {
assert_eq!(personalization.len(), 8);
// Check to see that scalar field is 255 bits
assert!(E::Fr::NUM_BITS == 255);
let mut h = Blake2s::with_params(32, &[], &[], personalization);
h.update(constants::GH_FIRST_BLOCK);
h.update(tag);
let h = h.finalize().as_ref().to_vec();
assert!(h.len() == 32);
let h = Params::new()
.hash_length(32)
.personal(personalization)
.to_state()
.update(constants::GH_FIRST_BLOCK)
.update(tag)
.finalize();
match edwards::Point::<E, _>::read(&h[..], params) {
match edwards::Point::<E, _>::read(h.as_ref(), params) {
Ok(p) => {
let p = p.mul_by_cofactor(params);
@@ -40,7 +35,7 @@ pub fn group_hash<E: JubjubEngine>(
} else {
None
}
},
Err(_) => None
}
Err(_) => None,
}
}
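
For reference, the `blake2s_simd` call pattern adopted above can also be used on its own. The helper below is an illustrative sketch (its name and inputs are ours) of the personalized, 32-byte BLAKE2s invocation that `group_hash` performs before attempting to decode the digest as a curve point.

```rust
use blake2s_simd::Params;

// 32-byte BLAKE2s with an 8-byte personalization, mirroring group_hash above.
fn blake2s_32(personalization: &[u8; 8], data: &[u8]) -> [u8; 32] {
    let hash = Params::new()
        .hash_length(32)
        .personal(&personalization[..])
        .to_state()
        .update(data)
        .finalize();
    let mut out = [0u8; 32];
    out.copy_from_slice(hash.as_ref());
    out
}
```
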

View File

@@ -1,24 +1,12 @@
use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr, SqrtField};
use super::{
JubjubEngine,
JubjubParams,
Unknown,
PrimeOrder,
montgomery
};
use super::{montgomery, JubjubEngine, JubjubParams, PrimeOrder, Unknown};
use rand::{
Rng
};
use rand_core::RngCore;
use std::marker::PhantomData;
use std::io::{
self,
Write,
Read
};
use std::io::{self, Read, Write};
// Represents the affine point (X/Z, Y/Z) via the extended
// twisted Edwards coordinates.
@@ -31,46 +19,38 @@ pub struct Point<E: JubjubEngine, Subgroup> {
y: E::Fr,
t: E::Fr,
z: E::Fr,
_marker: PhantomData<Subgroup>
_marker: PhantomData<Subgroup>,
}
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2>
{
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2> {
Point {
x: from.x,
y: from.y,
t: from.t,
z: from.z,
_marker: PhantomData
_marker: PhantomData,
}
}
impl<E: JubjubEngine> From<&Point<E, Unknown>> for Point<E, Unknown>
{
fn from(p: &Point<E, Unknown>) -> Point<E, Unknown>
{
impl<E: JubjubEngine> From<&Point<E, Unknown>> for Point<E, Unknown> {
fn from(p: &Point<E, Unknown>) -> Point<E, Unknown> {
p.clone()
}
}
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown>
{
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown>
{
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown> {
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown> {
convert_subgroup(&p)
}
}
impl<E: JubjubEngine> From<&Point<E, PrimeOrder>> for Point<E, Unknown>
{
fn from(p: &Point<E, PrimeOrder>) -> Point<E, Unknown>
{
impl<E: JubjubEngine> From<&Point<E, PrimeOrder>> for Point<E, Unknown> {
fn from(p: &Point<E, PrimeOrder>) -> Point<E, Unknown> {
convert_subgroup(p)
}
}
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup>
{
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup> {
fn clone(&self) -> Self {
convert_subgroup(self)
}
@@ -101,11 +81,7 @@ impl<E: JubjubEngine, Subgroup> PartialEq for Point<E, Subgroup> {
}
impl<E: JubjubEngine> Point<E, Unknown> {
pub fn read<R: Read>(
reader: R,
params: &E::Params
) -> io::Result<Self>
{
pub fn read<R: Read>(reader: R, params: &E::Params) -> io::Result<Self> {
let mut y_repr = <E::Fr as PrimeField>::Repr::default();
y_repr.read_le(reader)?;
@@ -113,22 +89,18 @@ impl<E: JubjubEngine> Point<E, Unknown> {
y_repr.as_mut()[3] &= 0x7fffffffffffffff;
match E::Fr::from_repr(y_repr) {
Ok(y) => {
match Self::get_for_y(y, x_sign, params) {
Some(p) => Ok(p),
None => {
Err(io::Error::new(io::ErrorKind::InvalidInput, "not on curve"))
}
}
Ok(y) => match Self::get_for_y(y, x_sign, params) {
Some(p) => Ok(p),
None => Err(io::Error::new(io::ErrorKind::InvalidInput, "not on curve")),
},
Err(_) => {
Err(io::Error::new(io::ErrorKind::InvalidInput, "y is not in field"))
}
Err(_) => Err(io::Error::new(
io::ErrorKind::InvalidInput,
"y is not in field",
)),
}
}
pub fn get_for_y(y: E::Fr, sign: bool, params: &E::Params) -> Option<Self>
{
pub fn get_for_y(y: E::Fr, sign: bool, params: &E::Params) -> Option<Self> {
// Given a y on the curve, x^2 = (y^2 - 1) / (dy^2 + 1)
// This is defined for all valid y-coordinates,
// as dy^2 + 1 = 0 has no solution in Fr.
@@ -164,33 +136,30 @@ impl<E: JubjubEngine> Point<E, Unknown> {
y: y,
t: t,
z: E::Fr::one(),
_marker: PhantomData
_marker: PhantomData,
})
},
None => None
}
None => None,
}
},
None => None
}
None => None,
}
}
/// This guarantees the point is in the prime order subgroup
#[must_use]
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder>
{
let tmp = self.double(params)
.double(params)
.double(params);
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder> {
let tmp = self.double(params).double(params).double(params);
convert_subgroup(&tmp)
}
pub fn rand<R: Rng>(rng: &mut R, params: &E::Params) -> Self
{
pub fn rand<R: RngCore>(rng: &mut R, params: &E::Params) -> Self {
loop {
let y: E::Fr = rng.gen();
let y = E::Fr::random(rng);
let sign = rng.next_u32() % 2 != 0;
if let Some(p) = Self::get_for_y(y, rng.gen(), params) {
if let Some(p) = Self::get_for_y(y, sign, params) {
return p;
}
}
@@ -198,11 +167,7 @@ impl<E: JubjubEngine> Point<E, Unknown> {
}
impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
pub fn write<W: Write>(
&self,
writer: W
) -> io::Result<()>
{
pub fn write<W: Write>(&self, writer: W) -> io::Result<()> {
let (x, y) = self.into_xy();
assert_eq!(E::Fr::NUM_BITS, 255);
@@ -217,16 +182,12 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
}
/// Convert from a Montgomery point
pub fn from_montgomery(
m: &montgomery::Point<E, Subgroup>,
params: &E::Params
) -> Self
{
pub fn from_montgomery(m: &montgomery::Point<E, Subgroup>, params: &E::Params) -> Self {
match m.into_xy() {
None => {
// Map the point at infinity to the neutral element.
Point::zero()
},
}
Some((x, y)) => {
// The map from a Montgomery curve is defined as:
// (x, y) -> (u, v) where
@@ -259,7 +220,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
y: neg1,
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData
_marker: PhantomData,
}
} else {
// Otherwise, as stated above, the mapping is still
@@ -318,7 +279,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
y: v,
t: t,
z: z,
_marker: PhantomData
_marker: PhantomData,
}
}
}
@@ -341,12 +302,11 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
y: E::Fr::one(),
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData
_marker: PhantomData,
}
}
pub fn into_xy(&self) -> (E::Fr, E::Fr)
{
pub fn into_xy(&self) -> (E::Fr, E::Fr) {
let zinv = self.z.inverse().unwrap();
let mut x = self.x;
@@ -433,13 +393,12 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
y: y3,
t: t3,
z: z3,
_marker: PhantomData
_marker: PhantomData,
}
}
#[must_use]
pub fn add(&self, other: &Self, params: &E::Params) -> Self
{
pub fn add(&self, other: &Self, params: &E::Params) -> Self {
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
// 3.1 Unified Addition in E^e
@@ -506,17 +465,12 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
y: y3,
t: t3,
z: z3,
_marker: PhantomData
_marker: PhantomData,
}
}
#[must_use]
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(
&self,
scalar: S,
params: &E::Params
) -> Self
{
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(&self, scalar: S, params: &E::Params) -> Self {
// Standard double-and-add scalar multiplication
let mut res = Self::zero();

View File

@@ -24,10 +24,7 @@ use group_hash::group_hash;
use constants;
use pairing::bls12_381::{
Bls12,
Fr
};
use pairing::bls12_381::{Bls12, Fr};
/// This is an implementation of the twisted Edwards Jubjub curve.
pub mod edwards;
@@ -44,11 +41,11 @@ pub mod tests;
/// Point of unknown order.
#[derive(Debug)]
pub enum Unknown { }
pub enum Unknown {}
/// Point of prime order.
#[derive(Debug)]
pub enum PrimeOrder { }
pub enum PrimeOrder {}
/// Fixed generators of the Jubjub curve of unknown
/// exponent.
@@ -80,7 +77,7 @@ pub enum FixedGenerators {
/// base at spend time.
SpendingKeyGenerator = 5,
Max = 6
Max = 6,
}
pub trait ToUniform {
@@ -151,10 +148,18 @@ pub struct JubjubBls12 {
}
impl JubjubParams<Bls12> for JubjubBls12 {
fn edwards_d(&self) -> &Fr { &self.edwards_d }
fn montgomery_a(&self) -> &Fr { &self.montgomery_a }
fn montgomery_2a(&self) -> &Fr { &self.montgomery_2a }
fn scale(&self) -> &Fr { &self.scale }
fn edwards_d(&self) -> &Fr {
&self.edwards_d
}
fn montgomery_a(&self) -> &Fr {
&self.montgomery_a
}
fn montgomery_2a(&self) -> &Fr {
&self.montgomery_2a
}
fn scale(&self) -> &Fr {
&self.scale
}
fn pedersen_hash_generators(&self) -> &[edwards::Point<Bls12, PrimeOrder>] {
&self.pedersen_hash_generators
}
@@ -170,12 +175,10 @@ impl JubjubParams<Bls12> for JubjubBls12 {
fn pedersen_circuit_generators(&self) -> &[Vec<Vec<(Fr, Fr)>>] {
&self.pedersen_circuit_generators
}
fn generator(&self, base: FixedGenerators) -> &edwards::Point<Bls12, PrimeOrder>
{
fn generator(&self, base: FixedGenerators) -> &edwards::Point<Bls12, PrimeOrder> {
&self.fixed_base_generators[base as usize]
}
fn circuit_generators(&self, base: FixedGenerators) -> &[Vec<(Fr, Fr)>]
{
fn circuit_generators(&self, base: FixedGenerators) -> &[Vec<(Fr, Fr)>] {
&self.fixed_base_circuit_generators[base as usize][..]
}
fn pedersen_hash_exp_window_size() -> u32 {
@@ -191,13 +194,19 @@ impl JubjubBls12 {
let mut tmp_params = JubjubBls12 {
// d = -(10240/10241)
edwards_d: Fr::from_str("19257038036680949359750312669786877991949435402254120286184196891950884077233").unwrap(),
edwards_d: Fr::from_str(
"19257038036680949359750312669786877991949435402254120286184196891950884077233",
)
.unwrap(),
// A = 40962
montgomery_a: montgomery_a,
// 2A = 2.A
montgomery_2a: montgomery_2a,
// scaling factor = sqrt(4 / (a - d))
scale: Fr::from_str("17814886934372412843466061268024708274627479829237077604635722030778476050649").unwrap(),
scale: Fr::from_str(
"17814886934372412843466061268024708274627479829237077604635722030778476050649",
)
.unwrap(),
// We'll initialize these below
pedersen_hash_generators: vec![],
@@ -210,19 +219,14 @@ impl JubjubBls12 {
fn find_group_hash<E: JubjubEngine>(
m: &[u8],
personalization: &[u8; 8],
params: &E::Params
) -> edwards::Point<E, PrimeOrder>
{
params: &E::Params,
) -> edwards::Point<E, PrimeOrder> {
let mut tag = m.to_vec();
let i = tag.len();
tag.push(0u8);
loop {
let gh = group_hash(
&tag,
personalization,
params
);
let gh = group_hash(&tag, personalization, params);
// We don't want to overflow and start reusing generators
assert!(tag[i] != u8::max_value());
@@ -239,18 +243,18 @@ impl JubjubBls12 {
let mut pedersen_hash_generators = vec![];
for m in 0..5 {
use byteorder::{WriteBytesExt, LittleEndian};
use byteorder::{LittleEndian, WriteBytesExt};
let mut segment_number = [0u8; 4];
(&mut segment_number[0..4]).write_u32::<LittleEndian>(m).unwrap();
(&mut segment_number[0..4])
.write_u32::<LittleEndian>(m)
.unwrap();
pedersen_hash_generators.push(
find_group_hash(
&segment_number,
constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION,
&tmp_params
)
);
pedersen_hash_generators.push(find_group_hash(
&segment_number,
constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION,
&tmp_params,
));
}
// Check for duplicates, far worse than spec inconsistencies!
@@ -259,7 +263,7 @@ impl JubjubBls12 {
panic!("Neutral element!");
}
for p2 in pedersen_hash_generators.iter().skip(i+1) {
for p2 in pedersen_hash_generators.iter().skip(i + 1) {
if p1 == p2 {
panic!("Duplicate generator!");
}
@@ -307,25 +311,46 @@ impl JubjubBls12 {
// Create the bases for other parts of the protocol
{
let mut fixed_base_generators = vec![edwards::Point::zero(); FixedGenerators::Max as usize];
let mut fixed_base_generators =
vec![edwards::Point::zero(); FixedGenerators::Max as usize];
fixed_base_generators[FixedGenerators::ProofGenerationKey as usize] =
find_group_hash(&[], constants::PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::ProofGenerationKey as usize] = find_group_hash(
&[],
constants::PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION,
&tmp_params,
);
fixed_base_generators[FixedGenerators::NoteCommitmentRandomness as usize] =
find_group_hash(b"r", constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION, &tmp_params);
find_group_hash(
b"r",
constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION,
&tmp_params,
);
fixed_base_generators[FixedGenerators::NullifierPosition as usize] =
find_group_hash(&[], constants::NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::NullifierPosition as usize] = find_group_hash(
&[],
constants::NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION,
&tmp_params,
);
fixed_base_generators[FixedGenerators::ValueCommitmentValue as usize] =
find_group_hash(b"v", constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::ValueCommitmentValue as usize] = find_group_hash(
b"v",
constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION,
&tmp_params,
);
fixed_base_generators[FixedGenerators::ValueCommitmentRandomness as usize] =
find_group_hash(b"r", constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION, &tmp_params);
find_group_hash(
b"r",
constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION,
&tmp_params,
);
fixed_base_generators[FixedGenerators::SpendingKeyGenerator as usize] =
find_group_hash(&[], constants::SPENDING_KEY_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::SpendingKeyGenerator as usize] = find_group_hash(
&[],
constants::SPENDING_KEY_GENERATOR_PERSONALIZATION,
&tmp_params,
);
// Check for duplicates, far worse than spec inconsistencies!
for (i, p1) in fixed_base_generators.iter().enumerate() {
@@ -333,7 +358,7 @@ impl JubjubBls12 {
panic!("Neutral element!");
}
for p2 in fixed_base_generators.iter().skip(i+1) {
for p2 in fixed_base_generators.iter().skip(i + 1) {
if p1 == p2 {
panic!("Duplicate generator!");
}
@@ -413,10 +438,14 @@ fn test_jubjub_bls12() {
let test_repr = hex!("9d12b88b08dcbef8a11ee0712d94cb236ee2f4ca17317075bfafc82ce3139d31");
let p = edwards::Point::<Bls12, _>::read(&test_repr[..], &params).unwrap();
let q = edwards::Point::<Bls12, _>::get_for_y(
Fr::from_str("22440861827555040311190986994816762244378363690614952020532787748720529117853").unwrap(),
Fr::from_str(
"22440861827555040311190986994816762244378363690614952020532787748720529117853",
)
.unwrap(),
false,
&params
).unwrap();
&params,
)
.unwrap();
assert!(p == q);
@@ -424,10 +453,14 @@ fn test_jubjub_bls12() {
let test_repr = hex!("9d12b88b08dcbef8a11ee0712d94cb236ee2f4ca17317075bfafc82ce3139db1");
let p = edwards::Point::<Bls12, _>::read(&test_repr[..], &params).unwrap();
let q = edwards::Point::<Bls12, _>::get_for_y(
Fr::from_str("22440861827555040311190986994816762244378363690614952020532787748720529117853").unwrap(),
Fr::from_str(
"22440861827555040311190986994816762244378363690614952020532787748720529117853",
)
.unwrap(),
true,
&params
).unwrap();
&params,
)
.unwrap();
assert!(p == q);
}
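
For reference on what get_for_y does in the two assertions above, here is a hedged sketch of the decompression algebra. On the twisted Edwards form of Jubjub, $-x^2 + y^2 = 1 + d\,x^2 y^2$ with the d defined earlier in this file, so given $y$,

\[ x^2 = \frac{y^2 - 1}{d\,y^2 + 1}, \]

and the boolean argument selects which square root to return by the parity of $x$ (the tests elsewhere in this change check p.into_xy().0.into_repr().is_odd() == sign). If the right-hand side is a quadratic non-residue, get_for_y returns None.
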

View File

@@ -1,16 +1,8 @@
use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr, SqrtField};
use super::{
JubjubEngine,
JubjubParams,
Unknown,
PrimeOrder,
edwards
};
use super::{edwards, JubjubEngine, JubjubParams, PrimeOrder, Unknown};
use rand::{
Rng
};
use rand_core::RngCore;
use std::marker::PhantomData;
@@ -19,29 +11,25 @@ pub struct Point<E: JubjubEngine, Subgroup> {
x: E::Fr,
y: E::Fr,
infinity: bool,
_marker: PhantomData<Subgroup>
_marker: PhantomData<Subgroup>,
}
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2>
{
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2> {
Point {
x: from.x,
y: from.y,
infinity: from.infinity,
_marker: PhantomData
_marker: PhantomData,
}
}
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown>
{
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown>
{
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown> {
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown> {
convert_subgroup(&p)
}
}
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup>
{
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup> {
fn clone(&self) -> Self {
convert_subgroup(self)
}
@@ -52,16 +40,13 @@ impl<E: JubjubEngine, Subgroup> PartialEq for Point<E, Subgroup> {
match (self.infinity, other.infinity) {
(true, true) => true,
(true, false) | (false, true) => false,
(false, false) => {
self.x == other.x && self.y == other.y
}
(false, false) => self.x == other.x && self.y == other.y,
}
}
}
impl<E: JubjubEngine> Point<E, Unknown> {
pub fn get_for_x(x: E::Fr, sign: bool, params: &E::Params) -> Option<Self>
{
pub fn get_for_x(x: E::Fr, sign: bool, params: &E::Params) -> Option<Self> {
// Given an x on the curve, y = sqrt(x^3 + A*x^2 + x)
let mut x2 = x;
@@ -83,33 +68,28 @@ impl<E: JubjubEngine> Point<E, Unknown> {
x: x,
y: y,
infinity: false,
_marker: PhantomData
})
},
None => None
_marker: PhantomData,
});
}
None => None,
}
}
/// This guarantees the point is in the prime order subgroup
#[must_use]
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder>
{
let tmp = self.double(params)
.double(params)
.double(params);
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder> {
let tmp = self.double(params).double(params).double(params);
convert_subgroup(&tmp)
}
pub fn rand<R: Rng>(rng: &mut R, params: &E::Params) -> Self
{
pub fn rand<R: RngCore>(rng: &mut R, params: &E::Params) -> Self {
loop {
let x: E::Fr = rng.gen();
let x = E::Fr::random(rng);
let sign = rng.next_u32() % 2 != 0;
match Self::get_for_x(x, rng.gen(), params) {
Some(p) => {
return p
},
match Self::get_for_x(x, sign, params) {
Some(p) => return p,
None => {}
}
}
@@ -118,11 +98,7 @@ impl<E: JubjubEngine> Point<E, Unknown> {
impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
/// Convert from an Edwards point
pub fn from_edwards(
e: &edwards::Point<E, Subgroup>,
params: &E::Params
) -> Self
{
pub fn from_edwards(e: &edwards::Point<E, Subgroup>, params: &E::Params) -> Self {
let (x, y) = e.into_xy();
if y == E::Fr::one() {
@@ -150,7 +126,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
x: E::Fr::zero(),
y: E::Fr::zero(),
infinity: false,
_marker: PhantomData
_marker: PhantomData,
}
} else {
// The mapping is defined as above.
@@ -177,7 +153,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
x: u,
y: v,
infinity: false,
_marker: PhantomData
_marker: PhantomData,
}
}
}
@@ -198,12 +174,11 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
x: E::Fr::zero(),
y: E::Fr::zero(),
infinity: true,
_marker: PhantomData
_marker: PhantomData,
}
}
pub fn into_xy(&self) -> Option<(E::Fr, E::Fr)>
{
pub fn into_xy(&self) -> Option<(E::Fr, E::Fr)> {
if self.infinity {
None
} else {
@@ -273,13 +248,12 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
x: x3,
y: y3,
infinity: false,
_marker: PhantomData
_marker: PhantomData,
}
}
#[must_use]
pub fn add(&self, other: &Self, params: &E::Params) -> Self
{
pub fn add(&self, other: &Self, params: &E::Params) -> Self {
// This is a standard affine point addition formula
// See 4.3.2 The group law for Weierstrass curves
// Montgomery curves and the Montgomery Ladder
@@ -302,7 +276,10 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
{
let mut tmp = other.x;
tmp.sub_assign(&self.x);
delta.mul_assign(&tmp.inverse().expect("self.x != other.x, so this must be nonzero"));
delta.mul_assign(
&tmp.inverse()
.expect("self.x != other.x, so this must be nonzero"),
);
}
let mut x3 = delta;
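
The rest of this addition hunk is elided; as a hedged summary, for $x_1 \neq x_2$ the code evaluates the usual affine chord formula on the Montgomery curve $y^2 = x^3 + A x^2 + x$ (taking $B = 1$, as this crate does):

\[ \lambda = \frac{y_2 - y_1}{x_2 - x_1}, \qquad x_3 = \lambda^2 - A - x_1 - x_2, \qquad y_3 = \lambda\,(x_1 - x_3) - y_1. \]
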
@@ -321,7 +298,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
x: x3,
y: y3,
infinity: false,
_marker: PhantomData
_marker: PhantomData,
}
}
}
@@ -329,12 +306,7 @@ impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
}
#[must_use]
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(
&self,
scalar: S,
params: &E::Params
) -> Self
{
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(&self, scalar: S, params: &E::Params) -> Self {
// Standard double-and-add scalar multiplication
let mut res = Self::zero();

View File

@@ -1,20 +1,9 @@
use super::{
JubjubEngine,
JubjubParams,
PrimeOrder,
montgomery,
edwards
};
use super::{edwards, montgomery, JubjubEngine, JubjubParams, PrimeOrder};
use ff::{
Field,
PrimeField,
PrimeFieldRepr,
SqrtField,
LegendreSymbol
};
use ff::{Field, LegendreSymbol, PrimeField, PrimeFieldRepr, SqrtField};
use rand::{XorShiftRng, SeedableRng, Rand};
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
pub fn test_suite<E: JubjubEngine>(params: &E::Params) {
test_back_and_forth::<E>(params);
@@ -29,12 +18,7 @@ pub fn test_suite<E: JubjubEngine>(params: &E::Params) {
test_read_write::<E>(params);
}
fn is_on_mont_curve<E: JubjubEngine, P: JubjubParams<E>>(
x: E::Fr,
y: E::Fr,
params: &P
) -> bool
{
fn is_on_mont_curve<E: JubjubEngine, P: JubjubParams<E>>(x: E::Fr, y: E::Fr, params: &P) -> bool {
let mut lhs = y;
lhs.square();
@@ -55,9 +39,8 @@ fn is_on_mont_curve<E: JubjubEngine, P: JubjubParams<E>>(
fn is_on_twisted_edwards_curve<E: JubjubEngine, P: JubjubParams<E>>(
x: E::Fr,
y: E::Fr,
params: &P
) -> bool
{
params: &P,
) -> bool {
let mut x2 = x;
x2.square();
@@ -78,7 +61,10 @@ fn is_on_twisted_edwards_curve<E: JubjubEngine, P: JubjubParams<E>>(
}
fn test_loworder<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let inf = montgomery::Point::zero();
// try to find a point of order 8
@@ -109,15 +95,18 @@ fn test_loworder<E: JubjubEngine>(params: &E::Params) {
fn test_mul_associativity<E: JubjubEngine>(params: &E::Params) {
use self::edwards::Point;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..100 {
// Pick a random point and multiply it by the cofactor
let base = Point::<E, _>::rand(rng, params).mul_by_cofactor(params);
let mut a = E::Fs::rand(rng);
let b = E::Fs::rand(rng);
let c = E::Fs::rand(rng);
let mut a = E::Fs::random(rng);
let b = E::Fs::random(rng);
let c = E::Fs::random(rng);
let res1 = base.mul(a, params).mul(b, params).mul(c, params);
let res2 = base.mul(b, params).mul(c, params).mul(a, params);
@@ -143,10 +132,15 @@ fn test_mul_associativity<E: JubjubEngine>(params: &E::Params) {
fn test_order<E: JubjubEngine>(params: &E::Params) {
use self::edwards::Point;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
// The neutral element is in the prime order subgroup.
assert!(Point::<E, PrimeOrder>::zero().as_prime_order(params).is_some());
assert!(Point::<E, PrimeOrder>::zero()
.as_prime_order(params)
.is_some());
for _ in 0..50 {
// Pick a random point and multiply it by the cofactor
@@ -170,7 +164,10 @@ fn test_order<E: JubjubEngine>(params: &E::Params) {
}
fn test_addition_associativity<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
use self::montgomery::Point;
@@ -194,7 +191,10 @@ fn test_addition_associativity<E: JubjubEngine>(params: &E::Params) {
}
fn test_identities<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
{
use self::edwards::Point;
@@ -228,26 +228,28 @@ fn test_identities<E: JubjubEngine>(params: &E::Params) {
}
fn test_get_for<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let y = E::Fr::rand(rng);
let sign = bool::rand(rng);
let y = E::Fr::random(rng);
let sign = rng.next_u32() % 2 == 1;
if let Some(mut p) = edwards::Point::<E, _>::get_for_y(y, sign, params) {
assert!(p.into_xy().0.into_repr().is_odd() == sign);
p = p.negate();
assert!(
edwards::Point::<E, _>::get_for_y(y, !sign, params).unwrap()
==
p
);
assert!(edwards::Point::<E, _>::get_for_y(y, !sign, params).unwrap() == p);
}
}
}
fn test_read_write<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let e = edwards::Point::<E, _>::rand(rng, params);
@@ -262,7 +264,10 @@ fn test_read_write<E: JubjubEngine>(params: &E::Params) {
}
fn test_rand<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let p = montgomery::Point::<E, _>::rand(rng, params);
@@ -281,10 +286,13 @@ fn test_rand<E: JubjubEngine>(params: &E::Params) {
}
fn test_back_and_forth<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let rng = &mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x5d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for _ in 0..1000 {
let s = E::Fs::rand(rng);
let s = E::Fs::random(rng);
let edwards_p1 = edwards::Point::<E, _>::rand(rng, params);
let mont_p1 = montgomery::Point::from_edwards(&edwards_p1, params);
let mont_p2 = montgomery::Point::<E, _>::rand(rng, params);
@@ -293,13 +301,9 @@ fn test_back_and_forth<E: JubjubEngine>(params: &E::Params) {
let mont = mont_p1.add(&mont_p2, params).mul(s, params);
let edwards = edwards_p1.add(&edwards_p2, params).mul(s, params);
assert!(
montgomery::Point::from_edwards(&edwards, params) == mont
);
assert!(montgomery::Point::from_edwards(&edwards, params) == mont);
assert!(
edwards::Point::from_montgomery(&mont, params) == edwards
);
assert!(edwards::Point::from_montgomery(&mont, params) == edwards);
}
}
@@ -383,8 +387,7 @@ fn test_jubjub_params<E: JubjubEngine>(params: &E::Params) {
let mut pacc = E::Fs::zero().into_repr();
let mut nacc = E::Fs::char();
for _ in 0..params.pedersen_hash_chunks_per_generator()
{
for _ in 0..params.pedersen_hash_chunks_per_generator() {
// tmp = cur * 4
let mut tmp = cur;
tmp.mul2();
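
Throughout these tests the RNG seeds change representation with the move from rand 0.4 to rand_xorshift 0.2: the new [u8; 16] seeds are simply the old [u32; 4] seeds serialized word by word in little-endian order. A standalone check of that layout, not part of this diff (words_to_seed is our helper name):

fn words_to_seed(words: [u32; 4]) -> [u8; 16] {
    let mut seed = [0u8; 16];
    for (chunk, w) in seed.chunks_mut(4).zip(words.iter()) {
        chunk.copy_from_slice(&w.to_le_bytes());
    }
    seed
}

fn main() {
    // The old seed used by most of these tests, and the bytes that replaced it.
    let old = [0x3dbe6259u32, 0x8d313d76, 0x3237db17, 0xe5bc0654];
    let new = [
        0x59u8, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
        0xbc, 0xe5,
    ];
    assert_eq!(words_to_seed(old), new);
}
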

View File

@@ -2,23 +2,26 @@
//!
//! Implements section 4.2.2 of the Zcash Protocol Specification.
use blake2_rfc::blake2b::{Blake2b, Blake2bResult};
use ff::{PrimeField, PrimeFieldRepr};
use sapling_crypto::{
use crate::{
jubjub::{edwards, FixedGenerators, JubjubEngine, JubjubParams, ToUniform, Unknown},
primitives::{ProofGenerationKey, ViewingKey},
};
use blake2b_simd::{Hash as Blake2bHash, Params as Blake2bParams};
use ff::{PrimeField, PrimeFieldRepr};
use std::io::{self, Read, Write};
pub const PRF_EXPAND_PERSONALIZATION: &'static [u8; 16] = b"Zcash_ExpandSeed";
/// PRF^expand(sk, t) := BLAKE2b-512("Zcash_ExpandSeed", sk || t)
pub fn prf_expand(sk: &[u8], t: &[u8]) -> Blake2bResult {
prf_expand_vec(sk, &[t])
pub fn prf_expand(sk: &[u8], t: &[u8]) -> Blake2bHash {
prf_expand_vec(sk, &vec![t])
}
pub fn prf_expand_vec(sk: &[u8], ts: &[&[u8]]) -> Blake2bResult {
let mut h = Blake2b::with_params(64, &[], &[], PRF_EXPAND_PERSONALIZATION);
pub fn prf_expand_vec(sk: &[u8], ts: &[&[u8]]) -> Blake2bHash {
let mut h = Blake2bParams::new()
.hash_length(64)
.personal(PRF_EXPAND_PERSONALIZATION)
.to_state();
h.update(sk);
for t in ts {
h.update(t);
@@ -184,8 +187,8 @@ impl<E: JubjubEngine> FullViewingKey<E> {
#[cfg(test)]
mod tests {
use crate::jubjub::{edwards, FixedGenerators, JubjubParams, PrimeOrder};
use pairing::bls12_381::Bls12;
use sapling_crypto::jubjub::{edwards, FixedGenerators, JubjubParams, PrimeOrder};
use std::error::Error;
use super::FullViewingKey;
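
As a standalone illustration of the blake2b_simd construction that prf_expand_vec now builds: PRF^expand is a 64-byte BLAKE2b, keyed only by the "Zcash_ExpandSeed" personalization, over sk followed by the tag bytes. This sketch is not part of the diff; prf_expand_demo is our name and the inputs are placeholders.

use blake2b_simd::Params as Blake2bParams;

fn prf_expand_demo(sk: &[u8], t: &[u8]) -> [u8; 64] {
    let mut h = Blake2bParams::new()
        .hash_length(64)
        .personal(b"Zcash_ExpandSeed")
        .to_state();
    h.update(sk);
    h.update(t);
    let mut out = [0u8; 64];
    out.copy_from_slice(h.finalize().as_bytes());
    out
}

fn main() {
    // Placeholder inputs: an all-zero 32-byte sk and a one-byte tag.
    let out = prf_expand_demo(&[0u8; 32], &[0x00]);
    println!("{:02x?}", &out[..8]);
}
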

View File

@@ -0,0 +1,195 @@
//! Support for legacy transparent addresses and scripts.
use byteorder::{ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};
use std::ops::Shl;
use crate::serialize::Vector;
/// Minimal subset of script opcodes.
enum OpCode {
// push value
PushData1 = 0x4c,
PushData2 = 0x4d,
PushData4 = 0x4e,
// stack ops
Dup = 0x76,
// bit logic
Equal = 0x87,
EqualVerify = 0x88,
// crypto
Hash160 = 0xa9,
CheckSig = 0xac,
}
/// A serialized script, used inside transparent inputs and outputs of a transaction.
#[derive(Debug, Default)]
pub struct Script(pub Vec<u8>);
impl Script {
pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
let script = Vector::read(&mut reader, |r| r.read_u8())?;
Ok(Script(script))
}
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
Vector::write(&mut writer, &self.0, |w, e| w.write_u8(*e))
}
/// Returns the address that this Script contains, if any.
pub fn address(&self) -> Option<TransparentAddress> {
if self.0.len() == 25
&& self.0[0] == OpCode::Dup as u8
&& self.0[1] == OpCode::Hash160 as u8
&& self.0[2] == 0x14
&& self.0[23] == OpCode::EqualVerify as u8
&& self.0[24] == OpCode::CheckSig as u8
{
let mut hash = [0; 20];
hash.copy_from_slice(&self.0[3..23]);
Some(TransparentAddress::PublicKey(hash))
} else if self.0.len() == 23
&& self.0[0] == OpCode::Hash160 as u8
&& self.0[1] == 0x14
&& self.0[22] == OpCode::Equal as u8
{
let mut hash = [0; 20];
hash.copy_from_slice(&self.0[2..22]);
Some(TransparentAddress::Script(hash))
} else {
None
}
}
}
impl Shl<OpCode> for Script {
type Output = Self;
fn shl(mut self, rhs: OpCode) -> Self {
self.0.push(rhs as u8);
self
}
}
impl Shl<&[u8]> for Script {
type Output = Self;
fn shl(mut self, data: &[u8]) -> Self {
if data.len() < OpCode::PushData1 as usize {
self.0.push(data.len() as u8);
} else if data.len() <= 0xff {
self.0.push(OpCode::PushData1 as u8);
self.0.push(data.len() as u8);
} else if data.len() <= 0xffff {
self.0.push(OpCode::PushData2 as u8);
self.0.extend(&(data.len() as u16).to_le_bytes());
} else {
self.0.push(OpCode::PushData4 as u8);
self.0.extend(&(data.len() as u32).to_le_bytes());
}
self.0.extend(data);
self
}
}
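
As a standalone sketch, not part of this diff, of the push-length encoding implemented by Shl<&[u8]> above: lengths below 0x4c are emitted as a single length byte, and longer data is prefixed with OP_PUSHDATA1/2/4 plus a little-endian length of the matching width, which is exactly what the unit tests further down exercise (push_prefix is our helper name).

fn push_prefix(len: usize) -> Vec<u8> {
    if len < 0x4c {
        vec![len as u8] // direct push: the opcode is the length itself
    } else if len <= 0xff {
        vec![0x4c, len as u8] // OP_PUSHDATA1
    } else if len <= 0xffff {
        let mut v = vec![0x4d]; // OP_PUSHDATA2
        v.extend(&(len as u16).to_le_bytes());
        v
    } else {
        let mut v = vec![0x4e]; // OP_PUSHDATA4
        v.extend(&(len as u32).to_le_bytes());
        v
    }
}

fn main() {
    assert_eq!(push_prefix(4), [4u8]);
    assert_eq!(push_prefix(100), [0x4cu8, 100]);
    assert_eq!(push_prefix(1024), [0x4du8, 0x00, 0x04]);
    assert_eq!(push_prefix(1_000_000), [0x4eu8, 0x40, 0x42, 0x0f, 0x00]);
}
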
/// A transparent address corresponding to either a public key or a `Script`.
#[derive(Debug, PartialEq)]
pub enum TransparentAddress {
PublicKey([u8; 20]),
Script([u8; 20]),
}
impl TransparentAddress {
/// Generate the `scriptPubKey` corresponding to this address.
pub fn script(&self) -> Script {
match self {
TransparentAddress::PublicKey(key_id) => {
// P2PKH script
Script::default()
<< OpCode::Dup
<< OpCode::Hash160
<< &key_id[..]
<< OpCode::EqualVerify
<< OpCode::CheckSig
}
TransparentAddress::Script(script_id) => {
// P2SH script
Script::default() << OpCode::Hash160 << &script_id[..] << OpCode::Equal
}
}
}
}
#[cfg(test)]
mod tests {
use super::{OpCode, Script, TransparentAddress};
#[test]
fn script_opcode() {
{
let script = Script::default() << OpCode::PushData1;
assert_eq!(&script.0, &[OpCode::PushData1 as u8]);
}
}
#[test]
fn script_pushdata() {
{
let script = Script::default() << &[1, 2, 3, 4][..];
assert_eq!(&script.0, &[4, 1, 2, 3, 4]);
}
{
let short_data = vec![2; 100];
let script = Script::default() << &short_data[..];
assert_eq!(script.0[0], OpCode::PushData1 as u8);
assert_eq!(script.0[1] as usize, 100);
assert_eq!(&script.0[2..], &short_data[..]);
}
{
let medium_data = vec![7; 1024];
let script = Script::default() << &medium_data[..];
assert_eq!(script.0[0], OpCode::PushData2 as u8);
assert_eq!(&script.0[1..3], &[0x00, 0x04][..]);
assert_eq!(&script.0[3..], &medium_data[..]);
}
{
let long_data = vec![42; 1_000_000];
let script = Script::default() << &long_data[..];
assert_eq!(script.0[0], OpCode::PushData4 as u8);
assert_eq!(&script.0[1..5], &[0x40, 0x42, 0x0f, 0x00][..]);
assert_eq!(&script.0[5..], &long_data[..]);
}
}
#[test]
fn p2pkh() {
let addr = TransparentAddress::PublicKey([4; 20]);
assert_eq!(
&addr.script().0,
&[
0x76, 0xa9, 0x14, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x88, 0xac,
]
);
assert_eq!(addr.script().address(), Some(addr));
}
#[test]
fn p2sh() {
let addr = TransparentAddress::Script([7; 20]);
assert_eq!(
&addr.script().0,
&[
0xa9, 0x14, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x87,
]
);
assert_eq!(addr.script().address(), Some(addr));
}
}

View File

@@ -2,7 +2,8 @@
extern crate lazy_static;
extern crate aes;
extern crate blake2_rfc;
extern crate blake2b_simd;
extern crate blake2s_simd;
extern crate byteorder;
extern crate crypto_api_chachapoly;
extern crate ff;
@@ -10,22 +11,46 @@ extern crate fpe;
extern crate hex;
extern crate pairing;
extern crate rand;
extern crate sapling_crypto;
extern crate rand_core;
extern crate rand_os;
extern crate sha2;
use sapling_crypto::jubjub::JubjubBls12;
#[cfg(feature = "transparent-inputs")]
extern crate ripemd160;
#[cfg(feature = "transparent-inputs")]
extern crate secp256k1;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate rand_xorshift;
pub mod block;
pub mod constants;
pub mod group_hash;
pub mod jubjub;
pub mod keys;
pub mod legacy;
pub mod merkle_tree;
pub mod note_encryption;
pub mod pedersen_hash;
pub mod primitives;
pub mod prover;
pub mod redjubjub;
pub mod sapling;
mod serialize;
pub mod serialize;
pub mod transaction;
mod util;
pub mod zip32;
#[cfg(test)]
mod test_vectors;
use jubjub::JubjubBls12;
lazy_static! {
pub static ref JUBJUB: JubjubBls12 = { JubjubBls12::new() };
}
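
A minimal standalone sketch of using the lazily-initialized parameters from a downstream crate, mirroring the doc example in note_encryption below; the [0; 11] diversifier is one that is known there to map onto the curve.

use pairing::bls12_381::Bls12;
use zcash_primitives::{primitives::Diversifier, JUBJUB};

fn main() {
    let diversifier = Diversifier([0u8; 11]);
    // g_d returns Some(point) only when the diversifier hashes onto Jubjub.
    let g_d = diversifier.g_d::<Bls12>(&JUBJUB).expect("diversifier is valid");
    println!("{:?}", g_d.into_xy());
}
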

File diff suppressed because it is too large

View File

@@ -1,12 +1,6 @@
//! Implementation of in-band secret distribution for Zcash transactions.
use blake2_rfc::blake2b::{Blake2b, Blake2bResult};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crypto_api_chachapoly::{ChaCha20Ietf, ChachaPolyIetf};
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr};
use rand::{OsRng, Rng};
use sapling_crypto::{
use crate::{
jubjub::{
edwards,
fs::{Fs, FsRepr},
@@ -14,6 +8,12 @@ use sapling_crypto::{
},
primitives::{Diversifier, Note, PaymentAddress},
};
use blake2b_simd::{Hash as Blake2bHash, Params as Blake2bParams};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crypto_api_chachapoly::{ChaCha20Ietf, ChachaPolyIetf};
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr};
use rand_core::{CryptoRng, RngCore};
use std::fmt;
use std::str;
@@ -134,13 +134,10 @@ impl Memo {
}
}
fn generate_esk() -> Fs {
pub fn generate_esk<R: RngCore + CryptoRng>(rng: &mut R) -> Fs {
// create random 64 byte buffer
let mut rng = OsRng::new().expect("should be able to construct RNG");
let mut buffer = [0u8; 64];
for i in 0..buffer.len() {
buffer[i] = rng.gen();
}
rng.fill_bytes(&mut buffer);
// reduce to uniform value
Fs::to_uniform(&buffer[..])
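
A hedged note on why generate_esk samples 64 random bytes rather than 32: to_uniform reduces the buffer modulo the Jubjub scalar field order $r_{\mathbb{J}} \approx 2^{252}$, and reducing a uniform 512-bit integer yields a distribution whose statistical distance from uniform is at most

\[ \frac{r_{\mathbb{J}}}{2^{512}} < 2^{-259}, \]

so the extra width makes the modular bias negligible (assuming to_uniform performs a full-width reduction, which its use here implies).
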
@@ -168,14 +165,15 @@ where
fn kdf_sapling(
dhsecret: edwards::Point<Bls12, PrimeOrder>,
epk: &edwards::Point<Bls12, PrimeOrder>,
) -> Blake2bResult {
) -> Blake2bHash {
let mut input = [0u8; 64];
dhsecret.write(&mut input[0..32]).unwrap();
epk.write(&mut input[32..64]).unwrap();
let mut h = Blake2b::with_params(32, &[], &[], KDF_SAPLING_PERSONALIZATION);
h.update(&input);
h.finalize()
Blake2bParams::new()
.hash_length(32)
.personal(KDF_SAPLING_PERSONALIZATION)
.hash(&input)
}
/// Sapling PRF^ock.
@@ -186,16 +184,17 @@ fn prf_ock(
cv: &edwards::Point<Bls12, Unknown>,
cmu: &Fr,
epk: &edwards::Point<Bls12, PrimeOrder>,
) -> Blake2bResult {
) -> Blake2bHash {
let mut ock_input = [0u8; 128];
ock_input[0..32].copy_from_slice(&ovk.0);
cv.write(&mut ock_input[32..64]).unwrap();
cmu.into_repr().write_le(&mut ock_input[64..96]).unwrap();
epk.write(&mut ock_input[96..128]).unwrap();
let mut h = Blake2b::with_params(32, &[], &[], PRF_OCK_PERSONALIZATION);
h.update(&ock_input);
h.finalize()
Blake2bParams::new()
.hash_length(32)
.personal(PRF_OCK_PERSONALIZATION)
.hash(&ock_input)
}
/// An API for encrypting Sapling notes.
@@ -209,23 +208,23 @@ fn prf_ock(
/// # Examples
///
/// ```
/// extern crate ff;
/// extern crate pairing;
/// extern crate rand;
/// extern crate sapling_crypto;
/// extern crate rand_os;
/// extern crate zcash_primitives;
///
/// use ff::Field;
/// use pairing::bls12_381::Bls12;
/// use rand::{OsRng, Rand};
/// use sapling_crypto::{
/// jubjub::fs::Fs,
/// primitives::{Diversifier, PaymentAddress, ValueCommitment},
/// };
/// use rand_os::OsRng;
/// use zcash_primitives::{
/// jubjub::fs::Fs,
/// keys::OutgoingViewingKey,
/// note_encryption::{Memo, SaplingNoteEncryption},
/// primitives::{Diversifier, PaymentAddress, ValueCommitment},
/// JUBJUB,
/// };
///
/// let mut rng = OsRng::new().unwrap();
/// let mut rng = OsRng;
///
/// let diversifier = Diversifier([0; 11]);
/// let pk_d = diversifier.g_d::<Bls12>(&JUBJUB).unwrap();
@@ -236,7 +235,7 @@ fn prf_ock(
/// let ovk = OutgoingViewingKey([0; 32]);
///
/// let value = 1000;
/// let rcv = Fs::rand(&mut rng);
/// let rcv = Fs::random(&mut rng);
/// let cv = ValueCommitment::<Bls12> {
/// value,
/// randomness: rcv.clone(),
@@ -244,7 +243,7 @@ fn prf_ock(
/// let note = to.create_note(value, rcv, &JUBJUB).unwrap();
/// let cmu = note.cm(&JUBJUB);
///
/// let enc = SaplingNoteEncryption::new(ovk, note, to, Memo::default());
/// let enc = SaplingNoteEncryption::new(ovk, note, to, Memo::default(), &mut rng);
/// let encCiphertext = enc.encrypt_note_plaintext();
/// let outCiphertext = enc.encrypt_outgoing_plaintext(&cv.cm(&JUBJUB).into(), &cmu);
/// ```
@@ -259,13 +258,14 @@ pub struct SaplingNoteEncryption {
impl SaplingNoteEncryption {
/// Creates a new encryption context for the given note.
pub fn new(
pub fn new<R: RngCore + CryptoRng>(
ovk: OutgoingViewingKey,
note: Note<Bls12>,
to: PaymentAddress<Bls12>,
memo: Memo,
rng: &mut R,
) -> SaplingNoteEncryption {
let esk = generate_esk();
let esk = generate_esk(rng);
let epk = note.g_d.mul(esk, &JUBJUB);
SaplingNoteEncryption {
@@ -442,23 +442,12 @@ pub fn try_sapling_compact_note_decryption(
let shared_secret = sapling_ka_agree(ivk, epk);
let key = kdf_sapling(shared_secret, &epk);
// Prefix plaintext with 64 zero-bytes to skip over Poly1305 keying output
const CHACHA20_BLOCK_SIZE: usize = 64;
let mut plaintext = [0; CHACHA20_BLOCK_SIZE + COMPACT_NOTE_SIZE];
plaintext[CHACHA20_BLOCK_SIZE..].copy_from_slice(&enc_ciphertext[0..COMPACT_NOTE_SIZE]);
assert_eq!(
ChaCha20Ietf::cipher()
.decrypt(
&mut plaintext,
CHACHA20_BLOCK_SIZE + COMPACT_NOTE_SIZE,
key.as_bytes(),
&[0u8; 12],
)
.ok()?,
CHACHA20_BLOCK_SIZE + COMPACT_NOTE_SIZE
);
// Start from block 1 to skip over Poly1305 keying output
let mut plaintext = [0; COMPACT_NOTE_SIZE];
plaintext.copy_from_slice(&enc_ciphertext);
ChaCha20Ietf::xor(key.as_bytes(), &[0u8; 12], 1, &mut plaintext);
parse_note_plaintext_without_memo(ivk, cmu, &plaintext[CHACHA20_BLOCK_SIZE..])
parse_note_plaintext_without_memo(ivk, cmu, &plaintext)
}
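
A standalone sketch, not part of this diff, of why starting at block counter 1 is equivalent to the old 64-byte-prefix trick: in the IETF ChaCha20-Poly1305 construction the counter-0 keystream block is reserved for deriving the Poly1305 key and payload encryption begins at counter 1, so XOR-ing with the keystream from counter 1 matches dropping a one-block counter-0 prefix. Only the ChaCha20Ietf::xor call already used in this module is assumed; the key, nonce and message are placeholders.

use crypto_api_chachapoly::ChaCha20Ietf;

fn main() {
    let key = [7u8; 32];
    let nonce = [0u8; 12];
    let msg = b"compact note plaintext bytes....".to_vec();

    // Keystream starting at block 0, then drop the first 64 bytes.
    let mut with_prefix = vec![0u8; 64 + msg.len()];
    with_prefix[64..].copy_from_slice(&msg);
    ChaCha20Ietf::xor(&key, &nonce, 0, &mut with_prefix);

    // Keystream applied directly from block 1.
    let mut direct = msg.clone();
    ChaCha20Ietf::xor(&key, &nonce, 1, &mut direct);

    assert_eq!(&with_prefix[64..], &direct[..]);
}
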
/// Recovery of the full note plaintext by the sender.
@@ -555,11 +544,7 @@ pub fn try_sapling_output_recovery(
#[cfg(test)]
mod tests {
use crypto_api_chachapoly::ChachaPolyIetf;
use ff::{PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr, FrRepr};
use rand::{thread_rng, Rand, Rng};
use sapling_crypto::{
use crate::{
jubjub::{
edwards,
fs::{Fs, FsRepr},
@@ -567,6 +552,11 @@ mod tests {
},
primitives::{Diversifier, PaymentAddress, ValueCommitment},
};
use crypto_api_chachapoly::ChachaPolyIetf;
use ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::bls12_381::{Bls12, Fr, FrRepr};
use rand_core::{CryptoRng, RngCore};
use rand_os::OsRng;
use super::{
kdf_sapling, prf_ock, sapling_ka_agree, try_sapling_compact_note_decryption,
@@ -690,8 +680,8 @@ mod tests {
assert_eq!(Memo::default().to_utf8(), None);
}
fn random_enc_ciphertext(
mut rng: &mut Rng,
fn random_enc_ciphertext<R: RngCore + CryptoRng>(
mut rng: &mut R,
) -> (
OutgoingViewingKey,
Fs,
@@ -702,7 +692,7 @@ mod tests {
[u8; OUT_CIPHERTEXT_SIZE],
) {
let diversifier = Diversifier([0; 11]);
let ivk = Fs::rand(&mut rng);
let ivk = Fs::random(&mut rng);
let pk_d = diversifier.g_d::<Bls12>(&JUBJUB).unwrap().mul(ivk, &JUBJUB);
let pa = PaymentAddress { diversifier, pk_d };
@@ -710,15 +700,17 @@ mod tests {
let value = 100;
let value_commitment = ValueCommitment::<Bls12> {
value,
randomness: Fs::rand(&mut rng),
randomness: Fs::random(&mut rng),
};
let cv = value_commitment.cm(&JUBJUB).into();
let note = pa.create_note(value, Fs::rand(&mut rng), &JUBJUB).unwrap();
let note = pa
.create_note(value, Fs::random(&mut rng), &JUBJUB)
.unwrap();
let cmu = note.cm(&JUBJUB);
let ovk = OutgoingViewingKey([0; 32]);
let ne = SaplingNoteEncryption::new(ovk, note, pa, Memo([0; 512]));
let ne = SaplingNoteEncryption::new(ovk, note, pa, Memo([0; 512]), rng);
let epk = ne.epk();
let enc_ciphertext = ne.encrypt_note_plaintext();
let out_ciphertext = ne.encrypt_outgoing_plaintext(&cv, &cmu);
@@ -842,19 +834,19 @@ mod tests {
#[test]
fn decryption_with_invalid_ivk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, _, _, cmu, epk, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
assert_eq!(
try_sapling_note_decryption(&Fs::rand(&mut rng), &epk, &cmu, &enc_ciphertext),
try_sapling_note_decryption(&Fs::random(&mut rng), &epk, &cmu, &enc_ciphertext),
None
);
}
#[test]
fn decryption_with_invalid_epk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, ivk, _, cmu, _, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
@@ -871,19 +863,19 @@ mod tests {
#[test]
fn decryption_with_invalid_cmu() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, ivk, _, _, epk, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
assert_eq!(
try_sapling_note_decryption(&ivk, &epk, &Fr::rand(&mut rng), &enc_ciphertext),
try_sapling_note_decryption(&ivk, &epk, &Fr::random(&mut rng), &enc_ciphertext),
None
);
}
#[test]
fn decryption_with_invalid_tag() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, ivk, _, cmu, epk, mut enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
@@ -896,7 +888,7 @@ mod tests {
#[test]
fn decryption_with_invalid_version_byte() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -918,7 +910,7 @@ mod tests {
#[test]
fn decryption_with_invalid_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -940,7 +932,7 @@ mod tests {
#[test]
fn decryption_with_incorrect_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -962,13 +954,13 @@ mod tests {
#[test]
fn compact_decryption_with_invalid_ivk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, _, _, cmu, epk, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
assert_eq!(
try_sapling_compact_note_decryption(
&Fs::rand(&mut rng),
&Fs::random(&mut rng),
&epk,
&cmu,
&enc_ciphertext[..COMPACT_NOTE_SIZE]
@@ -979,7 +971,7 @@ mod tests {
#[test]
fn compact_decryption_with_invalid_epk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, ivk, _, cmu, _, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
@@ -996,7 +988,7 @@ mod tests {
#[test]
fn compact_decryption_with_invalid_cmu() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (_, ivk, _, _, epk, enc_ciphertext, _) = random_enc_ciphertext(&mut rng);
@@ -1004,7 +996,7 @@ mod tests {
try_sapling_compact_note_decryption(
&ivk,
&epk,
&Fr::rand(&mut rng),
&Fr::random(&mut rng),
&enc_ciphertext[..COMPACT_NOTE_SIZE]
),
None
@@ -1013,7 +1005,7 @@ mod tests {
#[test]
fn compact_decryption_with_invalid_version_byte() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1040,7 +1032,7 @@ mod tests {
#[test]
fn compact_decryption_with_invalid_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1067,7 +1059,7 @@ mod tests {
#[test]
fn compact_decryption_with_incorrect_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, ivk, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1094,7 +1086,7 @@ mod tests {
#[test]
fn recovery_with_invalid_ovk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (mut ovk, _, cv, cmu, epk, enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1108,7 +1100,7 @@ mod tests {
#[test]
fn recovery_with_invalid_cv() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, _, cmu, epk, enc_ciphertext, out_ciphertext) = random_enc_ciphertext(&mut rng);
@@ -1127,7 +1119,7 @@ mod tests {
#[test]
fn recovery_with_invalid_cmu() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, _, epk, enc_ciphertext, out_ciphertext) = random_enc_ciphertext(&mut rng);
@@ -1135,7 +1127,7 @@ mod tests {
try_sapling_output_recovery(
&ovk,
&cv,
&Fr::rand(&mut rng),
&Fr::random(&mut rng),
&epk,
&enc_ciphertext,
&out_ciphertext
@@ -1146,7 +1138,7 @@ mod tests {
#[test]
fn recovery_with_invalid_epk() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, _, enc_ciphertext, out_ciphertext) = random_enc_ciphertext(&mut rng);
@@ -1165,7 +1157,7 @@ mod tests {
#[test]
fn recovery_with_invalid_enc_tag() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1179,7 +1171,7 @@ mod tests {
#[test]
fn recovery_with_invalid_out_tag() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, epk, enc_ciphertext, mut out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1193,7 +1185,7 @@ mod tests {
#[test]
fn recovery_with_invalid_version_byte() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1215,7 +1207,7 @@ mod tests {
#[test]
fn recovery_with_invalid_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1237,7 +1229,7 @@ mod tests {
#[test]
fn recovery_with_incorrect_diversifier() {
let mut rng = thread_rng();
let mut rng = OsRng;
let (ovk, _, cv, cmu, epk, mut enc_ciphertext, out_ciphertext) =
random_enc_ciphertext(&mut rng);
@@ -1365,7 +1357,7 @@ mod tests {
// Test encryption
//
let mut ne = SaplingNoteEncryption::new(ovk, note, to, Memo(tv.memo));
let mut ne = SaplingNoteEncryption::new(ovk, note, to, Memo(tv.memo), &mut OsRng);
// Swap in the ephemeral keypair from the test vectors
ne.esk = esk;
ne.epk = epk;

View File

@@ -4,14 +4,13 @@ use jubjub::*;
#[derive(Copy, Clone)]
pub enum Personalization {
NoteCommitment,
MerkleTree(usize)
MerkleTree(usize),
}
impl Personalization {
pub fn get_bits(&self) -> Vec<bool> {
match *self {
Personalization::NoteCommitment =>
vec![true, true, true, true, true, true],
Personalization::NoteCommitment => vec![true, true, true, true, true, true],
Personalization::MerkleTree(num) => {
assert!(num < 63);
@@ -24,12 +23,16 @@ impl Personalization {
pub fn pedersen_hash<E, I>(
personalization: Personalization,
bits: I,
params: &E::Params
params: &E::Params,
) -> edwards::Point<E, PrimeOrder>
where I: IntoIterator<Item=bool>,
E: JubjubEngine
where
I: IntoIterator<Item = bool>,
E: JubjubEngine,
{
let mut bits = personalization.get_bits().into_iter().chain(bits.into_iter());
let mut bits = personalization
.get_bits()
.into_iter()
.chain(bits.into_iter());
let mut result = edwards::Point::zero();
let mut generators = params.pedersen_hash_exp_table().iter();
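
For orientation while reading the windowed loop below (largely elided by this hunk), here is a hedged summary of the Sapling Pedersen-hash encoding it accumulates; treat it as a reference sketch and check it against the protocol specification. Bits are consumed three at a time, and each chunk $(s_0, s_1, s_2)$ of segment $i$ contributes

\[ \mathrm{enc}(s_0,s_1,s_2) = (1 + s_0 + 2 s_1)\cdot(-1)^{s_2}, \qquad \langle m_i \rangle = \sum_{j \ge 0} \mathrm{enc}(c_{i,j})\cdot 2^{4j}, \]

after which each per-segment scalar $\langle m_i \rangle$ multiplies its own fixed generator from pedersen_hash_exp_table and the per-segment products are summed into result.
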
@@ -79,12 +82,13 @@ pub fn pedersen_hash<E, I>(
break;
}
let mut table: &[Vec<edwards::Point<E, _>>] = &generators.next().expect("we don't have enough generators");
let mut table: &[Vec<edwards::Point<E, _>>] =
&generators.next().expect("we don't have enough generators");
let window = JubjubBls12::pedersen_hash_exp_window_size();
let window_mask = (1 << window) - 1;
let mut acc = acc.into_repr();
let mut tmp = edwards::Point::zero();
while !acc.is_zero() {

View File

@@ -4,60 +4,47 @@ use constants;
use group_hash::group_hash;
use pedersen_hash::{
pedersen_hash,
Personalization
};
use pedersen_hash::{pedersen_hash, Personalization};
use byteorder::{
LittleEndian,
WriteBytesExt
};
use byteorder::{LittleEndian, WriteBytesExt};
use jubjub::{
JubjubEngine,
JubjubParams,
edwards,
PrimeOrder,
FixedGenerators
};
use jubjub::{edwards, FixedGenerators, JubjubEngine, JubjubParams, PrimeOrder};
use blake2_rfc::blake2s::Blake2s;
use blake2s_simd::Params as Blake2sParams;
#[derive(Clone)]
pub struct ValueCommitment<E: JubjubEngine> {
pub value: u64,
pub randomness: E::Fs
pub randomness: E::Fs,
}
impl<E: JubjubEngine> ValueCommitment<E> {
pub fn cm(
&self,
params: &E::Params
) -> edwards::Point<E, PrimeOrder>
{
params.generator(FixedGenerators::ValueCommitmentValue)
.mul(self.value, params)
.add(
&params.generator(FixedGenerators::ValueCommitmentRandomness)
.mul(self.randomness, params),
params
)
pub fn cm(&self, params: &E::Params) -> edwards::Point<E, PrimeOrder> {
params
.generator(FixedGenerators::ValueCommitmentValue)
.mul(self.value, params)
.add(
&params
.generator(FixedGenerators::ValueCommitmentRandomness)
.mul(self.randomness, params),
params,
)
}
}
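
Written out, the homomorphic value commitment computed by cm above is

\[ \mathrm{cv} = [\mathrm{value}]\,\mathcal{V} + [\mathrm{rcv}]\,\mathcal{R}, \]

where $\mathcal{V}$ and $\mathcal{R}$ are the ValueCommitmentValue and ValueCommitmentRandomness fixed generators and rcv is the randomness field.
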
#[derive(Clone)]
pub struct ProofGenerationKey<E: JubjubEngine> {
pub ak: edwards::Point<E, PrimeOrder>,
pub nsk: E::Fs
pub nsk: E::Fs,
}
impl<E: JubjubEngine> ProofGenerationKey<E> {
pub fn into_viewing_key(&self, params: &E::Params) -> ViewingKey<E> {
ViewingKey {
ak: self.ak.clone(),
nk: params.generator(FixedGenerators::ProofGenerationKey)
.mul(self.nsk, params)
nk: params
.generator(FixedGenerators::ProofGenerationKey)
.mul(self.nsk, params),
}
}
}
@@ -65,19 +52,16 @@ impl<E: JubjubEngine> ProofGenerationKey<E> {
#[derive(Debug)]
pub struct ViewingKey<E: JubjubEngine> {
pub ak: edwards::Point<E, PrimeOrder>,
pub nk: edwards::Point<E, PrimeOrder>
pub nk: edwards::Point<E, PrimeOrder>,
}
impl<E: JubjubEngine> ViewingKey<E> {
pub fn rk(
&self,
ar: E::Fs,
params: &E::Params
) -> edwards::Point<E, PrimeOrder> {
pub fn rk(&self, ar: E::Fs, params: &E::Params) -> edwards::Point<E, PrimeOrder> {
self.ak.add(
&params.generator(FixedGenerators::SpendingKeyGenerator)
.mul(ar, params),
params
&params
.generator(FixedGenerators::SpendingKeyGenerator)
.mul(ar, params),
params,
)
}
@@ -87,9 +71,14 @@ impl<E: JubjubEngine> ViewingKey<E> {
self.ak.write(&mut preimage[0..32]).unwrap();
self.nk.write(&mut preimage[32..64]).unwrap();
let mut h = Blake2s::with_params(32, &[], &[], constants::CRH_IVK_PERSONALIZATION);
h.update(&preimage);
let mut h = h.finalize().as_ref().to_vec();
let mut h = [0; 32];
h.copy_from_slice(
Blake2sParams::new()
.hash_length(32)
.personal(constants::CRH_IVK_PERSONALIZATION)
.hash(&preimage)
.as_bytes(),
);
// Drop the most significant five bits, so it can be interpreted as a scalar.
h[31] &= 0b0000_0111;
@@ -103,15 +92,14 @@ impl<E: JubjubEngine> ViewingKey<E> {
pub fn into_payment_address(
&self,
diversifier: Diversifier,
params: &E::Params
) -> Option<PaymentAddress<E>>
{
params: &E::Params,
) -> Option<PaymentAddress<E>> {
diversifier.g_d(params).map(|g_d| {
let pk_d = g_d.mul(self.ivk(), params);
PaymentAddress {
pk_d: pk_d,
diversifier: diversifier
diversifier: diversifier,
}
})
}
@@ -123,17 +111,20 @@ pub struct Diversifier(pub [u8; 11]);
impl Diversifier {
pub fn g_d<E: JubjubEngine>(
&self,
params: &E::Params
) -> Option<edwards::Point<E, PrimeOrder>>
{
group_hash::<E>(&self.0, constants::KEY_DIVERSIFICATION_PERSONALIZATION, params)
params: &E::Params,
) -> Option<edwards::Point<E, PrimeOrder>> {
group_hash::<E>(
&self.0,
constants::KEY_DIVERSIFICATION_PERSONALIZATION,
params,
)
}
}
#[derive(Clone, Debug)]
pub struct PaymentAddress<E: JubjubEngine> {
pub pk_d: edwards::Point<E, PrimeOrder>,
pub diversifier: Diversifier
pub diversifier: Diversifier,
}
impl<E: JubjubEngine> PartialEq for PaymentAddress<E> {
@@ -143,11 +134,7 @@ impl<E: JubjubEngine> PartialEq for PaymentAddress<E> {
}
impl<E: JubjubEngine> PaymentAddress<E> {
pub fn g_d(
&self,
params: &E::Params
) -> Option<edwards::Point<E, PrimeOrder>>
{
pub fn g_d(&self, params: &E::Params) -> Option<edwards::Point<E, PrimeOrder>> {
self.diversifier.g_d(params)
}
@@ -155,16 +142,13 @@ impl<E: JubjubEngine> PaymentAddress<E> {
&self,
value: u64,
randomness: E::Fs,
params: &E::Params
) -> Option<Note<E>>
{
self.g_d(params).map(|g_d| {
Note {
value: value,
r: randomness,
g_d: g_d,
pk_d: self.pk_d.clone()
}
params: &E::Params,
) -> Option<Note<E>> {
self.g_d(params).map(|g_d| Note {
value: value,
r: randomness,
g_d: g_d,
pk_d: self.pk_d.clone(),
})
}
}
@@ -178,7 +162,7 @@ pub struct Note<E: JubjubEngine> {
/// The public key of the address, g_d^ivk
pub pk_d: edwards::Point<E, PrimeOrder>,
/// The commitment randomness
pub r: E::Fs
pub r: E::Fs,
}
impl<E: JubjubEngine> PartialEq for Note<E> {
@@ -201,13 +185,14 @@ impl<E: JubjubEngine> Note<E> {
}
/// Computes the note commitment, returning the full point.
fn cm_full_point(&self, params: &E::Params) -> edwards::Point<E, PrimeOrder>
{
fn cm_full_point(&self, params: &E::Params) -> edwards::Point<E, PrimeOrder> {
// Calculate the note contents, as bytes
let mut note_contents = vec![];
// Writing the value in little endian
(&mut note_contents).write_u64::<LittleEndian>(self.value).unwrap();
(&mut note_contents)
.write_u64::<LittleEndian>(self.value)
.unwrap();
// Write g_d
self.g_d.write(&mut note_contents).unwrap();
@@ -220,50 +205,44 @@ impl<E: JubjubEngine> Note<E> {
// Compute the Pedersen hash of the note contents
let hash_of_contents = pedersen_hash(
Personalization::NoteCommitment,
note_contents.into_iter()
.flat_map(|byte| {
(0..8).map(move |i| ((byte >> i) & 1) == 1)
}),
params
note_contents
.into_iter()
.flat_map(|byte| (0..8).map(move |i| ((byte >> i) & 1) == 1)),
params,
);
// Compute final commitment
params.generator(FixedGenerators::NoteCommitmentRandomness)
.mul(self.r, params)
.add(&hash_of_contents, params)
params
.generator(FixedGenerators::NoteCommitmentRandomness)
.mul(self.r, params)
.add(&hash_of_contents, params)
}
/// Computes the nullifier given the viewing key and
/// note position
pub fn nf(
&self,
viewing_key: &ViewingKey<E>,
position: u64,
params: &E::Params
) -> Vec<u8>
{
pub fn nf(&self, viewing_key: &ViewingKey<E>, position: u64, params: &E::Params) -> Vec<u8> {
// Compute rho = cm + position.G
let rho = self
.cm_full_point(params)
.add(
&params.generator(FixedGenerators::NullifierPosition)
.mul(position, params),
params
);
let rho = self.cm_full_point(params).add(
&params
.generator(FixedGenerators::NullifierPosition)
.mul(position, params),
params,
);
// Compute nf = BLAKE2s(nk | rho)
let mut nf_preimage = [0u8; 64];
viewing_key.nk.write(&mut nf_preimage[0..32]).unwrap();
rho.write(&mut nf_preimage[32..64]).unwrap();
let mut h = Blake2s::with_params(32, &[], &[], constants::PRF_NF_PERSONALIZATION);
h.update(&nf_preimage);
h.finalize().as_ref().to_vec()
Blake2sParams::new()
.hash_length(32)
.personal(constants::PRF_NF_PERSONALIZATION)
.hash(&nf_preimage)
.as_bytes()
.to_vec()
}
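
In equation form, the nullifier computed above is

\[ \rho = \mathrm{cm} + [\mathrm{position}]\,\mathcal{G}_{\mathsf{NullifierPosition}}, \qquad \mathrm{nf} = \mathrm{BLAKE2s}\text{-}256(\mathrm{nk} \,\|\, \rho), \]

with the BLAKE2s instance personalized by PRF_NF_PERSONALIZATION; cm is the full note-commitment point, and both nk and rho are written as 32-byte point encodings before hashing, matching the 64-byte preimage above.
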
/// Computes the note commitment
pub fn cm(&self, params: &E::Params) -> E::Fr
{
pub fn cm(&self, params: &E::Params) -> E::Fr {
// The commitment is in the prime order subgroup, so mapping the
// commitment to the x-coordinate is an injective encoding.
self.cm_full_point(params).into_xy().0

Some files were not shown because too many files have changed in this diff