Merge pull request #32 from str4d/cargo-workspace

Convert the repo into a Cargo workspace
ebfull 2018-08-29 12:58:43 -06:00 committed by GitHub
commit e1c6232dd7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
126 changed files with 31595 additions and 46 deletions

Cargo.lock generated

@@ -1,3 +1,49 @@
[[package]]
name = "aes"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aes-soft 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"aesni 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"block-cipher-trait 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "aes-soft"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"block-cipher-trait 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"opaque-debug 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "aesni"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"block-cipher-trait 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"opaque-debug 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"stream-cipher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "aho-corasick"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "arrayvec"
version = "0.4.7"
@@ -6,10 +52,30 @@ dependencies = [
"nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "backtrace"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "backtrace-sys"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cc 1.0.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bellman"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -17,7 +83,7 @@ dependencies = [
"futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -26,6 +92,11 @@ name = "bit-vec"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "1.0.1"
@@ -41,11 +112,79 @@ dependencies = [
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "block-cipher-trait"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byte-tools"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "byteorder"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cargo_metadata"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "cc"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cfg-if"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "clippy"
version = "0.0.200"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy_lints 0.0.200 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clippy_lints"
version = "0.0.200"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
"if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "constant_time_eq"
version = "0.1.3"
@@ -64,6 +203,31 @@ dependencies = [
"generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "either"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "error-chain"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fpe"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aes 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"num-bigint 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
@@ -92,6 +256,11 @@ dependencies = [
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "gcc"
version = "0.3.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "generic-array"
version = "0.9.0"
@@ -100,6 +269,59 @@ dependencies = [
"typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "getopts"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hex-literal"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro-hack 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hex-literal-impl"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro-hack 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "idna"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "if_chain"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "itertools"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "itoa"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.0.0"
@@ -114,14 +336,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "librustzcash"
version = "0.1.0"
dependencies = [
"bellman 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bellman 0.1.0",
"blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)",
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"sapling-crypto 0.0.1 (git+https://github.com/zcash-hackworks/sapling-crypto?rev=21084bde2019c04bd34208e63c3560fe2c02fb0e)",
"sapling-crypto 0.0.1",
]
[[package]]
name = "matches"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memchr"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -129,6 +364,28 @@ name = "nodrop"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num-bigint"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-integer"
version = "0.1.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num_cpus"
version = "1.8.0"
@@ -137,12 +394,75 @@ dependencies = [
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "opaque-debug"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "pairing"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.200 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "percent-encoding"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "proc-macro-hack"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro-hack-impl 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "proc-macro-hack-impl"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "proc-macro2"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "pulldown-cmark"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
"getopts 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quine-mc_cluskey"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "quote"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -156,17 +476,161 @@ dependencies = [
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "redox_syscall"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rust-crypto"
version = "0.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-demangle"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc-serialize"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ryu"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "sapling-crypto"
version = "0.0.1"
source = "git+https://github.com/zcash-hackworks/sapling-crypto?rev=21084bde2019c04bd34208e63c3560fe2c02fb0e#21084bde2019c04bd34208e63c3560fe2c02fb0e"
dependencies = [
"bellman 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bellman 0.1.0",
"blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)",
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"digest 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde"
version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde_derive"
version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde_json"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"itoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ryu 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "stream-cipher"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "syn"
version = "0.14.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread_local"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "time"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "toml"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -174,6 +638,49 @@ name = "typenum"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ucd-util"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicode-bidi"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "unicode-normalization"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicode-width"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicode-xid"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "url"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "utf8-ranges"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.3.4"
@@ -193,29 +700,116 @@ name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "zcash_primitives"
version = "0.0.0"
[[package]]
name = "zcash_proofs"
version = "0.0.0"
[[package]]
name = "zcash_wallet"
version = "0.0.0"
[[package]]
name = "zip32"
version = "0.0.0"
dependencies = [
"aes 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)",
"byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"fpe 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.14.2",
"sapling-crypto 0.0.1",
]
[metadata]
"checksum aes 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6fb1737cdc8da3db76e90ca817a194249a38fcb500c2e6ecec39b29448aa873"
"checksum aes-soft 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67cc03b0a090a05cb01e96998a01905d7ceedce1bc23b756c0bb7faa0682ccb1"
"checksum aesni 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6810b7fb9f2bb4f76f05ac1c170b8dde285b6308955dc3afd89710268c958d9e"
"checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a"
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
"checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef"
"checksum bellman 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eae372472c7ea8f7c8fc6a62f7d5535db8302de7f1aafda2e13a97c4830d3bcf"
"checksum backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "89a47830402e9981c5c41223151efcced65a0510c13097c769cede7efb34782a"
"checksum backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c66d56ac8dabd07f6aacdaf633f4b8262f5b3601a810a0dcddffd5c22c69daa0"
"checksum bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f"
"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf"
"checksum blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)" = "<none>"
"checksum block-cipher-trait 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "370424437b9459f3dfd68428ed9376ddfe03d8b70ede29cc533b3557df186ab4"
"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40"
"checksum byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "73b5bdfe7ee3ad0b99c9801d58807a9dbc9e09196365b0203853b99889ab3c87"
"checksum cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1efca0b863ca03ed4c109fb1c55e0bc4bbeb221d3e103d86251046b06a526bd0"
"checksum cc 1.0.22 (registry+https://github.com/rust-lang/crates.io-index)" = "4a6007c146fdd28d4512a794b07ffe9d8e89e6bf86e2e0c4ddff2e1fb54a0007"
"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3"
"checksum clippy 0.0.200 (registry+https://github.com/rust-lang/crates.io-index)" = "927a1f79af10deb103df108347f23c6b7fa1731c953d6fb24d68be1748a0993f"
"checksum clippy_lints 0.0.200 (registry+https://github.com/rust-lang/crates.io-index)" = "d2432663f6bdb90255dcf9df5ca504f99b575bb471281591138f62f9d31f863b"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
"checksum digest 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "00a49051fef47a72c9623101b19bd71924a45cca838826caae3eaa4d00772603"
"checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0"
"checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3"
"checksum fpe 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce3371c82bfbd984f624cab093f55e7336f5a6e589f8518e1258f54f011b89ad"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
"checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c"
"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
"checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb"
"checksum generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d"
"checksum getopts 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0a7292d30132fb5424b354f5dc02512a86e4c516fe544bb7a25e7f266951b797"
"checksum hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4da5f0e01bd8a71a224a4eedecaacfcabda388dbb7a80faf04d3514287572d95"
"checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e"
"checksum if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4bac95d9aa0624e7b78187d6fb8ab012b41d9f6f54b1bcb61e61c4845f8357ec"
"checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450"
"checksum itoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5adb58558dcd1d786b5f0bd15f3226ee23486e24b7b58304b60f64dc68e62606"
"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d"
"checksum libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)" = "6fd41f331ac7c5b8ac259b8bf82c75c0fb2e469bbf37d2becbba9a6a2221965b"
"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
"checksum memchr 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a3b4142ab8738a78c51896f704f83c11df047ff1bda9a92a661aa6361552d93d"
"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
"checksum num-bigint 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3eceac7784c5dc97c2d6edf30259b4e153e6e2b42b3c85e9a6e9f45d06caef6e"
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
"checksum num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "630de1ef5cc79d0cdd78b7e33b81f083cbfe90de0f4b2b2f07f905867c70e9fe"
"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
"checksum pairing 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ceda21136251c6d5a422d3d798d8ac22515a6e8d3521bb60c59a8349d36d0d57"
"checksum opaque-debug 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d620c9c26834b34f039489ac0dfdb12c7ac15ccaf818350a64c9b5334a452ad7"
"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831"
"checksum proc-macro-hack 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ba8d4f9257b85eb6cdf13f055cea3190520aab1409ca2ab43493ea4820c25f0"
"checksum proc-macro-hack-impl 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d5cb6f960ad471404618e9817c0e5d10b1ae74cfdf01fab89ea0641fe7fb2892"
"checksum proc-macro2 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b331c6ad3411474cd55540398dc7ad89fc41488e64ec71fdecc9c9b86de96fb0"
"checksum pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d6fdf85cda6cadfae5428a54661d431330b312bc767ddbc57adbedc24da66e32"
"checksum quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "07589615d719a60c8dd8a4622e7946465dfef20d1a428f969e3443e7386d5f45"
"checksum quote 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "dd636425967c33af890042c483632d33fa7a18f19ad1d7ea72e8998c6ef8dea5"
"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
"checksum sapling-crypto 0.0.1 (git+https://github.com/zcash-hackworks/sapling-crypto?rev=21084bde2019c04bd34208e63c3560fe2c02fb0e)" = "<none>"
"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1"
"checksum regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "67d0301b0c6804eca7e3c275119d0b01ff3b7ab9258a65709e608a66312a1025"
"checksum regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d"
"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
"checksum rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "bcfe5b13211b4d78e5c2cadfebd7769197d95c639c35a50057eb4c05de811395"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum ryu 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7153dd96dade874ab973e098cb62fcdbb89a03682e46b144fd09550998d4a4a7"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)" = "22d340507cea0b7e6632900a176101fea959c7065d93ba555072da90aaaafc87"
"checksum serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)" = "234fc8b737737b148ccd625175fc6390f5e4dacfdaa543cb93a3430d984a9119"
"checksum serde_json 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "44dd2cfde475037451fa99b7e5df77aa3cfd1536575fa8e7a538ab36dcde49ae"
"checksum stream-cipher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "30dc6118470d69ce0fdcf7e6f95e95853f7f4f72f80d835d4519577c323814ab"
"checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
"checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b"
"checksum toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a0263c6c02c4db6c8f7681f9fd35e90de799ebd4cfdeab77a38f4ff6b3d8c0d9"
"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5"
"checksum unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6a0180bc61fc5a987082bfa111f4cc95c4caff7f9799f3e46df09163a937aa25"
"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
"checksum url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2a321979c09843d272956e73700d12c4e7d3d92b2ee112b31548aef0d4efc5a6"
"checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4"
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

Cargo.toml

@@ -1,33 +1,14 @@
[package]
name = "librustzcash"
version = "0.1.0"
authors = [
"Sean Bowe <ewillbefull@gmail.com>",
"Jack Grigg <jack@z.cash>",
"Jay Graber <jay@z.cash>",
"Simon Liu <simon@z.cash>"
]
[lib]
name = "rustzcash"
path = "src/rustzcash.rs"
crate-type = ["staticlib"]
[dependencies]
libc = "0.2"
pairing = "0.14.2"
lazy_static = "1"
bellman = "0.1"
byteorder = "1"
rand = "0.4"
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
[dependencies.sapling-crypto]
git = "https://github.com/zcash-hackworks/sapling-crypto"
rev = "21084bde2019c04bd34208e63c3560fe2c02fb0e"
[workspace]
members = [
"bellman",
"librustzcash",
"pairing",
"sapling-crypto",
"zcash_primitives",
"zcash_proofs",
"zcash_wallet",
"zip32",
]
[profile.release]
lto = true

README.md

@@ -1,10 +1,15 @@
# librustzcash
# Zcash Rust crates
This repository contains librustzcash, a static library for Zcash code assets written in Rust.
This repository contains a (work-in-progress) set of Rust crates for
working with Zcash.
## Security Warnings
These libraries are currently under development and have not been fully-reviewed.
## License
Licensed under either of
All code in this workspace is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
@@ -17,4 +22,3 @@ Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

bellman/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
target
Cargo.lock

bellman/COPYRIGHT Normal file

@@ -0,0 +1,14 @@
Copyrights in the "bellman" library are retained by their contributors. No
copyright assignment is required to contribute to the "bellman" library.
The "bellman" library is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

bellman/Cargo.toml Normal file

@@ -0,0 +1,22 @@
[package]
authors = ["Sean Bowe <ewillbefull@gmail.com>"]
description = "zk-SNARK library"
documentation = "https://github.com/ebfull/bellman"
homepage = "https://github.com/ebfull/bellman"
license = "MIT/Apache-2.0"
name = "bellman"
repository = "https://github.com/ebfull/bellman"
version = "0.1.0"
[dependencies]
rand = "0.4"
bit-vec = "0.4.4"
futures = "0.1"
futures-cpupool = "0.1"
num_cpus = "1"
crossbeam = "0.3"
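# Use the copy of `pairing` in this workspace (a path dependency) rather than
# the crates.io release.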
pairing = { path = "../pairing" }
byteorder = "1"
[features]
default = []

bellman/LICENSE-APACHE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

bellman/LICENSE-MIT Normal file

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

bellman/README.md Normal file

@@ -0,0 +1,19 @@
# bellman [![Crates.io](https://img.shields.io/crates/v/bellman.svg)](https://crates.io/crates/bellman) #
This is a research project being built for [Zcash](https://z.cash/).
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

bellman/src/domain.rs Normal file

@@ -0,0 +1,494 @@
//! This module contains an `EvaluationDomain` abstraction for
//! performing various kinds of polynomial arithmetic on top of
//! the scalar field.
//!
//! In pairing-based SNARKs like Groth16, we need to calculate
//! a quotient polynomial over a target polynomial with roots
//! at distinct points associated with each constraint of the
//! constraint system. In order to be efficient, we choose these
//! roots to be the powers of a 2^n root of unity in the field.
//! This allows us to perform polynomial operations in O(n)
//! by performing an O(n log n) FFT over such a domain.
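//!
//! A rough sketch of the multiply-two-polynomials flow (placeholder names
//! `a_coeffs`, `b_coeffs` and `worker`; see the `polynomial_arith` test below
//! for a complete version): pad both coefficient vectors to the size of the
//! product, move into the evaluation domain, multiply pointwise, and convert
//! back.
//!
//! ```ignore
//! let mut a = EvaluationDomain::from_coeffs(a_coeffs)?;
//! let mut b = EvaluationDomain::from_coeffs(b_coeffs)?;
//! a.fft(&worker);
//! b.fft(&worker);
//! a.mul_assign(&worker, &b);
//! a.ifft(&worker);
//! let product = a.into_coeffs();
//! ```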
use pairing::{
Engine,
Field,
PrimeField,
CurveProjective
};
use super::{
SynthesisError
};
use super::multicore::Worker;
pub struct EvaluationDomain<E: Engine, G: Group<E>> {
coeffs: Vec<G>,
exp: u32,
omega: E::Fr,
omegainv: E::Fr,
geninv: E::Fr,
minv: E::Fr
}
impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
pub fn as_ref(&self) -> &[G] {
&self.coeffs
}
pub fn as_mut(&mut self) -> &mut [G] {
&mut self.coeffs
}
pub fn into_coeffs(self) -> Vec<G> {
self.coeffs
}
pub fn from_coeffs(mut coeffs: Vec<G>) -> Result<EvaluationDomain<E, G>, SynthesisError>
{
// Compute the size of our evaluation domain
let mut m = 1;
let mut exp = 0;
while m < coeffs.len() {
m *= 2;
exp += 1;
// The pairing-friendly curve may not be able to support
// large enough (radix2) evaluation domains.
if exp >= E::Fr::S {
return Err(SynthesisError::PolynomialDegreeTooLarge)
}
}
// Compute omega, the 2^exp primitive root of unity
let mut omega = E::Fr::root_of_unity();
for _ in exp..E::Fr::S {
omega.square();
}
// Extend the coeffs vector with zeroes if necessary
coeffs.resize(m, G::group_zero());
Ok(EvaluationDomain {
coeffs: coeffs,
exp: exp,
omega: omega,
omegainv: omega.inverse().unwrap(),
geninv: E::Fr::multiplicative_generator().inverse().unwrap(),
minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap()
})
}
pub fn fft(&mut self, worker: &Worker)
{
best_fft(&mut self.coeffs, worker, &self.omega, self.exp);
}
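// Inverse FFT: run the transform with the inverse root of unity, then scale
// every coefficient by minv = 1/m so that ifft(fft(v)) == v.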
pub fn ifft(&mut self, worker: &Worker)
{
best_fft(&mut self.coeffs, worker, &self.omegainv, self.exp);
worker.scope(self.coeffs.len(), |scope, chunk| {
let minv = self.minv;
for v in self.coeffs.chunks_mut(chunk) {
scope.spawn(move || {
for v in v {
v.group_mul_assign(&minv);
}
});
}
});
}
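// Multiply the coefficient at index i by g^i. With g the field's
// multiplicative generator this moves evaluation onto a coset of the radix-2
// domain (see coset_fft/icoset_fft below).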
pub fn distribute_powers(&mut self, worker: &Worker, g: E::Fr)
{
worker.scope(self.coeffs.len(), |scope, chunk| {
for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
let mut u = g.pow(&[(i * chunk) as u64]);
for v in v.iter_mut() {
v.group_mul_assign(&u);
u.mul_assign(&g);
}
});
}
});
}
pub fn coset_fft(&mut self, worker: &Worker)
{
self.distribute_powers(worker, E::Fr::multiplicative_generator());
self.fft(worker);
}
pub fn icoset_fft(&mut self, worker: &Worker)
{
let geninv = self.geninv;
self.ifft(worker);
self.distribute_powers(worker, geninv);
}
/// This evaluates t(tau) for this domain, which is
/// tau^m - 1 for these radix-2 domains.
pub fn z(&self, tau: &E::Fr) -> E::Fr {
let mut tmp = tau.pow(&[self.coeffs.len() as u64]);
tmp.sub_assign(&E::Fr::one());
tmp
}
/// The target polynomial is the zero polynomial in our
/// evaluation domain, so we must perform division over
/// a coset.
pub fn divide_by_z_on_coset(&mut self, worker: &Worker)
{
let i = self.z(&E::Fr::multiplicative_generator()).inverse().unwrap();
worker.scope(self.coeffs.len(), |scope, chunk| {
for v in self.coeffs.chunks_mut(chunk) {
scope.spawn(move || {
for v in v {
v.group_mul_assign(&i);
}
});
}
});
}
/// Perform O(n) multiplication of two polynomials in the domain.
pub fn mul_assign(&mut self, worker: &Worker, other: &EvaluationDomain<E, Scalar<E>>) {
assert_eq!(self.coeffs.len(), other.coeffs.len());
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
scope.spawn(move || {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_mul_assign(&b.0);
}
});
}
});
}
/// Perform O(n) subtraction of one polynomial from another in the domain.
pub fn sub_assign(&mut self, worker: &Worker, other: &EvaluationDomain<E, G>) {
assert_eq!(self.coeffs.len(), other.coeffs.len());
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
scope.spawn(move || {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_sub_assign(&b);
}
});
}
});
}
}
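// Abstraction over the element type the domain operates on: both field
// scalars (Scalar) and projective curve points (Point) implement it, so the
// same FFT code serves both.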
pub trait Group<E: Engine>: Sized + Copy + Clone + Send + Sync {
fn group_zero() -> Self;
fn group_mul_assign(&mut self, by: &E::Fr);
fn group_add_assign(&mut self, other: &Self);
fn group_sub_assign(&mut self, other: &Self);
}
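// Group implemented for a projective curve point; "multiplying" by a field
// element here means scalar multiplication of the point.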
pub struct Point<G: CurveProjective>(pub G);
impl<G: CurveProjective> PartialEq for Point<G> {
fn eq(&self, other: &Point<G>) -> bool {
self.0 == other.0
}
}
impl<G: CurveProjective> Copy for Point<G> { }
impl<G: CurveProjective> Clone for Point<G> {
fn clone(&self) -> Point<G> {
*self
}
}
impl<G: CurveProjective> Group<G::Engine> for Point<G> {
fn group_zero() -> Self {
Point(G::zero())
}
fn group_mul_assign(&mut self, by: &G::Scalar) {
self.0.mul_assign(by.into_repr());
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
pub struct Scalar<E: Engine>(pub E::Fr);
impl<E: Engine> PartialEq for Scalar<E> {
fn eq(&self, other: &Scalar<E>) -> bool {
self.0 == other.0
}
}
impl<E: Engine> Copy for Scalar<E> { }
impl<E: Engine> Clone for Scalar<E> {
fn clone(&self) -> Scalar<E> {
*self
}
}
impl<E: Engine> Group<E> for Scalar<E> {
fn group_zero() -> Self {
Scalar(E::Fr::zero())
}
fn group_mul_assign(&mut self, by: &E::Fr) {
self.0.mul_assign(by);
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
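// Dispatch to the serial or multi-threaded FFT depending on whether the
// transform is large enough to benefit from the worker's CPUs.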
fn best_fft<E: Engine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32)
{
let log_cpus = worker.log_num_cpus();
if log_n <= log_cpus {
serial_fft(a, omega, log_n);
} else {
parallel_fft(a, worker, omega, log_n, log_cpus);
}
}
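// In-place radix-2 Cooley-Tukey FFT: bit-reversal permutation followed by
// log_n rounds of butterfly operations.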
fn serial_fft<E: Engine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32)
{
fn bitreverse(mut n: u32, l: u32) -> u32 {
let mut r = 0;
for _ in 0..l {
r = (r << 1) | (n & 1);
n >>= 1;
}
r
}
let n = a.len() as u32;
assert_eq!(n, 1 << log_n);
for k in 0..n {
let rk = bitreverse(k, log_n);
if k < rk {
a.swap(rk as usize, k as usize);
}
}
let mut m = 1;
for _ in 0..log_n {
let w_m = omega.pow(&[(n / (2*m)) as u64]);
let mut k = 0;
while k < n {
let mut w = E::Fr::one();
for j in 0..m {
let mut t = a[(k+j+m) as usize];
t.group_mul_assign(&w);
let mut tmp = a[(k+j) as usize];
tmp.group_sub_assign(&t);
a[(k+j+m) as usize] = tmp;
a[(k+j) as usize].group_add_assign(&t);
w.mul_assign(&w_m);
}
k += 2*m;
}
m *= 2;
}
}
fn parallel_fft<E: Engine, T: Group<E>>(
a: &mut [T],
worker: &Worker,
omega: &E::Fr,
log_n: u32,
log_cpus: u32
)
{
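// Decompose the size-2^log_n FFT into 2^log_cpus independent sub-FFTs of size
// 2^(log_n - log_cpus): each thread j first folds the input into its own
// sub-problem (the "shuffle" below), runs a serial FFT on it, and a final pass
// interleaves the sub-results back into the output order.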
assert!(log_n >= log_cpus);
let num_cpus = 1 << log_cpus;
let log_new_n = log_n - log_cpus;
let mut tmp = vec![vec![T::group_zero(); 1 << log_new_n]; num_cpus];
let new_omega = omega.pow(&[num_cpus as u64]);
worker.scope(0, |scope, _| {
let a = &*a;
for (j, tmp) in tmp.iter_mut().enumerate() {
scope.spawn(move || {
// Shuffle into a sub-FFT
let omega_j = omega.pow(&[j as u64]);
let omega_step = omega.pow(&[(j as u64) << log_new_n]);
let mut elt = E::Fr::one();
for i in 0..(1 << log_new_n) {
for s in 0..num_cpus {
let idx = (i + (s << log_new_n)) % (1 << log_n);
let mut t = a[idx];
t.group_mul_assign(&elt);
tmp[i].group_add_assign(&t);
elt.mul_assign(&omega_step);
}
elt.mul_assign(&omega_j);
}
// Perform sub-FFT
serial_fft(tmp, &new_omega, log_new_n);
});
}
});
// TODO: does this hurt or help?
worker.scope(a.len(), |scope, chunk| {
let tmp = &tmp;
for (idx, a) in a.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
let mut idx = idx * chunk;
let mask = (1 << log_cpus) - 1;
for a in a {
*a = tmp[idx & mask][idx >> log_cpus];
idx += 1;
}
});
}
});
}
// Test multiplying various (low degree) polynomials together and
// comparing with naive evaluations.
#[test]
fn polynomial_arith() {
use pairing::bls12_381::Bls12;
use rand::{self, Rand};
fn test_mul<E: Engine, R: rand::Rng>(rng: &mut R)
{
let worker = Worker::new();
for coeffs_a in 0..70 {
for coeffs_b in 0..70 {
let mut a: Vec<_> = (0..coeffs_a).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect();
let mut b: Vec<_> = (0..coeffs_b).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect();
// naive evaluation
let mut naive = vec![Scalar(E::Fr::zero()); coeffs_a + coeffs_b];
for (i1, a) in a.iter().enumerate() {
for (i2, b) in b.iter().enumerate() {
let mut prod = *a;
prod.group_mul_assign(&b.0);
naive[i1 + i2].group_add_assign(&prod);
}
}
a.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero()));
b.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero()));
let mut a = EvaluationDomain::from_coeffs(a).unwrap();
let mut b = EvaluationDomain::from_coeffs(b).unwrap();
a.fft(&worker);
b.fft(&worker);
a.mul_assign(&worker, &b);
a.ifft(&worker);
for (naive, fft) in naive.iter().zip(a.coeffs.iter()) {
assert!(naive == fft);
}
}
}
}
let rng = &mut rand::thread_rng();
test_mul::<Bls12, _>(rng);
}
#[test]
fn fft_composition() {
use pairing::bls12_381::Bls12;
use rand;
fn test_comp<E: Engine, R: rand::Rng>(rng: &mut R)
{
let worker = Worker::new();
for coeffs in 0..10 {
let coeffs = 1 << coeffs;
let mut v = vec![];
for _ in 0..coeffs {
v.push(Scalar::<E>(rng.gen()));
}
let mut domain = EvaluationDomain::from_coeffs(v.clone()).unwrap();
domain.ifft(&worker);
domain.fft(&worker);
assert!(v == domain.coeffs);
domain.fft(&worker);
domain.ifft(&worker);
assert!(v == domain.coeffs);
domain.icoset_fft(&worker);
domain.coset_fft(&worker);
assert!(v == domain.coeffs);
domain.coset_fft(&worker);
domain.icoset_fft(&worker);
assert!(v == domain.coeffs);
}
}
let rng = &mut rand::thread_rng();
test_comp::<Bls12, _>(rng);
}
#[test]
fn parallel_fft_consistency() {
use pairing::bls12_381::Bls12;
use rand::{self, Rand};
use std::cmp::min;
fn test_consistency<E: Engine, R: rand::Rng>(rng: &mut R)
{
let worker = Worker::new();
for _ in 0..5 {
for log_d in 0..10 {
let d = 1 << log_d;
let v1 = (0..d).map(|_| Scalar::<E>(E::Fr::rand(rng))).collect::<Vec<_>>();
let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap();
let mut v2 = EvaluationDomain::from_coeffs(v1.coeffs.clone()).unwrap();
for log_cpus in log_d..min(log_d+1, 3) {
parallel_fft(&mut v1.coeffs, &worker, &v1.omega, log_d, log_cpus);
serial_fft(&mut v2.coeffs, &v2.omega, log_d);
assert!(v1.coeffs == v2.coeffs);
}
}
}
}
let rng = &mut rand::thread_rng();
test_consistency::<Bls12, _>(rng);
}
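// An extra illustrative check in the spirit of the tests above (a sketch):
// z(x) = x^m - 1 vanishes at every point of the evaluation domain itself.
#[test]
fn z_vanishes_on_domain() {
    use rand::{self, Rand};
    use pairing::bls12_381::{Bls12, Fr};

    let rng = &mut rand::thread_rng();

    // Build a domain from 8 random coefficients (m = 8 is already a power of two).
    let coeffs = (0..8).map(|_| Scalar::<Bls12>(Fr::rand(rng))).collect::<Vec<_>>();
    let domain = EvaluationDomain::from_coeffs(coeffs).unwrap();

    // z evaluates to zero at omega^i for every i.
    for i in 0..8u64 {
        assert!(domain.z(&domain.omega.pow(&[i])).is_zero());
    }
}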

View File

@@ -0,0 +1,482 @@
use rand::Rng;
use std::sync::Arc;
use pairing::{
Engine,
PrimeField,
Field,
Wnaf,
CurveProjective,
CurveAffine
};
use super::{
Parameters,
VerifyingKey
};
use ::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use ::domain::{
EvaluationDomain,
Scalar
};
use ::multicore::{
Worker
};
/// Generates a random common reference string for
/// a circuit.
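///
/// A minimal usage sketch (`MyCircuit` stands in for any type implementing
/// `Circuit<Bls12>`; it is not defined in this crate):
///
/// ```ignore
/// use bellman::groth16::generate_random_parameters;
/// use pairing::bls12_381::Bls12;
///
/// let rng = &mut rand::thread_rng();
/// let params = generate_random_parameters::<Bls12, _, _>(MyCircuit::new(), rng)?;
/// ```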
pub fn generate_random_parameters<E, C, R>(
circuit: C,
rng: &mut R
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>, R: Rng
{
let g1 = rng.gen();
let g2 = rng.gen();
let alpha = rng.gen();
let beta = rng.gen();
let gamma = rng.gen();
let delta = rng.gen();
let tau = rng.gen();
generate_parameters::<E, C>(
circuit,
g1,
g2,
alpha,
beta,
gamma,
delta,
tau
)
}
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
struct KeypairAssembly<E: Engine> {
num_inputs: usize,
num_aux: usize,
num_constraints: usize,
at_inputs: Vec<Vec<(E::Fr, usize)>>,
bt_inputs: Vec<Vec<(E::Fr, usize)>>,
ct_inputs: Vec<Vec<(E::Fr, usize)>>,
at_aux: Vec<Vec<(E::Fr, usize)>>,
bt_aux: Vec<Vec<(E::Fr, usize)>>,
ct_aux: Vec<Vec<(E::Fr, usize)>>
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_aux;
self.num_aux += 1;
self.at_aux.push(vec![]);
self.bt_aux.push(vec![]);
self.ct_aux.push(vec![]);
Ok(Variable(Index::Aux(index)))
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_inputs;
self.num_inputs += 1;
self.at_inputs.push(vec![]);
self.bt_inputs.push(vec![]);
self.ct_inputs.push(vec![]);
Ok(Variable(Index::Input(index)))
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
fn eval<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
}
}
}
eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints);
eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
self.num_constraints += 1;
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
/// Create parameters for a circuit, given some toxic waste.
pub fn generate_parameters<E, C>(
circuit: C,
g1: E::G1,
g2: E::G2,
alpha: E::Fr,
beta: E::Fr,
gamma: E::Fr,
delta: E::Fr,
tau: E::Fr
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let mut assembly = KeypairAssembly {
num_inputs: 0,
num_aux: 0,
num_constraints: 0,
at_inputs: vec![],
bt_inputs: vec![],
ct_inputs: vec![],
at_aux: vec![],
bt_aux: vec![],
ct_aux: vec![]
};
// Allocate the "one" input variable
assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;
// Synthesize the circuit.
circuit.synthesize(&mut assembly)?;
// Input constraints to ensure full density of IC query
// x * 0 = 0
for i in 0..assembly.num_inputs {
assembly.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc,
|lc| lc,
);
}
// Create bases for blind evaluation of polynomials at tau
let powers_of_tau = vec![Scalar::<E>(E::Fr::zero()); assembly.num_constraints];
let mut powers_of_tau = EvaluationDomain::from_coeffs(powers_of_tau)?;
// Compute G1 window table
let mut g1_wnaf = Wnaf::new();
let g1_wnaf = g1_wnaf.base(g1, {
// H query
(powers_of_tau.as_ref().len() - 1)
// IC/L queries
+ assembly.num_inputs + assembly.num_aux
// A query
+ assembly.num_inputs + assembly.num_aux
// B query
+ assembly.num_inputs + assembly.num_aux
});
// Compute G2 window table
let mut g2_wnaf = Wnaf::new();
let g2_wnaf = g2_wnaf.base(g2, {
// B query
assembly.num_inputs + assembly.num_aux
});
let gamma_inverse = gamma.inverse().ok_or(SynthesisError::UnexpectedIdentity)?;
let delta_inverse = delta.inverse().ok_or(SynthesisError::UnexpectedIdentity)?;
let worker = Worker::new();
let mut h = vec![E::G1::zero(); powers_of_tau.as_ref().len() - 1];
{
// Compute powers of tau
{
let powers_of_tau = powers_of_tau.as_mut();
worker.scope(powers_of_tau.len(), |scope, chunk| {
for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate()
{
scope.spawn(move || {
let mut current_tau_power = tau.pow(&[(i*chunk) as u64]);
for p in powers_of_tau {
p.0 = current_tau_power;
current_tau_power.mul_assign(&tau);
}
});
}
});
}
// coeff = t(tau) / delta
let mut coeff = powers_of_tau.z(&tau);
coeff.mul_assign(&delta_inverse);
// Compute the H query with multiple threads
worker.scope(h.len(), |scope, chunk| {
for (h, p) in h.chunks_mut(chunk).zip(powers_of_tau.as_ref().chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move || {
// Set values of the H query to g1^{(tau^i * t(tau)) / delta}
for (h, p) in h.iter_mut().zip(p.iter())
{
// Compute final exponent
let mut exp = p.0;
exp.mul_assign(&coeff);
// Exponentiate
*h = g1_wnaf.scalar(exp.into_repr());
}
// Batch normalize
E::G1::batch_normalization(h);
});
}
});
}
// Use inverse FFT to convert powers of tau to Lagrange coefficients
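// (Over this domain, L_j(x) = (1/m) * sum_i omega^{-i*j} * x^i, so the inverse FFT
// of (1, tau, tau^2, ...) is exactly (L_0(tau), L_1(tau), ...).)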
powers_of_tau.ifft(&worker);
let powers_of_tau = powers_of_tau.into_coeffs();
let mut a = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
let mut b_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
let mut b_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux];
let mut ic = vec![E::G1::zero(); assembly.num_inputs];
let mut l = vec![E::G1::zero(); assembly.num_aux];
fn eval<E: Engine>(
// wNAF window tables
g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
// Lagrange coefficients for tau
powers_of_tau: &[Scalar<E>],
// QAP polynomials
at: &[Vec<(E::Fr, usize)>],
bt: &[Vec<(E::Fr, usize)>],
ct: &[Vec<(E::Fr, usize)>],
// Resulting evaluated QAP polynomials
a: &mut [E::G1],
b_g1: &mut [E::G1],
b_g2: &mut [E::G2],
ext: &mut [E::G1],
// Inverse coefficient for ext elements
inv: &E::Fr,
// Trapdoors
alpha: &E::Fr,
beta: &E::Fr,
// Worker
worker: &Worker
)
{
// Sanity check
assert_eq!(a.len(), at.len());
assert_eq!(a.len(), bt.len());
assert_eq!(a.len(), ct.len());
assert_eq!(a.len(), b_g1.len());
assert_eq!(a.len(), b_g2.len());
assert_eq!(a.len(), ext.len());
// Evaluate polynomials in multiple threads
worker.scope(a.len(), |scope, chunk| {
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.chunks_mut(chunk)
.zip(b_g1.chunks_mut(chunk))
.zip(b_g2.chunks_mut(chunk))
.zip(ext.chunks_mut(chunk))
.zip(at.chunks(chunk))
.zip(bt.chunks(chunk))
.zip(ct.chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
let mut g2_wnaf = g2_wnaf.shared();
scope.spawn(move || {
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.iter_mut()
.zip(b_g1.iter_mut())
.zip(b_g2.iter_mut())
.zip(ext.iter_mut())
.zip(at.iter())
.zip(bt.iter())
.zip(ct.iter())
{
fn eval_at_tau<E: Engine>(
powers_of_tau: &[Scalar<E>],
p: &[(E::Fr, usize)]
) -> E::Fr
{
let mut acc = E::Fr::zero();
for &(ref coeff, index) in p {
let mut n = powers_of_tau[index].0;
n.mul_assign(coeff);
acc.add_assign(&n);
}
acc
}
// Evaluate QAP polynomials at tau
let mut at = eval_at_tau(powers_of_tau, at);
let mut bt = eval_at_tau(powers_of_tau, bt);
let ct = eval_at_tau(powers_of_tau, ct);
// Compute A query (in G1)
if !at.is_zero() {
*a = g1_wnaf.scalar(at.into_repr());
}
// Compute B query (in G1/G2)
if !bt.is_zero() {
let bt_repr = bt.into_repr();
*b_g1 = g1_wnaf.scalar(bt_repr);
*b_g2 = g2_wnaf.scalar(bt_repr);
}
at.mul_assign(&beta);
bt.mul_assign(&alpha);
let mut e = at;
e.add_assign(&bt);
e.add_assign(&ct);
e.mul_assign(inv);
*ext = g1_wnaf.scalar(e.into_repr());
}
// Batch normalize
E::G1::batch_normalization(a);
E::G1::batch_normalization(b_g1);
E::G2::batch_normalization(b_g2);
E::G1::batch_normalization(ext);
});
}
});
}
// Evaluate for inputs.
eval(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_inputs,
&assembly.bt_inputs,
&assembly.ct_inputs,
&mut a[0..assembly.num_inputs],
&mut b_g1[0..assembly.num_inputs],
&mut b_g2[0..assembly.num_inputs],
&mut ic,
&gamma_inverse,
&alpha,
&beta,
&worker
);
// Evaluate for auxiliary variables.
eval(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_aux,
&assembly.bt_aux,
&assembly.ct_aux,
&mut a[assembly.num_inputs..],
&mut b_g1[assembly.num_inputs..],
&mut b_g2[assembly.num_inputs..],
&mut l,
&delta_inverse,
&alpha,
&beta,
&worker
);
// Don't allow any elements to be unconstrained, so that
// the L query is always fully dense.
for e in l.iter() {
if e.is_zero() {
return Err(SynthesisError::UnconstrainedVariable);
}
}
let g1 = g1.into_affine();
let g2 = g2.into_affine();
let vk = VerifyingKey::<E> {
alpha_g1: g1.mul(alpha).into_affine(),
beta_g1: g1.mul(beta).into_affine(),
beta_g2: g2.mul(beta).into_affine(),
gamma_g2: g2.mul(gamma).into_affine(),
delta_g1: g1.mul(delta).into_affine(),
delta_g2: g2.mul(delta).into_affine(),
ic: ic.into_iter().map(|e| e.into_affine()).collect()
};
Ok(Parameters {
vk: vk,
h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()),
l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
// Filter points at infinity away from A/B queries
a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
})
}

576
bellman/src/groth16/mod.rs Normal file
View File

@@ -0,0 +1,576 @@
use pairing::{
Engine,
CurveAffine,
EncodedPoint
};
use ::{
SynthesisError
};
use multiexp::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
#[cfg(test)]
mod tests;
mod generator;
mod prover;
mod verifier;
pub use self::generator::*;
pub use self::prover::*;
pub use self::verifier::*;
#[derive(Clone)]
pub struct Proof<E: Engine> {
pub a: E::G1Affine,
pub b: E::G2Affine,
pub c: E::G1Affine
}
impl<E: Engine> PartialEq for Proof<E> {
fn eq(&self, other: &Self) -> bool {
self.a == other.a &&
self.b == other.b &&
self.c == other.c
}
}
impl<E: Engine> Proof<E> {
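/// Writes the proof as three compressed curve points (A, B, C); for BLS12-381
/// this is 48 + 96 + 48 = 192 bytes, the length checked by the serialization
/// test below.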
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.a.into_compressed().as_ref())?;
writer.write_all(self.b.into_compressed().as_ref())?;
writer.write_all(self.c.into_compressed().as_ref())?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Compressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let a = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g2_repr.as_mut())?;
let b = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g1_repr.as_mut())?;
let c = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
Ok(Proof {
a: a,
b: b,
c: c
})
}
}
#[derive(Clone)]
pub struct VerifyingKey<E: Engine> {
// alpha in g1 for verifying and for creating A/C elements of
// proof. Never the point at infinity.
pub alpha_g1: E::G1Affine,
// beta in g1 and g2 for verifying and for creating B/C elements
// of proof. Never the point at infinity.
pub beta_g1: E::G1Affine,
pub beta_g2: E::G2Affine,
// gamma in g2 for verifying. Never the point at infinity.
pub gamma_g2: E::G2Affine,
// delta in g1/g2 for verifying and proving, essentially the magic
// trapdoor that forces the prover to evaluate the C element of the
// proof with only components from the CRS. Never the point at
// infinity.
pub delta_g1: E::G1Affine,
pub delta_g2: E::G2Affine,
// Elements of the form (beta * u_i(tau) + alpha * v_i(tau) + w_i(tau)) / gamma
// for all public inputs. Because all public inputs have a dummy constraint,
// this is the same size as the number of inputs, and never contains points
// at infinity.
pub ic: Vec<E::G1Affine>
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
fn eq(&self, other: &Self) -> bool {
self.alpha_g1 == other.alpha_g1 &&
self.beta_g1 == other.beta_g1 &&
self.beta_g2 == other.beta_g2 &&
self.gamma_g2 == other.gamma_g2 &&
self.delta_g1 == other.delta_g1 &&
self.delta_g2 == other.delta_g2 &&
self.ic == other.ic
}
}
impl<E: Engine> VerifyingKey<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g2.into_uncompressed().as_ref())?;
writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?;
writer.write_all(self.delta_g1.into_uncompressed().as_ref())?;
writer.write_all(self.delta_g2.into_uncompressed().as_ref())?;
writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
for ic in &self.ic {
writer.write_all(ic.into_uncompressed().as_ref())?;
}
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let beta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let delta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let delta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let ic_len = reader.read_u32::<BigEndian>()? as usize;
let mut ic = vec![];
for _ in 0..ic_len {
reader.read_exact(g1_repr.as_mut())?;
let g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
ic.push(g1);
}
Ok(VerifyingKey {
alpha_g1: alpha_g1,
beta_g1: beta_g1,
beta_g2: beta_g2,
gamma_g2: gamma_g2,
delta_g1: delta_g1,
delta_g2: delta_g2,
ic: ic
})
}
}
#[derive(Clone)]
pub struct Parameters<E: Engine> {
pub vk: VerifyingKey<E>,
// Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
// m-2 inclusive. Never contains points at infinity.
pub h: Arc<Vec<E::G1Affine>>,
// Elements of the form (beta * u_i(tau) + alpha * v_i(tau) + w_i(tau)) / delta
// for all auxiliary variables. Variables can never be unconstrained, so this
// never contains points at infinity.
pub l: Arc<Vec<E::G1Affine>>,
// QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains
// points at infinity: polynomials that evaluate to zero are omitted from
// the CRS and the prover can deterministically skip their evaluation.
pub a: Arc<Vec<E::G1Affine>>,
// QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in
// G1 and G2 for C/B queries, respectively. Never contains points at
// infinity for the same reason as the "A" polynomials.
pub b_g1: Arc<Vec<E::G1Affine>>,
pub b_g2: Arc<Vec<E::G2Affine>>
}
impl<E: Engine> PartialEq for Parameters<E> {
fn eq(&self, other: &Self) -> bool {
self.vk == other.vk &&
self.h == other.h &&
self.l == other.l &&
self.a == other.a &&
self.b_g1 == other.b_g1 &&
self.b_g2 == other.b_g2
}
}
impl<E: Engine> Parameters<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
self.vk.write(&mut writer)?;
writer.write_u32::<BigEndian>(self.h.len() as u32)?;
for g in &self.h[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
writer.write_u32::<BigEndian>(self.l.len() as u32)?;
for g in &self.l[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
writer.write_u32::<BigEndian>(self.a.len() as u32)?;
for g in &self.a[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
writer.write_u32::<BigEndian>(self.b_g1.len() as u32)?;
for g in &self.b_g1[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
writer.write_u32::<BigEndian>(self.b_g2.len() as u32)?;
for g in &self.b_g2[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
Ok(())
}
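/// Reads the parameters back from `reader`. With `checked = true` every point
/// is validated (`into_affine` checks that it is on the curve and in the
/// correct subgroup); with `checked = false` the cheaper
/// `into_affine_unchecked` is used, which should only be done with parameters
/// from a trusted source.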
pub fn read<R: Read>(
mut reader: R,
checked: bool
) -> io::Result<Self>
{
let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
} else {
repr
.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
let mut repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
} else {
repr
.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let vk = VerifyingKey::<E>::read(&mut reader)?;
let mut h = vec![];
let mut l = vec![];
let mut a = vec![];
let mut b_g1 = vec![];
let mut b_g2 = vec![];
{
let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
h.push(read_g1(&mut reader)?);
}
}
{
let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
l.push(read_g1(&mut reader)?);
}
}
{
let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
a.push(read_g1(&mut reader)?);
}
}
{
let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
b_g1.push(read_g1(&mut reader)?);
}
}
{
let len = reader.read_u32::<BigEndian>()? as usize;
for _ in 0..len {
b_g2.push(read_g2(&mut reader)?);
}
}
Ok(Parameters {
vk: vk,
h: Arc::new(h),
l: Arc::new(l),
a: Arc::new(a),
b_g1: Arc::new(b_g1),
b_g2: Arc::new(b_g2)
})
}
}
pub struct PreparedVerifyingKey<E: Engine> {
/// Pairing result of alpha*beta
alpha_g1_beta_g2: E::Fqk,
/// -gamma in G2
neg_gamma_g2: <E::G2Affine as CurveAffine>::Prepared,
/// -delta in G2
neg_delta_g2: <E::G2Affine as CurveAffine>::Prepared,
/// Copy of IC from `VerifyingKey`.
ic: Vec<E::G1Affine>
}
pub trait ParameterSource<E: Engine> {
type G1Builder: SourceBuilder<E::G1Affine>;
type G2Builder: SourceBuilder<E::G2Affine>;
fn get_vk(
&mut self,
num_ic: usize
) -> Result<VerifyingKey<E>, SynthesisError>;
fn get_h(
&mut self,
num_h: usize
) -> Result<Self::G1Builder, SynthesisError>;
fn get_l(
&mut self,
num_l: usize
) -> Result<Self::G1Builder, SynthesisError>;
fn get_a(
&mut self,
num_inputs: usize,
num_aux: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
fn get_b_g1(
&mut self,
num_inputs: usize,
num_aux: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
fn get_b_g2(
&mut self,
num_inputs: usize,
num_aux: usize
) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
}
impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
type G2Builder = (Arc<Vec<E::G2Affine>>, usize);
fn get_vk(
&mut self,
_: usize
) -> Result<VerifyingKey<E>, SynthesisError>
{
Ok(self.vk.clone())
}
fn get_h(
&mut self,
_: usize
) -> Result<Self::G1Builder, SynthesisError>
{
Ok((self.h.clone(), 0))
}
fn get_l(
&mut self,
_: usize
) -> Result<Self::G1Builder, SynthesisError>
{
Ok((self.l.clone(), 0))
}
fn get_a(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
{
Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs)))
}
fn get_b_g1(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
{
Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs)))
}
fn get_b_g2(
&mut self,
num_inputs: usize,
_: usize
) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>
{
Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs)))
}
}
#[cfg(test)]
mod test_with_bls12_381 {
use super::*;
use {Circuit, SynthesisError, ConstraintSystem};
use rand::{Rand, thread_rng};
use pairing::{Field};
use pairing::bls12_381::{Bls12, Fr};
#[test]
fn serialization() {
struct MySillyCircuit<E: Engine> {
a: Option<E::Fr>,
b: Option<E::Fr>
}
impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
let c = cs.alloc_input(|| "c", || {
let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
a.mul_assign(&b);
Ok(a)
})?;
cs.enforce(
|| "a*b=c",
|lc| lc + a,
|lc| lc + b,
|lc| lc + c
);
Ok(())
}
}
let rng = &mut thread_rng();
let params = generate_random_parameters::<Bls12, _, _>(
MySillyCircuit { a: None, b: None },
rng
).unwrap();
{
let mut v = vec![];
params.write(&mut v).unwrap();
assert_eq!(v.len(), 2136);
let de_params = Parameters::read(&v[..], true).unwrap();
assert!(params == de_params);
let de_params = Parameters::read(&v[..], false).unwrap();
assert!(params == de_params);
}
let pvk = prepare_verifying_key::<Bls12>(&params.vk);
for _ in 0..100 {
let a = Fr::rand(rng);
let b = Fr::rand(rng);
let mut c = a;
c.mul_assign(&b);
let proof = create_random_proof(
MySillyCircuit {
a: Some(a),
b: Some(b)
},
&params,
rng
).unwrap();
let mut v = vec![];
proof.write(&mut v).unwrap();
assert_eq!(v.len(), 192);
let de_proof = Proof::read(&v[..]).unwrap();
assert!(proof == de_proof);
assert!(verify_proof(&pvk, &proof, &[c]).unwrap());
assert!(!verify_proof(&pvk, &proof, &[a]).unwrap());
}
}
}

View File

@@ -0,0 +1,334 @@
use rand::Rng;
use std::sync::Arc;
use futures::Future;
use pairing::{
Engine,
PrimeField,
Field,
CurveProjective,
CurveAffine
};
use super::{
ParameterSource,
Proof
};
use ::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use ::domain::{
EvaluationDomain,
Scalar
};
use ::multiexp::{
DensityTracker,
FullDensity,
multiexp
};
use ::multicore::{
Worker
};
fn eval<E: Engine>(
lc: &LinearCombination<E>,
mut input_density: Option<&mut DensityTracker>,
mut aux_density: Option<&mut DensityTracker>,
input_assignment: &[E::Fr],
aux_assignment: &[E::Fr]
) -> E::Fr
{
let mut acc = E::Fr::zero();
for &(index, coeff) in lc.0.iter() {
let mut tmp;
match index {
Variable(Index::Input(i)) => {
tmp = input_assignment[i];
if let Some(ref mut v) = input_density {
v.inc(i);
}
},
Variable(Index::Aux(i)) => {
tmp = aux_assignment[i];
if let Some(ref mut v) = aux_density {
v.inc(i);
}
}
}
if coeff == E::Fr::one() {
acc.add_assign(&tmp);
} else {
tmp.mul_assign(&coeff);
acc.add_assign(&tmp);
}
}
acc
}
struct ProvingAssignment<E: Engine> {
// Density of queries
a_aux_density: DensityTracker,
b_input_density: DensityTracker,
b_aux_density: DensityTracker,
// Evaluations of A, B, C polynomials
a: Vec<Scalar<E>>,
b: Vec<Scalar<E>>,
c: Vec<Scalar<E>>,
// Assignments of variables
input_assignment: Vec<E::Fr>,
aux_assignment: Vec<E::Fr>
}
impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
_: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.aux_assignment.push(f()?);
self.a_aux_density.add_element();
self.b_aux_density.add_element();
Ok(Variable(Index::Aux(self.aux_assignment.len() - 1)))
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.input_assignment.push(f()?);
self.b_input_density.add_element();
Ok(Variable(Index::Input(self.input_assignment.len() - 1)))
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
let a = a(LinearCombination::zero());
let b = b(LinearCombination::zero());
let c = c(LinearCombination::zero());
self.a.push(Scalar(eval(
&a,
// Inputs have full density in the A query
// because there are constraints of the
// form x * 0 = 0 for each input.
None,
Some(&mut self.a_aux_density),
&self.input_assignment,
&self.aux_assignment
)));
self.b.push(Scalar(eval(
&b,
Some(&mut self.b_input_density),
Some(&mut self.b_aux_density),
&self.input_assignment,
&self.aux_assignment
)));
self.c.push(Scalar(eval(
&c,
// There is no C polynomial query,
// though there is a (beta)A + (alpha)B + C
// query for all aux variables.
// However, that query has full density.
None,
None,
&self.input_assignment,
&self.aux_assignment
)));
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
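/// Creates a Groth16 proof using randomly sampled blinding factors `r` and `s`.
///
/// A minimal usage sketch (`circuit` implements `Circuit<E>` and `params` come
/// from the generator):
///
/// ```ignore
/// let proof = create_random_proof(circuit, &params, rng)?;
/// ```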
pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(
circuit: C,
params: P,
rng: &mut R
) -> Result<Proof<E>, SynthesisError>
where E: Engine, C: Circuit<E>, R: Rng
{
let r = rng.gen();
let s = rng.gen();
create_proof::<E, C, P>(circuit, params, r, s)
}
pub fn create_proof<E, C, P: ParameterSource<E>>(
circuit: C,
mut params: P,
r: E::Fr,
s: E::Fr
) -> Result<Proof<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let mut prover = ProvingAssignment {
a_aux_density: DensityTracker::new(),
b_input_density: DensityTracker::new(),
b_aux_density: DensityTracker::new(),
a: vec![],
b: vec![],
c: vec![],
input_assignment: vec![],
aux_assignment: vec![]
};
prover.alloc_input(|| "", || Ok(E::Fr::one()))?;
circuit.synthesize(&mut prover)?;
for i in 0..prover.input_assignment.len() {
prover.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc,
|lc| lc,
);
}
let worker = Worker::new();
let vk = params.get_vk(prover.input_assignment.len())?;
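// Compute the coefficients of h(x) = (A(x)B(x) - C(x)) / t(x): move the A, B, C
// evaluations onto a coset (where t(x) is a nonzero constant), do the pointwise
// arithmetic there, transform back to coefficients, and commit to them against
// the H query with a multiexponentiation.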
let h = {
let mut a = EvaluationDomain::from_coeffs(prover.a)?;
let mut b = EvaluationDomain::from_coeffs(prover.b)?;
let mut c = EvaluationDomain::from_coeffs(prover.c)?;
a.ifft(&worker);
a.coset_fft(&worker);
b.ifft(&worker);
b.coset_fft(&worker);
c.ifft(&worker);
c.coset_fft(&worker);
a.mul_assign(&worker, &b);
drop(b);
a.sub_assign(&worker, &c);
drop(c);
a.divide_by_z_on_coset(&worker);
a.icoset_fft(&worker);
let mut a = a.into_coeffs();
let a_len = a.len() - 1;
a.truncate(a_len);
// TODO: parallelize if it's even helpful
let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::<Vec<_>>());
multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
};
// TODO: parallelize if it's even helpful
let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone());
let a_aux_density_total = prover.a_aux_density.get_total_density();
let (a_inputs_source, a_aux_source) = params.get_a(input_assignment.len(), a_aux_density_total)?;
let a_inputs = multiexp(&worker, a_inputs_source, FullDensity, input_assignment.clone());
let a_aux = multiexp(&worker, a_aux_source, Arc::new(prover.a_aux_density), aux_assignment.clone());
let b_input_density = Arc::new(prover.b_input_density);
let b_input_density_total = b_input_density.get_total_density();
let b_aux_density = Arc::new(prover.b_aux_density);
let b_aux_density_total = b_aux_density.get_total_density();
let (b_g1_inputs_source, b_g1_aux_source) = params.get_b_g1(b_input_density_total, b_aux_density_total)?;
let b_g1_inputs = multiexp(&worker, b_g1_inputs_source, b_input_density.clone(), input_assignment.clone());
let b_g1_aux = multiexp(&worker, b_g1_aux_source, b_aux_density.clone(), aux_assignment.clone());
let (b_g2_inputs_source, b_g2_aux_source) = params.get_b_g2(b_input_density_total, b_aux_density_total)?;
let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment);
let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);
if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() {
// If this element is zero, someone is trying to perform a
// subversion-CRS attack.
return Err(SynthesisError::UnexpectedIdentity);
}
let mut g_a = vk.delta_g1.mul(r);
g_a.add_assign_mixed(&vk.alpha_g1);
let mut g_b = vk.delta_g2.mul(s);
g_b.add_assign_mixed(&vk.beta_g2);
let mut g_c;
{
let mut rs = r;
rs.mul_assign(&s);
g_c = vk.delta_g1.mul(rs);
g_c.add_assign(&vk.alpha_g1.mul(s));
g_c.add_assign(&vk.beta_g1.mul(r));
}
let mut a_answer = a_inputs.wait()?;
a_answer.add_assign(&a_aux.wait()?);
g_a.add_assign(&a_answer);
a_answer.mul_assign(s);
g_c.add_assign(&a_answer);
let mut b1_answer = b_g1_inputs.wait()?;
b1_answer.add_assign(&b_g1_aux.wait()?);
let mut b2_answer = b_g2_inputs.wait()?;
b2_answer.add_assign(&b_g2_aux.wait()?);
g_b.add_assign(&b2_answer);
b1_answer.mul_assign(r);
g_c.add_assign(&b1_answer);
g_c.add_assign(&h.wait()?);
g_c.add_assign(&l.wait()?);
Ok(Proof {
a: g_a.into_affine(),
b: g_b.into_affine(),
c: g_c.into_affine()
})
}

View File

@@ -0,0 +1,451 @@
use pairing::{
Engine,
PrimeField,
PrimeFieldRepr,
Field,
SqrtField,
LegendreSymbol,
CurveProjective,
CurveAffine,
PrimeFieldDecodingError,
GroupDecodingError,
EncodedPoint
};
use std::cmp::Ordering;
use std::fmt;
use rand::{Rand, Rng};
use std::num::Wrapping;
const MODULUS_R: Wrapping<u32> = Wrapping(64513);
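// 64513 = 63 * 2^10 + 1 is prime, so this toy field has a multiplicative group of
// order 63 * 2^10 and therefore contains 2^10-th roots of unity (S = 10 below),
// which is what the radix-2 evaluation domains used by the tests rely on.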
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Fr(Wrapping<u32>);
impl fmt::Display for Fr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", (self.0).0)
}
}
impl Rand for Fr {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fr(Wrapping(rng.gen()) % MODULUS_R)
}
}
impl Field for Fr {
fn zero() -> Self {
Fr(Wrapping(0))
}
fn one() -> Self {
Fr(Wrapping(1))
}
fn is_zero(&self) -> bool {
(self.0).0 == 0
}
fn square(&mut self) {
self.0 = (self.0 * self.0) % MODULUS_R;
}
fn double(&mut self) {
self.0 = (self.0 << 1) % MODULUS_R;
}
fn negate(&mut self) {
if !<Fr as Field>::is_zero(self) {
self.0 = MODULUS_R - self.0;
}
}
fn add_assign(&mut self, other: &Self) {
self.0 = (self.0 + other.0) % MODULUS_R;
}
fn sub_assign(&mut self, other: &Self) {
self.0 = ((MODULUS_R + self.0) - other.0) % MODULUS_R;
}
fn mul_assign(&mut self, other: &Self) {
self.0 = (self.0 * other.0) % MODULUS_R;
}
fn inverse(&self) -> Option<Self> {
if <Fr as Field>::is_zero(self) {
None
} else {
Some(self.pow(&[(MODULUS_R.0 as u64) - 2]))
}
}
fn frobenius_map(&mut self, _: usize) {
// identity
}
}
impl SqrtField for Fr {
fn legendre(&self) -> LegendreSymbol {
// s = self^((r - 1) // 2)
let s = self.pow([32256]);
if s == <Fr as Field>::zero() { LegendreSymbol::Zero }
else if s == <Fr as Field>::one() { LegendreSymbol::QuadraticResidue }
else { LegendreSymbol::QuadraticNonResidue }
}
fn sqrt(&self) -> Option<Self> {
// Tonelli-Shanks algorithm for q mod 16 = 1
// https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5)
match self.legendre() {
LegendreSymbol::Zero => Some(*self),
LegendreSymbol::QuadraticNonResidue => None,
LegendreSymbol::QuadraticResidue => {
let mut c = Fr::root_of_unity();
// r = self^((t + 1) // 2)
let mut r = self.pow([32]);
// t = self^t
let mut t = self.pow([63]);
let mut m = Fr::S;
while t != <Fr as Field>::one() {
let mut i = 1;
{
let mut t2i = t;
t2i.square();
loop {
if t2i == <Fr as Field>::one() {
break;
}
t2i.square();
i += 1;
}
}
for _ in 0..(m - i - 1) {
c.square();
}
<Fr as Field>::mul_assign(&mut r, &c);
c.square();
<Fr as Field>::mul_assign(&mut t, &c);
m = i;
}
Some(r)
}
}
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FrRepr([u64; 1]);
impl Ord for FrRepr {
fn cmp(&self, other: &FrRepr) -> Ordering {
(self.0)[0].cmp(&(other.0)[0])
}
}
impl PartialOrd for FrRepr {
fn partial_cmp(&self, other: &FrRepr) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Rand for FrRepr {
fn rand<R: Rng>(rng: &mut R) -> Self {
FrRepr([rng.gen()])
}
}
impl fmt::Display for FrRepr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", (self.0)[0])
}
}
impl From<u64> for FrRepr {
fn from(v: u64) -> FrRepr {
FrRepr([v])
}
}
impl From<Fr> for FrRepr {
fn from(v: Fr) -> FrRepr {
FrRepr([(v.0).0 as u64])
}
}
impl AsMut<[u64]> for FrRepr {
fn as_mut(&mut self) -> &mut [u64] {
&mut self.0[..]
}
}
impl AsRef<[u64]> for FrRepr {
fn as_ref(&self) -> &[u64] {
&self.0[..]
}
}
impl Default for FrRepr {
fn default() -> FrRepr {
FrRepr::from(0u64)
}
}
impl PrimeFieldRepr for FrRepr {
fn sub_noborrow(&mut self, other: &Self) {
self.0[0] = self.0[0].wrapping_sub(other.0[0]);
}
fn add_nocarry(&mut self, other: &Self) {
self.0[0] = self.0[0].wrapping_add(other.0[0]);
}
fn num_bits(&self) -> u32 {
64 - self.0[0].leading_zeros()
}
fn is_zero(&self) -> bool {
self.0[0] == 0
}
fn is_odd(&self) -> bool {
!self.is_even()
}
fn is_even(&self) -> bool {
self.0[0] % 2 == 0
}
fn div2(&mut self) {
self.shr(1)
}
fn shr(&mut self, amt: u32) {
self.0[0] >>= amt;
}
fn mul2(&mut self) {
self.shl(1)
}
fn shl(&mut self, amt: u32) {
self.0[0] <<= amt;
}
}
impl PrimeField for Fr {
type Repr = FrRepr;
const NUM_BITS: u32 = 16;
const CAPACITY: u32 = 15;
const S: u32 = 10;
fn from_repr(repr: FrRepr) -> Result<Self, PrimeFieldDecodingError> {
if repr.0[0] >= (MODULUS_R.0 as u64) {
Err(PrimeFieldDecodingError::NotInField(format!("{}", repr)))
} else {
Ok(Fr(Wrapping(repr.0[0] as u32)))
}
}
fn into_repr(&self) -> FrRepr {
FrRepr::from(*self)
}
fn char() -> FrRepr {
Fr(MODULUS_R).into()
}
fn multiplicative_generator() -> Fr {
Fr(Wrapping(5))
}
fn root_of_unity() -> Fr {
Fr(Wrapping(57751))
}
}
#[derive(Clone)]
pub struct DummyEngine;
impl Engine for DummyEngine {
type Fr = Fr;
type G1 = Fr;
type G1Affine = Fr;
type G2 = Fr;
type G2Affine = Fr;
type Fq = Fr;
type Fqe = Fr;
// TODO: This should be F_645131 or something. Doesn't matter for now.
type Fqk = Fr;
fn miller_loop<'a, I>(i: I) -> Self::Fqk
where I: IntoIterator<Item=&'a (
&'a <Self::G1Affine as CurveAffine>::Prepared,
&'a <Self::G2Affine as CurveAffine>::Prepared
)>
{
let mut acc = <Fr as Field>::zero();
for &(a, b) in i {
let mut tmp = *a;
<Fr as Field>::mul_assign(&mut tmp, b);
<Fr as Field>::add_assign(&mut acc, &tmp);
}
acc
}
/// Perform final exponentiation of the result of a miller loop.
fn final_exponentiation(this: &Self::Fqk) -> Option<Self::Fqk>
{
Some(*this)
}
}
impl CurveProjective for Fr {
type Affine = Fr;
type Base = Fr;
type Scalar = Fr;
type Engine = DummyEngine;
fn zero() -> Self {
<Fr as Field>::zero()
}
fn one() -> Self {
<Fr as Field>::one()
}
fn is_zero(&self) -> bool {
<Fr as Field>::is_zero(self)
}
fn batch_normalization(_: &mut [Self]) {
}
fn is_normalized(&self) -> bool {
true
}
fn double(&mut self) {
<Fr as Field>::double(self);
}
fn add_assign(&mut self, other: &Self) {
<Fr as Field>::add_assign(self, other);
}
fn add_assign_mixed(&mut self, other: &Self) {
<Fr as Field>::add_assign(self, other);
}
fn negate(&mut self) {
<Fr as Field>::negate(self);
}
fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S)
{
let tmp = Fr::from_repr(other.into()).unwrap();
<Fr as Field>::mul_assign(self, &tmp);
}
fn into_affine(&self) -> Fr {
*self
}
fn recommended_wnaf_for_scalar(_: <Self::Scalar as PrimeField>::Repr) -> usize {
3
}
fn recommended_wnaf_for_num_scalars(_: usize) -> usize {
3
}
}
#[derive(Copy, Clone)]
pub struct FakePoint;
impl AsMut<[u8]> for FakePoint {
fn as_mut(&mut self) -> &mut [u8] {
unimplemented!()
}
}
impl AsRef<[u8]> for FakePoint {
fn as_ref(&self) -> &[u8] {
unimplemented!()
}
}
impl EncodedPoint for FakePoint {
type Affine = Fr;
fn empty() -> Self {
unimplemented!()
}
fn size() -> usize {
unimplemented!()
}
fn into_affine(&self) -> Result<Self::Affine, GroupDecodingError> {
unimplemented!()
}
fn into_affine_unchecked(&self) -> Result<Self::Affine, GroupDecodingError> {
unimplemented!()
}
fn from_affine(_: Self::Affine) -> Self {
unimplemented!()
}
}
impl CurveAffine for Fr {
type Pair = Fr;
type PairingResult = Fr;
type Compressed = FakePoint;
type Uncompressed = FakePoint;
type Prepared = Fr;
type Projective = Fr;
type Base = Fr;
type Scalar = Fr;
type Engine = DummyEngine;
fn zero() -> Self {
<Fr as Field>::zero()
}
fn one() -> Self {
<Fr as Field>::one()
}
fn is_zero(&self) -> bool {
<Fr as Field>::is_zero(self)
}
fn negate(&mut self) {
<Fr as Field>::negate(self);
}
fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, other: S) -> Self::Projective
{
let mut res = *self;
let tmp = Fr::from_repr(other.into()).unwrap();
<Fr as Field>::mul_assign(&mut res, &tmp);
res
}
fn prepare(&self) -> Self::Prepared {
*self
}
fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
self.mul(*other)
}
fn into_projective(&self) -> Self::Projective {
*self
}
}

View File

@@ -0,0 +1,400 @@
use pairing::{
Engine,
Field,
PrimeField
};
mod dummy_engine;
use self::dummy_engine::*;
use std::marker::PhantomData;
use ::{
Circuit,
ConstraintSystem,
SynthesisError
};
use super::{
generate_parameters,
prepare_verifying_key,
create_proof,
verify_proof
};
struct XORDemo<E: Engine> {
a: Option<bool>,
b: Option<bool>,
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for XORDemo<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a_boolean_constraint",
|lc| lc + CS::one() - a_var,
|lc| lc + a_var,
|lc| lc
);
let b_var = cs.alloc(|| "b", || {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "b_boolean_constraint",
|lc| lc + CS::one() - b_var,
|lc| lc + b_var,
|lc| lc
);
let c_var = cs.alloc_input(|| "c", || {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "c_xor_constraint",
|lc| lc + a_var + a_var,
|lc| lc + b_var,
|lc| lc + a_var + b_var - c_var
);
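// The constraint (2a) * b = a + b - c forces c = a XOR b for boolean a and b:
// e.g. a = 1, b = 0 gives 0 = 1 + 0 - c, so c = 1; a = 1, b = 1 gives
// 2 = 1 + 1 - c, so c = 0.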
Ok(())
}
}
#[test]
fn test_xordemo() {
let g1 = Fr::one();
let g2 = Fr::one();
let alpha = Fr::from_str("48577").unwrap();
let beta = Fr::from_str("22580").unwrap();
let gamma = Fr::from_str("53332").unwrap();
let delta = Fr::from_str("5481").unwrap();
let tau = Fr::from_str("3673").unwrap();
let params = {
let c = XORDemo::<DummyEngine> {
a: None,
b: None,
_marker: PhantomData
};
generate_parameters(
c,
g1,
g2,
alpha,
beta,
gamma,
delta,
tau
).unwrap()
};
// This will synthesize the constraint system:
//
// public inputs: a_0 = 1, a_1 = c
// aux inputs: a_2 = a, a_3 = b
// constraints:
// (a_0 - a_2) * (a_2) = 0
// (a_0 - a_3) * (a_3) = 0
// (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1)
// (a_0) * 0 = 0
// (a_1) * 0 = 0
// The evaluation domain has size 8. The H query should
// have 7 elements (it's a quotient polynomial)
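// (deg(A*B - C) <= 14 while deg(t) = 8, so the quotient h has degree at most 6,
// i.e. 7 coefficients; equivalently, the H query always has m - 1 elements.)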
assert_eq!(7, params.h.len());
let mut root_of_unity = Fr::root_of_unity();
// We expect this to be a 2^10 root of unity
assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 10]));
// Let's turn it into a 2^3 root of unity.
root_of_unity = root_of_unity.pow(&[1 << 7]);
assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 3]));
assert_eq!(Fr::from_str("20201").unwrap(), root_of_unity);
// Let's compute all the points in our evaluation domain.
let mut points = Vec::with_capacity(8);
for i in 0..8 {
points.push(root_of_unity.pow(&[i]));
}
// Let's compute t(tau) = (tau - p_0)(tau - p_1)...
// = tau^8 - 1
let mut t_at_tau = tau.pow(&[8]);
t_at_tau.sub_assign(&Fr::one());
{
let mut tmp = Fr::one();
for p in &points {
let mut term = tau;
term.sub_assign(p);
tmp.mul_assign(&term);
}
assert_eq!(tmp, t_at_tau);
}
// We expect our H query to be 7 elements of the form...
// {tau^i t(tau) / delta}
let delta_inverse = delta.inverse().unwrap();
let gamma_inverse = gamma.inverse().unwrap();
{
let mut coeff = delta_inverse;
coeff.mul_assign(&t_at_tau);
let mut cur = Fr::one();
for h in params.h.iter() {
let mut tmp = cur;
tmp.mul_assign(&coeff);
assert_eq!(*h, tmp);
cur.mul_assign(&tau);
}
}
// The density of the IC query is 2 (2 inputs)
assert_eq!(2, params.vk.ic.len());
// The density of the L query is 2 (2 aux variables)
assert_eq!(2, params.l.len());
// The density of the A query is 4 (each variable is in at least one A term)
assert_eq!(4, params.a.len());
// The density of the B query is 2 (two variables are in at least one B term)
assert_eq!(2, params.b_g1.len());
assert_eq!(2, params.b_g2.len());
/*
Lagrange interpolation polynomials in our evaluation domain:
,-------------------------------. ,-------------------------------. ,-------------------------------.
|            A TERM             | |            B TERM             | |            C TERM             |
`-------------------------------' `-------------------------------' `-------------------------------'
|  a_0  |  a_1  |  a_2  |  a_3  | |  a_0  |  a_1  |  a_2  |  a_3  | |  a_0  |  a_1  |  a_2  |  a_3  |
|   1   |   0   | 64512 |   0   | |   0   |   0   |   1   |   0   | |   0   |   0   |   0   |   0   |
|   1   |   0   |   0   | 64512 | |   0   |   0   |   0   |   1   | |   0   |   0   |   0   |   0   |
|   0   |   0   |   2   |   0   | |   0   |   0   |   0   |   1   | |   0   | 64512 |   1   |   1   |
|   1   |   0   |   0   |   0   | |   0   |   0   |   0   |   0   | |   0   |   0   |   0   |   0   |
|   0   |   1   |   0   |   0   | |   0   |   0   |   0   |   0   | |   0   |   0   |   0   |   0   |
`-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------'
Example for u_0:
sage: r = 64513
sage: Fr = GF(r)
sage: omega = (Fr(5)^63)^(2^7)
sage: tau = Fr(3673)
sage: R.<x> = PolynomialRing(Fr, 'x')
sage: def eval(tau, c0, c1, c2, c3, c4):
....: p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)])
....: return p.substitute(tau)
sage: eval(tau, 1, 1, 0, 1, 0)
59158
*/
let u_i = [59158, 48317, 21767, 10402].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
let v_i = [0, 0, 60619, 30791].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
let w_i = [0, 23320, 41193, 41193].iter().map(|e| {
Fr::from_str(&format!("{}", e)).unwrap()
}).collect::<Vec<Fr>>();
for (u, a) in u_i.iter()
.zip(&params.a[..])
{
assert_eq!(u, a);
}
for (v, b) in v_i.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g1[..])
{
assert_eq!(v, b);
}
for (v, b) in v_i.iter()
.filter(|&&e| e != Fr::zero())
.zip(&params.b_g2[..])
{
assert_eq!(v, b);
}
for i in 0..4 {
let mut tmp1 = beta;
tmp1.mul_assign(&u_i[i]);
let mut tmp2 = alpha;
tmp2.mul_assign(&v_i[i]);
tmp1.add_assign(&tmp2);
tmp1.add_assign(&w_i[i]);
if i < 2 {
// Check the correctness of the IC query elements
tmp1.mul_assign(&gamma_inverse);
assert_eq!(tmp1, params.vk.ic[i]);
} else {
// Check the correctness of the L query elements
tmp1.mul_assign(&delta_inverse);
assert_eq!(tmp1, params.l[i - 2]);
}
}
// Check consistency of the other elements
assert_eq!(alpha, params.vk.alpha_g1);
assert_eq!(beta, params.vk.beta_g1);
assert_eq!(beta, params.vk.beta_g2);
assert_eq!(gamma, params.vk.gamma_g2);
assert_eq!(delta, params.vk.delta_g1);
assert_eq!(delta, params.vk.delta_g2);
let pvk = prepare_verifying_key(&params.vk);
let r = Fr::from_str("27134").unwrap();
let s = Fr::from_str("17146").unwrap();
let proof = {
let c = XORDemo {
a: Some(true),
b: Some(false),
_marker: PhantomData
};
create_proof(
c,
&params,
r,
s
).unwrap()
};
// A(x) =
// a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) +
// a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) +
// a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) +
// a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) +
{
// proof A = alpha + A(tau) + delta * r
let mut expected_a = delta;
expected_a.mul_assign(&r);
expected_a.add_assign(&alpha);
expected_a.add_assign(&u_i[0]); // a_0 = 1
expected_a.add_assign(&u_i[1]); // a_1 = 1
expected_a.add_assign(&u_i[2]); // a_2 = 1
// a_3 = 0
assert_eq!(proof.a, expected_a);
}
// B(x) =
// a_0 * (0) +
// a_1 * (0) +
// a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) +
// a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385)
{
// proof B = beta + B(tau) + delta * s
let mut expected_b = delta;
expected_b.mul_assign(&s);
expected_b.add_assign(&beta);
expected_b.add_assign(&v_i[0]); // a_0 = 1
expected_b.add_assign(&v_i[1]); // a_1 = 1
expected_b.add_assign(&v_i[2]); // a_2 = 1
// a_3 = 0
assert_eq!(proof.b, expected_b);
}
// C(x) =
// a_0 * (0) +
// a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) +
// a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) +
// a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449)
//
// If A * B = C at each point in the domain, then the following polynomial...
// P(x) = A(x) * B(x) - C(x)
// = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481
//
// ... should be divisible by t(x), producing the quotient polynomial:
// h(x) = P(x) / t(x)
// = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032
{
let mut expected_c = Fr::zero();
// A * s
let mut tmp = proof.a;
tmp.mul_assign(&s);
expected_c.add_assign(&tmp);
// B * r
let mut tmp = proof.b;
tmp.mul_assign(&r);
expected_c.add_assign(&tmp);
// delta * r * s
let mut tmp = delta;
tmp.mul_assign(&r);
tmp.mul_assign(&s);
expected_c.sub_assign(&tmp);
// L query answer
// a_2 = 1, a_3 = 0
expected_c.add_assign(&params.l[0]);
// H query answer
for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() {
let coeff = Fr::from_str(&format!("{}", coeff)).unwrap();
let mut tmp = params.h[i];
tmp.mul_assign(&coeff);
expected_c.add_assign(&tmp);
}
assert_eq!(expected_c, proof.c);
}
assert!(verify_proof(
&pvk,
&proof,
&[Fr::one()]
).unwrap());
}

View File

@@ -0,0 +1,66 @@
use pairing::{
Engine,
CurveProjective,
CurveAffine,
PrimeField
};
use super::{
Proof,
VerifyingKey,
PreparedVerifyingKey
};
use ::{
SynthesisError
};
pub fn prepare_verifying_key<E: Engine>(
vk: &VerifyingKey<E>
) -> PreparedVerifyingKey<E>
{
let mut gamma = vk.gamma_g2;
gamma.negate();
let mut delta = vk.delta_g2;
delta.negate();
PreparedVerifyingKey {
alpha_g1_beta_g2: E::pairing(vk.alpha_g1, vk.beta_g2),
neg_gamma_g2: gamma.prepare(),
neg_delta_g2: delta.prepare(),
ic: vk.ic.clone()
}
}
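/// Verifies a proof against the prepared verifying key and the circuit's
/// public inputs. The implicit leading "one" input is not passed, so
/// `pvk.ic` always has one more element than `public_inputs`.
///
/// A minimal usage sketch (`public_value` is whatever the circuit exposed as
/// its public input):
///
/// ```ignore
/// let pvk = prepare_verifying_key(&params.vk);
/// assert!(verify_proof(&pvk, &proof, &[public_value])?);
/// ```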
pub fn verify_proof<'a, E: Engine>(
pvk: &'a PreparedVerifyingKey<E>,
proof: &Proof<E>,
public_inputs: &[E::Fr]
) -> Result<bool, SynthesisError>
{
if (public_inputs.len() + 1) != pvk.ic.len() {
return Err(SynthesisError::MalformedVerifyingKey);
}
let mut acc = pvk.ic[0].into_projective();
for (i, b) in public_inputs.iter().zip(pvk.ic.iter().skip(1)) {
acc.add_assign(&b.mul(i.into_repr()));
}
// The original verification equation is:
// A * B = alpha * beta + inputs * gamma + C * delta
// ... however, we rearrange it so that it is:
// A * B - inputs * gamma - C * delta = alpha * beta
// or equivalently:
// A * B + inputs * (-gamma) + C * (-delta) = alpha * beta
// which allows us to do a single final exponentiation.
Ok(E::final_exponentiation(
&E::miller_loop([
(&proof.a.prepare(), &proof.b.prepare()),
(&acc.into_affine().prepare(), &pvk.neg_gamma_g2),
(&proof.c.prepare(), &pvk.neg_delta_g2)
].into_iter())
).unwrap() == pvk.alpha_g1_beta_g2)
}

424
bellman/src/lib.rs Normal file
View File

@@ -0,0 +1,424 @@
extern crate pairing;
extern crate rand;
extern crate num_cpus;
extern crate futures;
extern crate futures_cpupool;
extern crate bit_vec;
extern crate crossbeam;
extern crate byteorder;
pub mod multicore;
mod multiexp;
pub mod domain;
pub mod groth16;
use pairing::{Engine, Field};
use std::ops::{Add, Sub};
use std::fmt;
use std::error::Error;
use std::io;
use std::marker::PhantomData;
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `Circuit` trait represents a
/// circuit that can be synthesized. The `synthesize` method is called during
/// CRS generation and during proving.
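///
/// A minimal sketch of an implementation (a multiplication circuit proving
/// knowledge of `a` and `b` with public product `c`; the names are
/// illustrative, and `pairing::Field` must be in scope for `mul_assign`):
///
/// ```ignore
/// struct Multiply<E: Engine> { a: Option<E::Fr>, b: Option<E::Fr> }
///
/// impl<E: Engine> Circuit<E> for Multiply<E> {
///     fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
///         let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
///         let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
///         let c = cs.alloc_input(|| "c", || {
///             let mut c = self.a.ok_or(SynthesisError::AssignmentMissing)?;
///             c.mul_assign(&self.b.ok_or(SynthesisError::AssignmentMissing)?);
///             Ok(c)
///         })?;
///         cs.enforce(|| "a * b = c", |lc| lc + a, |lc| lc + b, |lc| lc + c);
///         Ok(())
///     }
/// }
/// ```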
pub trait Circuit<E: Engine> {
/// Synthesize the circuit into a rank-1 quadratic constraint system
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>;
}
/// Represents a variable in our constraint system.
#[derive(Copy, Clone, Debug)]
pub struct Variable(Index);
impl Variable {
/// This constructs a variable with an arbitrary index.
/// Circuit implementations are not recommended to use this.
pub fn new_unchecked(idx: Index) -> Variable {
Variable(idx)
}
/// This returns the index underlying the variable.
/// Circuit implementations are not recommended to use this.
pub fn get_unchecked(&self) -> Index {
self.0
}
}
/// Represents the index of either an input variable or
/// auxiliary variable.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Index {
Input(usize),
Aux(usize)
}
/// This represents a linear combination of some variables, with coefficients
/// in the scalar field of a pairing-friendly elliptic curve group.
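///
/// Combinations are built by adding or subtracting variables and
/// `(coefficient, variable)` pairs. A sketch (`a` and `b` are `Variable`s,
/// `two` is an `E::Fr`):
///
/// ```ignore
/// // represents 2*a - b
/// let lc = LinearCombination::<E>::zero() + (two, a) - b;
/// ```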
#[derive(Clone)]
pub struct LinearCombination<E: Engine>(Vec<(Variable, E::Fr)>);
impl<E: Engine> AsRef<[(Variable, E::Fr)]> for LinearCombination<E> {
fn as_ref(&self) -> &[(Variable, E::Fr)] {
&self.0
}
}
impl<E: Engine> LinearCombination<E> {
pub fn zero() -> LinearCombination<E> {
LinearCombination(vec![])
}
}
impl<E: Engine> Add<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
self.0.push((var, coeff));
self
}
}
impl<E: Engine> Sub<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, (mut coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
coeff.negate();
self + (coeff, var)
}
}
impl<E: Engine> Add<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(self, other: Variable) -> LinearCombination<E> {
self + (E::Fr::one(), other)
}
}
impl<E: Engine> Sub<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, other: Variable) -> LinearCombination<E> {
self - (E::Fr::one(), other)
}
}
impl<'a, E: Engine> Add<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self + (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self - (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Add<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self + (tmp, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self - (tmp, s.0);
}
self
}
}
/// This is an error that could occur in circuit synthesis contexts,
/// such as CRS generation, proving or verification.
#[derive(Debug)]
pub enum SynthesisError {
/// During synthesis, we lacked knowledge of a variable assignment.
AssignmentMissing,
/// During synthesis, we divided by zero.
DivisionByZero,
/// During synthesis, we constructed an unsatisfiable constraint system.
Unsatisfiable,
/// During synthesis, our polynomials ended up being of too high a degree
PolynomialDegreeTooLarge,
/// During proof generation, we encountered an identity in the CRS
UnexpectedIdentity,
/// During proof generation, we encountered an I/O error with the CRS
IoError(io::Error),
/// During verification, our verifying key was malformed.
MalformedVerifyingKey,
/// During CRS generation, we observed an unconstrained auxiliary variable
UnconstrainedVariable
}
impl From<io::Error> for SynthesisError {
fn from(e: io::Error) -> SynthesisError {
SynthesisError::IoError(e)
}
}
impl Error for SynthesisError {
fn description(&self) -> &str {
match *self {
SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed",
SynthesisError::DivisionByZero => "division by zero",
SynthesisError::Unsatisfiable => "unsatisfiable constraint system",
SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large",
SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS",
SynthesisError::IoError(_) => "encountered an I/O error",
SynthesisError::MalformedVerifyingKey => "malformed verifying key",
SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained"
}
}
}
impl fmt::Display for SynthesisError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
if let &SynthesisError::IoError(ref e) = self {
write!(f, "I/O error: ")?;
e.fmt(f)
} else {
write!(f, "{}", self.description())
}
}
}
/// Represents a constraint system which can have new variables
/// allocated and constraints between them formed.
pub trait ConstraintSystem<E: Engine>: Sized {
/// Represents the type of the "root" of this constraint system
/// so that nested namespaces can minimize indirection.
type Root: ConstraintSystem<E>;
/// Return the "one" input variable
fn one() -> Variable {
Variable::new_unchecked(Index::Input(0))
}
/// Allocate a private variable in the constraint system. The provided function is used to
/// determine the assignment of the variable. The given `annotation` function is invoked
/// in testing contexts in order to derive a unique name for this variable in the current
/// namespace.
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Allocate a public variable in the constraint system. The provided function is used to
/// determine the assignment of the variable.
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
/// in order to derive a unique name for the constraint in the current namespace.
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
/// Create a new (sub)namespace and enter into it. Not intended
/// for downstream use; use `namespace` instead.
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR;
/// Exit out of the existing namespace. Not intended for
/// downstream use; use `namespace` instead.
fn pop_namespace(&mut self);
/// Gets the "root" constraint system, bypassing the namespacing.
/// Not intended for downstream use; use `namespace` instead.
fn get_root(&mut self) -> &mut Self::Root;
/// Begin a namespace for this constraint system.
fn namespace<'a, NR, N>(
&'a mut self,
name_fn: N
) -> Namespace<'a, E, Self::Root>
where NR: Into<String>, N: FnOnce() -> NR
{
self.get_root().push_namespace(name_fn);
Namespace(self.get_root(), PhantomData)
}
}
/// This is a "namespaced" constraint system which borrows a constraint system (pushing
/// a namespace context) and, when dropped, pops out of the namespace context.
pub struct Namespace<'a, E: Engine, CS: ConstraintSystem<E> + 'a>(&'a mut CS, PhantomData<E>);
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for Namespace<'cs, E, CS> {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
self.0.enforce(annotation, a, b, c)
}
// Downstream users who use `namespace` will never interact with these
// functions and they will never be invoked because the namespace is
// never a root constraint system.
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
panic!("only the root's push_namespace should be called");
}
fn pop_namespace(&mut self)
{
panic!("only the root's pop_namespace should be called");
}
fn get_root(&mut self) -> &mut Self::Root
{
self.0.get_root()
}
}
impl<'a, E: Engine, CS: ConstraintSystem<E>> Drop for Namespace<'a, E, CS> {
fn drop(&mut self) {
self.get_root().pop_namespace()
}
}
/// Convenience implementation of ConstraintSystem<E> for mutable references to
/// constraint systems.
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for &'cs mut CS {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
(**self).enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
{
(**self).push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
(**self).pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
(**self).get_root()
}
}
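
To make the traits above concrete, here is a minimal, hedged sketch of a downstream circuit built on this API. The type name `SquareDemo` and the statement x · x = y are illustrative only and do not appear in this PR; the in-tree MiMC test further below is the real example.

```rust
extern crate bellman;
extern crate pairing;

use bellman::{Circuit, ConstraintSystem, SynthesisError};
use pairing::{Engine, Field};

/// Hypothetical circuit proving knowledge of x such that x * x = y,
/// where y is a public input.
struct SquareDemo<E: Engine> {
    x: Option<E::Fr>,
}

impl<E: Engine> Circuit<E> for SquareDemo<E> {
    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Allocate the private witness x.
        let x_value = self.x;
        let x = cs.alloc(|| "x", || {
            x_value.ok_or(SynthesisError::AssignmentMissing)
        })?;

        // Allocate the public input y and assign it x^2.
        let y = cs.alloc_input(|| "y", || {
            let mut y_value = x_value.ok_or(SynthesisError::AssignmentMissing)?;
            y_value.square();
            Ok(y_value)
        })?;

        // Enforce x * x = y using the LinearCombination operators above.
        cs.enforce(
            || "x * x = y",
            |lc| lc + x,
            |lc| lc + x,
            |lc| lc + y
        );

        Ok(())
    }
}
```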

106
bellman/src/multicore.rs Normal file
View File

@ -0,0 +1,106 @@
//! This is an interface for dealing with the kinds of
//! parallel computations involved in bellman. It's
//! currently just a thin wrapper around CpuPool and
//! crossbeam but may be extended in the future to
//! allow for various parallelism strategies.
use num_cpus;
use futures::{Future, IntoFuture, Poll};
use futures_cpupool::{CpuPool, CpuFuture};
use crossbeam::{self, Scope};
#[derive(Clone)]
pub struct Worker {
cpus: usize,
pool: CpuPool
}
impl Worker {
// We don't expose this outside the library so that
// all `Worker` instances have the same number of
// CPUs configured.
pub(crate) fn new_with_cpus(cpus: usize) -> Worker {
Worker {
cpus: cpus,
pool: CpuPool::new(cpus)
}
}
pub fn new() -> Worker {
Self::new_with_cpus(num_cpus::get())
}
pub fn log_num_cpus(&self) -> u32 {
log2_floor(self.cpus)
}
pub fn compute<F, R>(
&self, f: F
) -> WorkerFuture<R::Item, R::Error>
where F: FnOnce() -> R + Send + 'static,
R: IntoFuture + 'static,
R::Future: Send + 'static,
R::Item: Send + 'static,
R::Error: Send + 'static
{
WorkerFuture {
future: self.pool.spawn_fn(f)
}
}
pub fn scope<'a, F, R>(
&self,
elements: usize,
f: F
) -> R
where F: FnOnce(&Scope<'a>, usize) -> R
{
let chunk_size = if elements < self.cpus {
1
} else {
elements / self.cpus
};
crossbeam::scope(|scope| {
f(scope, chunk_size)
})
}
}
pub struct WorkerFuture<T, E> {
future: CpuFuture<T, E>
}
impl<T: Send + 'static, E: Send + 'static> Future for WorkerFuture<T, E> {
type Item = T;
type Error = E;
fn poll(&mut self) -> Poll<Self::Item, Self::Error>
{
self.future.poll()
}
}
fn log2_floor(num: usize) -> u32 {
assert!(num > 0);
let mut pow = 0;
while (1 << (pow+1)) <= num {
pow += 1;
}
pow
}
#[test]
fn test_log2_floor() {
assert_eq!(log2_floor(1), 0);
assert_eq!(log2_floor(2), 1);
assert_eq!(log2_floor(3), 1);
assert_eq!(log2_floor(4), 2);
assert_eq!(log2_floor(5), 2);
assert_eq!(log2_floor(6), 2);
assert_eq!(log2_floor(7), 2);
assert_eq!(log2_floor(8), 3);
}
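
A hedged usage sketch of the interface above (not part of this file): the pattern used throughout this PR is to hand `Worker::scope` a buffer length and then mutate disjoint chunks of that buffer from spawned threads; the crossbeam scope joins every thread before `scope` returns. The helper name `double_all` is hypothetical.

```rust
use bellman::multicore::Worker;

/// Illustrative helper (hypothetical): double every element in parallel.
fn double_all(worker: &Worker, values: &mut [u64]) {
    worker.scope(values.len(), |scope, chunk_size| {
        for chunk in values.chunks_mut(chunk_size) {
            scope.spawn(move || {
                for v in chunk {
                    *v *= 2;
                }
            });
        }
    });
}
```

For example, `double_all(&Worker::new(), &mut data)` splits `data` into roughly one chunk per CPU.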

303
bellman/src/multiexp.rs Normal file
View File

@ -0,0 +1,303 @@
use pairing::{
CurveAffine,
CurveProjective,
Engine,
PrimeField,
Field,
PrimeFieldRepr
};
use std::sync::Arc;
use std::io;
use bit_vec::{self, BitVec};
use std::iter;
use futures::{Future};
use super::multicore::Worker;
use super::SynthesisError;
/// An object that builds a source of bases.
pub trait SourceBuilder<G: CurveAffine>: Send + Sync + 'static + Clone {
type Source: Source<G>;
fn new(self) -> Self::Source;
}
/// A source of bases, like an iterator.
pub trait Source<G: CurveAffine> {
/// Parses the element from the source. Fails if the point is at infinity.
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError>;
/// Skips `amt` elements from the source, avoiding deserialization.
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
}
impl<G: CurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
type Source = (Arc<Vec<G>>, usize);
fn new(self) -> (Arc<Vec<G>>, usize) {
(self.0.clone(), self.1)
}
}
impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into());
}
if self.0[self.1].is_zero() {
return Err(SynthesisError::UnexpectedIdentity)
}
to.add_assign_mixed(&self.0[self.1]);
self.1 += 1;
Ok(())
}
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases from source").into());
}
self.1 += amt;
Ok(())
}
}
pub trait QueryDensity {
/// Returns whether the base exists.
type Iter: Iterator<Item=bool>;
fn iter(self) -> Self::Iter;
fn get_query_size(self) -> Option<usize>;
}
#[derive(Clone)]
pub struct FullDensity;
impl AsRef<FullDensity> for FullDensity {
fn as_ref(&self) -> &FullDensity {
self
}
}
impl<'a> QueryDensity for &'a FullDensity {
type Iter = iter::Repeat<bool>;
fn iter(self) -> Self::Iter {
iter::repeat(true)
}
fn get_query_size(self) -> Option<usize> {
None
}
}
pub struct DensityTracker {
bv: BitVec,
total_density: usize
}
impl<'a> QueryDensity for &'a DensityTracker {
type Iter = bit_vec::Iter<'a>;
fn iter(self) -> Self::Iter {
self.bv.iter()
}
fn get_query_size(self) -> Option<usize> {
Some(self.bv.len())
}
}
impl DensityTracker {
pub fn new() -> DensityTracker {
DensityTracker {
bv: BitVec::new(),
total_density: 0
}
}
pub fn add_element(&mut self) {
self.bv.push(false);
}
pub fn inc(&mut self, idx: usize) {
if !self.bv.get(idx).unwrap() {
self.bv.set(idx, true);
self.total_density += 1;
}
}
pub fn get_total_density(&self) -> usize {
self.total_density
}
}
fn multiexp_inner<Q, D, G, S>(
pool: &Worker,
bases: S,
density_map: D,
exponents: Arc<Vec<<<G::Engine as Engine>::Fr as PrimeField>::Repr>>,
mut skip: u32,
c: u32,
handle_trivial: bool
) -> Box<Future<Item=<G as CurveAffine>::Projective, Error=SynthesisError>>
where for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>
{
// Perform this region of the multiexp
let this = {
let bases = bases.clone();
let exponents = exponents.clone();
let density_map = density_map.clone();
pool.compute(move || {
// Accumulate the result
let mut acc = G::Projective::zero();
// Build a source for the bases
let mut bases = bases.new();
// Create space for the buckets
let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
let zero = <G::Engine as Engine>::Fr::zero().into_repr();
let one = <G::Engine as Engine>::Fr::one().into_repr();
// Sort the bases into buckets
for (&exp, density) in exponents.iter().zip(density_map.as_ref().iter()) {
if density {
if exp == zero {
bases.skip(1)?;
} else if exp == one {
if handle_trivial {
bases.add_assign_mixed(&mut acc)?;
} else {
bases.skip(1)?;
}
} else {
let mut exp = exp;
exp.shr(skip);
let exp = exp.as_ref()[0] % (1 << c);
if exp != 0 {
bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?;
} else {
bases.skip(1)?;
}
}
}
}
// Summation by parts
// e.g. 3a + 2b + 1c = a +
// (a) + b +
// ((a) + b) + c
let mut running_sum = G::Projective::zero();
for exp in buckets.into_iter().rev() {
running_sum.add_assign(&exp);
acc.add_assign(&running_sum);
}
Ok(acc)
})
};
skip += c;
if skip >= <G::Engine as Engine>::Fr::NUM_BITS {
// There isn't another region.
Box::new(this)
} else {
// There's another region more significant. Calculate and join it with
// this region recursively.
Box::new(
this.join(multiexp_inner(pool, bases, density_map, exponents, skip, c, false))
.map(move |(this, mut higher)| {
for _ in 0..c {
higher.double();
}
higher.add_assign(&this);
higher
})
)
}
}
/// Perform multi-exponentiation. The caller is responsible for ensuring the
/// query size is the same as the number of exponents.
pub fn multiexp<Q, D, G, S>(
pool: &Worker,
bases: S,
density_map: D,
exponents: Arc<Vec<<<G::Engine as Engine>::Fr as PrimeField>::Repr>>
) -> Box<Future<Item=<G as CurveAffine>::Projective, Error=SynthesisError>>
where for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>,
G: CurveAffine,
S: SourceBuilder<G>
{
let c = if exponents.len() < 32 {
3u32
} else {
(f64::from(exponents.len() as u32)).ln().ceil() as u32
};
if let Some(query_size) = density_map.as_ref().get_query_size() {
// If the density map has a known query size, it should not be
// inconsistent with the number of exponents.
assert!(query_size == exponents.len());
}
multiexp_inner(pool, bases, density_map, exponents, 0, c, true)
}
#[test]
fn test_with_bls12() {
fn naive_multiexp<G: CurveAffine>(
bases: Arc<Vec<G>>,
exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>
) -> G::Projective
{
assert_eq!(bases.len(), exponents.len());
let mut acc = G::Projective::zero();
for (base, exp) in bases.iter().zip(exponents.iter()) {
acc.add_assign(&base.mul(*exp));
}
acc
}
use rand::{self, Rand};
use pairing::bls12_381::Bls12;
const SAMPLES: usize = 1 << 14;
let rng = &mut rand::thread_rng();
let v = Arc::new((0..SAMPLES).map(|_| <Bls12 as Engine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>());
let g = Arc::new((0..SAMPLES).map(|_| <Bls12 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>());
let naive = naive_multiexp(g.clone(), v.clone());
let pool = Worker::new();
let fast = multiexp(
&pool,
(g, 0),
FullDensity,
v
).wait().unwrap();
assert_eq!(naive, fast);
}
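
The bucket/running-sum step in `multiexp_inner` can be illustrated without curve arithmetic. In this hedged sketch (plain integers stand in for group elements; none of these names exist in the crate), each base is added into the bucket for its nonzero window value, and the reverse running-sum pass then weights bucket k by k using only additions.

```rust
/// Hypothetical illustration of one c-bit window of the multiexp above,
/// with u64 addition standing in for point addition.
fn bucket_window_sum(window_values: &[u64], bases: &[u64], c: u32) -> u64 {
    assert_eq!(window_values.len(), bases.len());

    // One bucket per nonzero window value, as in `multiexp_inner`.
    let mut buckets = vec![0u64; (1 << c) - 1];
    for (&exp, &base) in window_values.iter().zip(bases.iter()) {
        assert!(exp < (1u64 << c));
        if exp != 0 {
            buckets[(exp - 1) as usize] += base;
        }
    }

    // Summation by parts: acc ends up equal to sum_k k * buckets[k - 1].
    let mut running_sum = 0u64;
    let mut acc = 0u64;
    for bucket in buckets.into_iter().rev() {
        running_sum += bucket;
        acc += running_sum;
    }
    acc
}

#[test]
fn test_bucket_window_sum() {
    // 3a + 2b + 1c with a = 5, b = 7, c = 11 and a 2-bit window.
    assert_eq!(bucket_window_sum(&[3, 2, 1], &[5, 7, 11], 2), 3 * 5 + 2 * 7 + 11);
}
```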

251
bellman/tests/mimc.rs Normal file
View File

@ -0,0 +1,251 @@
extern crate bellman;
extern crate pairing;
extern crate rand;
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using pairing-friendly curves
use pairing::{
Engine,
Field
};
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use pairing::bls12_381::{
Bls12
};
// We'll use these interfaces to construct our circuit.
use bellman::{
Circuit,
ConstraintSystem,
SynthesisError
};
// We're going to use the Groth16 proving system.
use bellman::groth16::{
Proof,
generate_random_parameters,
prepare_verifying_key,
create_random_proof,
verify_proof,
};
const MIMC_ROUNDS: usize = 322;
/// This is an implementation of MiMC, specifically a
/// variant named `LongsightF322p3` for BLS12-381.
/// See http://eprint.iacr.org/2016/492 for more
/// information about this construction.
///
/// ```
/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
/// for i from 0 up to 321 {
/// xL, xR := xR + (xL + Ci)^3, xL
/// }
/// return xL
/// }
/// ```
fn mimc<E: Engine>(
mut xl: E::Fr,
mut xr: E::Fr,
constants: &[E::Fr]
) -> E::Fr
{
assert_eq!(constants.len(), MIMC_ROUNDS);
for i in 0..MIMC_ROUNDS {
let mut tmp1 = xl;
tmp1.add_assign(&constants[i]);
let mut tmp2 = tmp1;
tmp2.square();
tmp2.mul_assign(&tmp1);
tmp2.add_assign(&xr);
xr = xl;
xl = tmp2;
}
xl
}
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation.
struct MiMCDemo<'a, E: Engine> {
xl: Option<E::Fr>,
xr: Option<E::Fr>,
constants: &'a [E::Fr]
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
assert_eq!(self.constants.len(), MIMC_ROUNDS);
// Allocate the first component of the preimage.
let mut xl_value = self.xl;
let mut xl = cs.alloc(|| "preimage xl", || {
xl_value.ok_or(SynthesisError::AssignmentMissing)
})?;
// Allocate the second component of the preimage.
let mut xr_value = self.xr;
let mut xr = cs.alloc(|| "preimage xr", || {
xr_value.ok_or(SynthesisError::AssignmentMissing)
})?;
for i in 0..MIMC_ROUNDS {
// xL, xR := xR + (xL + Ci)^3, xL
let cs = &mut cs.namespace(|| format!("round {}", i));
// tmp = (xL + Ci)^2
let mut tmp_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.square();
e
});
let mut tmp = cs.alloc(|| "tmp", || {
tmp_value.ok_or(SynthesisError::AssignmentMissing)
})?;
cs.enforce(
|| "tmp = (xL + Ci)^2",
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + tmp
);
// new_xL = xR + (xL + Ci)^3
// new_xL = xR + tmp * (xL + Ci)
// new_xL - xR = tmp * (xL + Ci)
let mut new_xl_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.mul_assign(&tmp_value.unwrap());
e.add_assign(&xr_value.unwrap());
e
});
let mut new_xl = if i == (MIMC_ROUNDS-1) {
// This is the last round, xL is our image and so
// we allocate a public input.
cs.alloc_input(|| "image", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
} else {
cs.alloc(|| "new_xl", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
};
cs.enforce(
|| "new_xL = xR + (xL + Ci)^3",
|lc| lc + tmp,
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + new_xl - xr
);
// xR = xL
xr = xl;
xr_value = xl_value;
// xL = new_xL
xl = new_xl;
xl_value = new_xl_value;
}
Ok(())
}
}
#[test]
fn test_mimc() {
// This may not be cryptographically safe; use
// `OsRng` (for example) in production software.
let rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
println!("Creating parameters...");
// Create parameters for our circuit
let params = {
let c = MiMCDemo::<Bls12> {
xl: None,
xr: None,
constants: &constants
};
generate_random_parameters(c, rng).unwrap()
};
// Prepare the verification key (for proof verification)
let pvk = prepare_verifying_key(&params.vk);
println!("Creating proofs...");
// Let's benchmark stuff!
const SAMPLES: u32 = 50;
let mut total_proving = Duration::new(0, 0);
let mut total_verifying = Duration::new(0, 0);
// Just a place to put the proof data, so we can
// benchmark deserialization.
let mut proof_vec = vec![];
for _ in 0..SAMPLES {
// Generate a random preimage and compute the image
let xl = rng.gen();
let xr = rng.gen();
let image = mimc::<Bls12>(xl, xr, &constants);
proof_vec.truncate(0);
let start = Instant::now();
{
// Create an instance of our circuit (with the
// witness)
let c = MiMCDemo {
xl: Some(xl),
xr: Some(xr),
constants: &constants
};
// Create a groth16 proof with our parameters.
let proof = create_random_proof(c, &params, rng).unwrap();
proof.write(&mut proof_vec).unwrap();
}
total_proving += start.elapsed();
let start = Instant::now();
let proof = Proof::read(&proof_vec[..]).unwrap();
// Check the proof
assert!(verify_proof(
&pvk,
&proof,
&[image]
).unwrap());
total_verifying += start.elapsed();
}
let proving_avg = total_proving / SAMPLES;
let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (proving_avg.as_secs() as f64);
let verifying_avg = total_verifying / SAMPLES;
let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (verifying_avg.as_secs() as f64);
println!("Average proving time: {:?} seconds", proving_avg);
println!("Average verifying time: {:?} seconds", verifying_avg);
}

27
librustzcash/Cargo.toml Normal file
View File

@ -0,0 +1,27 @@
[package]
name = "librustzcash"
version = "0.1.0"
authors = [
"Sean Bowe <ewillbefull@gmail.com>",
"Jack Grigg <jack@z.cash>",
"Jay Graber <jay@z.cash>",
"Simon Liu <simon@z.cash>"
]
[lib]
name = "rustzcash"
path = "src/rustzcash.rs"
crate-type = ["staticlib"]
[dependencies]
bellman = { path = "../bellman" }
libc = "0.2"
pairing = { path = "../pairing" }
lazy_static = "1"
byteorder = "1"
rand = "0.4"
sapling-crypto = { path = "../sapling-crypto" }
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
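
Because the crate is built as a `staticlib` and linked into `zcashd`, its public surface is a set of `extern "C"` functions rather than a Rust API. A hedged sketch of the shape of such an entry point follows; the function name and behaviour are hypothetical and not taken from `src/rustzcash.rs`.

```rust
extern crate libc;

use libc::{c_uchar, size_t};
use std::slice;

/// Hypothetical example of a C-callable entry point; the real ones
/// live in src/rustzcash.rs.
#[no_mangle]
pub extern "C" fn librustzcash_example_xor(data: *mut c_uchar, len: size_t, key: c_uchar) {
    if data.is_null() || len == 0 {
        return;
    }
    let bytes = unsafe { slice::from_raw_parts_mut(data, len as usize) };
    for byte in bytes {
        *byte ^= key;
    }
}
```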

20
librustzcash/README.md Normal file
View File

@ -0,0 +1,20 @@
# librustzcash
This repository contains librustzcash, a static library for Zcash code assets written in Rust.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](../LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](../LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

3
pairing/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
target/
**/*.rs.bk
Cargo.lock

14
pairing/COPYRIGHT Normal file
View File

@ -0,0 +1,14 @@
Copyrights in the "pairing" library are retained by their contributors. No
copyright assignment is required to contribute to the "pairing" library.
The "pairing" library is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

23
pairing/Cargo.toml Normal file
View File

@ -0,0 +1,23 @@
[package]
name = "pairing"
# Remember to change version string in README.md.
version = "0.14.2"
authors = ["Sean Bowe <ewillbefull@gmail.com>"]
license = "MIT/Apache-2.0"
description = "Pairing-friendly elliptic curve library"
documentation = "https://docs.rs/pairing/"
homepage = "https://github.com/ebfull/pairing"
repository = "https://github.com/ebfull/pairing"
[dependencies]
rand = "0.4"
byteorder = "1"
clippy = { version = "0.0.200", optional = true }
[features]
unstable-features = ["expose-arith"]
expose-arith = []
u128-support = []
default = []

201
pairing/LICENSE-APACHE Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

23
pairing/LICENSE-MIT Normal file
View File

@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

35
pairing/README.md Normal file
View File

@ -0,0 +1,35 @@
# pairing [![Crates.io](https://img.shields.io/crates/v/pairing.svg)](https://crates.io/crates/pairing) #
This is a Rust crate for using pairing-friendly elliptic curves. Currently, only the [BLS12-381](https://z.cash/blog/new-snark-curve.html) construction is implemented.
## [Documentation](https://docs.rs/pairing/)
Bring the `pairing` crate into your project just as you normally would.
If you're using a supported platform and the nightly Rust compiler, you can enable the `u128-support` feature for faster arithmetic.
```toml
[dependencies.pairing]
version = "0.14"
features = ["u128-support"]
```
## Security Warnings
This library does not make any guarantees about constant-time operations, memory access patterns, or resistance to side-channel attacks.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
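
As a quick orientation to the API, here is a hedged sketch (not taken from this README; it assumes the `pairing` 0.14 interfaces used elsewhere in this repository) that checks bilinearity, e(a·P, b·Q) = e(P, Q)<sup>ab</sup>:

```rust
extern crate pairing;
extern crate rand;

use pairing::{CurveProjective, Engine, Field, PrimeField};
use pairing::bls12_381::{Bls12, Fr, G1, G2};
use rand::{Rand, SeedableRng, XorShiftRng};

fn main() {
    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

    let p = G1::rand(&mut rng);
    let q = G2::rand(&mut rng);
    let (a, b) = (Fr::rand(&mut rng), Fr::rand(&mut rng));

    // a·P and b·Q
    let mut ap = p;
    ap.mul_assign(a);
    let mut bq = q;
    bq.mul_assign(b);

    // a·b in the scalar field
    let mut ab = a;
    ab.mul_assign(&b);

    // e(a·P, b·Q) == e(P, Q)^(a·b)
    assert_eq!(Bls12::pairing(ap, bq), Bls12::pairing(p, q).pow(ab.into_repr()));
}
```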

View File

@ -0,0 +1,127 @@
mod g1 {
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::CurveProjective;
#[bench]
fn bench_g1_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G1, Fr)> = (0..SAMPLES)
.map(|_| (G1::rand(&mut rng), Fr::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_g1_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G1, G1)> = (0..SAMPLES)
.map(|_| (G1::rand(&mut rng), G1::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_g1_add_assign_mixed(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G1, G1Affine)> = (0..SAMPLES)
.map(|_| (G1::rand(&mut rng), G1::rand(&mut rng).into()))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign_mixed(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
}
mod g2 {
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::CurveProjective;
#[bench]
fn bench_g2_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G2, Fr)> = (0..SAMPLES)
.map(|_| (G2::rand(&mut rng), Fr::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_g2_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G2, G2)> = (0..SAMPLES)
.map(|_| (G2::rand(&mut rng), G2::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_g2_add_assign_mixed(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G2, G2Affine)> = (0..SAMPLES)
.map(|_| (G2::rand(&mut rng), G2::rand(&mut rng).into()))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign_mixed(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
}

View File

@ -0,0 +1,268 @@
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::{Field, PrimeField, PrimeFieldRepr, SqrtField};
#[bench]
fn bench_fq_repr_add_nocarry(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES)
.map(|_| {
let mut tmp1 = FqRepr::rand(&mut rng);
let mut tmp2 = FqRepr::rand(&mut rng);
// Shave a few bits off to avoid overflow.
for _ in 0..3 {
tmp1.div2();
tmp2.div2();
}
(tmp1, tmp2)
})
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_nocarry(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_repr_sub_noborrow(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES)
.map(|_| {
let tmp1 = FqRepr::rand(&mut rng);
let mut tmp2 = tmp1;
// Ensure tmp2 is smaller than tmp1.
for _ in 0..10 {
tmp2.div2();
}
(tmp1, tmp2)
})
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_noborrow(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_repr_num_bits(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FqRepr> = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].num_bits();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_repr_mul2(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FqRepr> = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.mul2();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_repr_div2(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FqRepr> = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.div2();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq, Fq)> = (0..SAMPLES)
.map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_sub_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq, Fq)> = (0..SAMPLES)
.map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq, Fq)> = (0..SAMPLES)
.map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_square(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq> = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.square();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_inverse(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq> = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].inverse()
});
}
#[bench]
fn bench_fq_negate(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq> = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.negate();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq_sqrt(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq> = (0..SAMPLES)
.map(|_| {
let mut tmp = Fq::rand(&mut rng);
tmp.square();
tmp
})
.collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].sqrt()
});
}
#[bench]
fn bench_fq_into_repr(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq> = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].into_repr()
});
}
#[bench]
fn bench_fq_from_repr(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FqRepr> = (0..SAMPLES)
.map(|_| Fq::rand(&mut rng).into_repr())
.collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
Fq::from_repr(v[count])
});
}

View File

@ -0,0 +1,94 @@
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::Field;
#[bench]
fn bench_fq12_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq12, Fq12)> = (0..SAMPLES)
.map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq12_sub_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq12, Fq12)> = (0..SAMPLES)
.map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq12_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq12, Fq12)> = (0..SAMPLES)
.map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq12_squaring(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq12> = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.square();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq12_inverse(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq12> = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].inverse();
count = (count + 1) % SAMPLES;
tmp
});
}

View File

@ -0,0 +1,110 @@
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::{Field, SqrtField};
#[bench]
fn bench_fq2_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq2, Fq2)> = (0..SAMPLES)
.map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq2_sub_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq2, Fq2)> = (0..SAMPLES)
.map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq2_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fq2, Fq2)> = (0..SAMPLES)
.map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq2_squaring(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq2> = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.square();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq2_inverse(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq2> = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].inverse();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fq2_sqrt(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq2> = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].sqrt();
count = (count + 1) % SAMPLES;
tmp
});
}

View File

@ -0,0 +1,268 @@
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::{Field, PrimeField, PrimeFieldRepr, SqrtField};
#[bench]
fn bench_fr_repr_add_nocarry(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES)
.map(|_| {
let mut tmp1 = FrRepr::rand(&mut rng);
let mut tmp2 = FrRepr::rand(&mut rng);
// Shave a few bits off to avoid overflow.
for _ in 0..3 {
tmp1.div2();
tmp2.div2();
}
(tmp1, tmp2)
})
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_nocarry(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_repr_sub_noborrow(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES)
.map(|_| {
let tmp1 = FrRepr::rand(&mut rng);
let mut tmp2 = tmp1;
// Ensure tmp2 is smaller than tmp1.
for _ in 0..10 {
tmp2.div2();
}
(tmp1, tmp2)
})
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_noborrow(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_repr_num_bits(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FrRepr> = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].num_bits();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_repr_mul2(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FrRepr> = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.mul2();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_repr_div2(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FrRepr> = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.div2();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_add_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fr, Fr)> = (0..SAMPLES)
.map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.add_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_sub_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fr, Fr)> = (0..SAMPLES)
.map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.sub_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_mul_assign(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(Fr, Fr)> = (0..SAMPLES)
.map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count].0;
tmp.mul_assign(&v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_square(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.square();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_inverse(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].inverse()
});
}
#[bench]
fn bench_fr_negate(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let mut tmp = v[count];
tmp.negate();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_fr_sqrt(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fr> = (0..SAMPLES)
.map(|_| {
let mut tmp = Fr::rand(&mut rng);
tmp.square();
tmp
})
.collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].sqrt()
});
}
#[bench]
fn bench_fr_into_repr(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
v[count].into_repr()
});
}
#[bench]
fn bench_fr_from_repr(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<FrRepr> = (0..SAMPLES)
.map(|_| Fr::rand(&mut rng).into_repr())
.collect();
let mut count = 0;
b.iter(|| {
count = (count + 1) % SAMPLES;
Fr::from_repr(v[count])
});
}

View File

@ -0,0 +1,107 @@
mod ec;
mod fq;
mod fq12;
mod fq2;
mod fr;
use rand::{Rand, SeedableRng, XorShiftRng};
use pairing::bls12_381::*;
use pairing::{CurveAffine, Engine};
#[bench]
fn bench_pairing_g1_preparation(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<G1> = (0..SAMPLES).map(|_| G1::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = G1Affine::from(v[count]).prepare();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_pairing_g2_preparation(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<G2> = (0..SAMPLES).map(|_| G2::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = G2Affine::from(v[count]).prepare();
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_pairing_miller_loop(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G1Prepared, G2Prepared)> = (0..SAMPLES)
.map(|_| {
(
G1Affine::from(G1::rand(&mut rng)).prepare(),
G2Affine::from(G2::rand(&mut rng)).prepare(),
)
})
.collect();
let mut count = 0;
b.iter(|| {
let tmp = Bls12::miller_loop(&[(&v[count].0, &v[count].1)]);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_pairing_final_exponentiation(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq12> = (0..SAMPLES)
.map(|_| {
(
G1Affine::from(G1::rand(&mut rng)).prepare(),
G2Affine::from(G2::rand(&mut rng)).prepare(),
)
})
.map(|(ref p, ref q)| Bls12::miller_loop(&[(p, q)]))
.collect();
let mut count = 0;
b.iter(|| {
let tmp = Bls12::final_exponentiation(&v[count]);
count = (count + 1) % SAMPLES;
tmp
});
}
#[bench]
fn bench_pairing_full(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<(G1, G2)> = (0..SAMPLES)
.map(|_| (G1::rand(&mut rng), G2::rand(&mut rng)))
.collect();
let mut count = 0;
b.iter(|| {
let tmp = Bls12::pairing(v[count].0, v[count].1);
count = (count + 1) % SAMPLES;
tmp
});
}

@ -0,0 +1,7 @@
#![feature(test)]
extern crate pairing;
extern crate rand;
extern crate test;
mod bls12_381;

@ -0,0 +1,71 @@
# BLS12-381
This is an implementation of the BLS12-381 pairing-friendly elliptic curve construction.
## BLS12 Parameterization
BLS12 curves are parameterized by a value *x* such that the base field modulus *q* and the subgroup order *r* can be computed by:
* q = (x - 1)<sup>2</sup> ((x<sup>4</sup> - x<sup>2</sup> + 1) / 3) + x
* r = (x<sup>4</sup> - x<sup>2</sup> + 1)
Given primes *q* and *r* parameterized as above, we can easily construct an elliptic curve over the prime field F<sub>*q*</sub> which contains a subgroup of order *r* such that *r* | (*q*<sup>12</sup> - 1), giving it an embedding degree of 12. Instantiating its sextic twist over the extension field F<sub>q<sup>2</sup></sub> gives rise to an efficient bilinear pairing function between elements of the order *r* subgroups of the two curves, into an order *r* multiplicative subgroup of F<sub>q<sup>12</sup></sub>.
In zk-SNARK schemes, we require F<sub>r</sub> to have large 2<sup>n</sup> roots of unity for performing efficient fast Fourier transforms. As such, guaranteeing that a large 2<sup>n</sup> divides (r - 1), or equivalently that *x* has a large 2<sup>n</sup> factor, gives rise to BLS12 curves suitable for zk-SNARKs.
Due to recent research, it is estimated by many that *q* should be approximately 384 bits to target 128-bit security. Conveniently, *r* is approximately 256 bits when *q* is approximately 384 bits, making BLS12 curves ideal for 128-bit security. This also makes them ideal for many zk-SNARK applications, as the scalar field can be used for keying material such as embedded curve constructions.
Many curves match our descriptions, but we require some extra properties for efficiency purposes:
* *q* should be smaller than 2<sup>383</sup>, and *r* should be smaller than 2<sup>255</sup>, so that the most significant bit is unset when using 64-bit or 32-bit limbs. This allows for cheap reductions.
* F<sub>q<sup>12</sup></sub> is typically constructed using towers of extension fields. As a byproduct of [research](https://eprint.iacr.org/2011/465.pdf) for BLS curves of embedding degree 24, we can identify subfamilies of BLS12 curves (for our purposes, where x mod 72 = {16, 64}) that produce efficient extension field towers and twisting isomorphisms.
* We desire *x* of small Hamming weight, to increase the performance of the pairing function.
## BLS12-381 Instantiation
The BLS12-381 construction is instantiated by `x = -0xd201000000010000`, which produces the largest `q` and smallest Hamming weight of `x` that meets the above requirements. This produces:
* q = `0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab` (381 bits)
* r = `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (255 bits)
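As a quick sanity check, the following standalone sketch reproduces the two constants above from `x` and reports the power-of-two factor of r - 1 relevant to the zk-SNARK discussion. It assumes the external `num-bigint` crate, which is not a dependency of this workspace; it is illustrative only.

```rust
use num_bigint::BigInt;

fn main() {
    // The BLS12-381 parameter x = -0xd201000000010000.
    let x = -BigInt::parse_bytes(b"d201000000010000", 16).unwrap();

    // r = x^4 - x^2 + 1
    let x2 = &x * &x;
    let x4 = &x2 * &x2;
    let r = &x4 - &x2 + BigInt::from(1);

    // q = (x - 1)^2 * (x^4 - x^2 + 1) / 3 + x
    // (divide the whole product by 3 so the integer division is exact)
    let xm1 = &x - BigInt::from(1);
    let q = (&xm1 * &xm1 * &r) / BigInt::from(3) + &x;

    println!("q = {:x} ({} bits)", q, q.bits());
    println!("r = {:x} ({} bits)", r, r.bits());

    // The 2^n | (r - 1) requirement from the zk-SNARK section above.
    let mut t = &r - BigInt::from(1);
    let mut n = 0;
    while &t % BigInt::from(2) == BigInt::from(0) {
        t = t / BigInt::from(2);
        n += 1;
    }
    println!("2^{} divides r - 1", n);
}
```

For BLS12-381 this prints the 381-bit `q` and 255-bit `r` above, and reports that 2<sup>32</sup> divides r - 1.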
Our extension field tower is constructed as follows:
1. F<sub>q<sup>2</sup></sub> is constructed as F<sub>q</sub>(u) / (u<sup>2</sup> - β) where β = -1.
2. F<sub>q<sup>6</sup></sub> is constructed as F<sub>q<sup>2</sup></sub>(v) / (v<sup>3</sup> - ξ) where ξ = u + 1
3. F<sub>q<sup>12</sup></sub> is constructed as F<sub>q<sup>6</sup></sub>(w) / (w<sup>2</sup> - γ) where γ = v
Now, we instantiate the elliptic curve E(F<sub>q</sub>) : y<sup>2</sup> = x<sup>3</sup> + 4, and the elliptic curve E'(F<sub>q<sup>2</sup></sub>) : y<sup>2</sup> = x<sup>3</sup> + 4(u + 1).
The group G<sub>1</sub> is the *r* order subgroup of E, which has cofactor (x - 1)<sup>2</sup> / 3. The group G<sub>2</sub> is the *r* order subgroup of E', which has cofactor (x<sup>8</sup> - 4x<sup>7</sup> + 5x<sup>6</sup> - 4x<sup>4</sup> + 6x<sup>3</sup> - 4x<sup>2</sup> - 4x + 13) / 9.
### Generators
The generators of G<sub>1</sub> and G<sub>2</sub> are computed by finding the lexicographically smallest valid `x`-coordinate and its lexicographically smallest `y`-coordinate, and scaling the resulting point by the cofactor such that the result is not the point at infinity.
#### G1
```
x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507
y = 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569
```
#### G2
```
x = 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758*u + 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160
y = 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582*u + 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905
```
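As another small check (again assuming the external `num-bigint` crate, purely for illustration), the G1 generator listed above satisfies the curve equation y<sup>2</sup> = x<sup>3</sup> + 4 over F<sub>q</sub>:

```rust
use num_bigint::BigInt;

fn main() {
    // q and the G1 generator coordinates, copied from above.
    let q = BigInt::parse_bytes(
        b"1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab",
        16,
    ).unwrap();
    let x = BigInt::parse_bytes(
        b"3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507",
        10,
    ).unwrap();
    let y = BigInt::parse_bytes(
        b"1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569",
        10,
    ).unwrap();

    // On E(Fq): y^2 = x^3 + 4, so both sides must agree modulo q.
    let lhs = (&y * &y) % &q;
    let rhs = (&x * &x * &x + BigInt::from(4)) % &q;
    assert_eq!(lhs, rhs);
    println!("the G1 generator lies on y^2 = x^3 + 4 over Fq");
}
```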
### Serialization
* Fq elements are encoded in big-endian form. They occupy 48 bytes in this form.
* Fq2 elements are encoded in big-endian form, meaning that the Fq2 element c0 + c1 * u is represented by the Fq element c1 followed by the Fq element c0. This means Fq2 elements occupy 96 bytes in this form.
* The group G1 uses Fq elements for coordinates. The group G2 uses Fq2 elements for coordinates.
* G1 and G2 elements can be encoded in uncompressed form (the x-coordinate followed by the y-coordinate) or in compressed form (just the x-coordinate). G1 elements occupy 96 bytes in uncompressed form, and 48 bytes in compressed form. G2 elements occupy 192 bytes in uncompressed form, and 96 bytes in compressed form.
The most-significant three bits of a G1 or G2 encoding should be masked away before the coordinate(s) are interpreted. These bits are used to unambiguously represent the underlying element (see the sketch after this list):
* The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
* The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero.
* The third-most significant bit is set if (and only if) this point is in compressed form _and_ it is not the point at infinity _and_ its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
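For illustration, here is a minimal standalone sketch of how a decoder might read and mask these flag bits. The helper below is hypothetical; the actual decoding lives inside this crate's `EncodedPoint` implementations.

```rust
/// Hypothetical flag container mirroring the bit layout described above.
struct EncodingFlags {
    compressed: bool,                     // most significant bit
    infinity: bool,                       // second-most significant bit
    y_is_lexicographically_largest: bool, // third-most significant bit
}

fn read_flags(first_byte: u8) -> (EncodingFlags, u8) {
    let flags = EncodingFlags {
        compressed: first_byte & 0b1000_0000 != 0,
        infinity: first_byte & 0b0100_0000 != 0,
        y_is_lexicographically_largest: first_byte & 0b0010_0000 != 0,
    };
    // Mask the three flag bits away before interpreting the coordinate bytes.
    (flags, first_byte & 0b0001_1111)
}

fn main() {
    // A compressed, non-infinity encoding whose y-coordinate is the larger root.
    let (flags, masked) = read_flags(0b1011_0100);
    assert!(flags.compressed && !flags.infinity && flags.y_is_lexicographically_largest);
    assert_eq!(masked, 0b0001_0100);
}
```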

pairing/src/bls12_381/ec.rs (2026 lines): diff suppressed because it is too large

pairing/src/bls12_381/fq.rs (2959 lines): diff suppressed because it is too large

@ -0,0 +1,189 @@
use super::fq::FROBENIUS_COEFF_FQ12_C1;
use super::fq2::Fq2;
use super::fq6::Fq6;
use rand::{Rand, Rng};
use Field;
/// An element of Fq12, represented by c0 + c1 * w.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Fq12 {
pub c0: Fq6,
pub c1: Fq6,
}
impl ::std::fmt::Display for Fq12 {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Fq12({} + {} * w)", self.c0, self.c1)
}
}
impl Rand for Fq12 {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fq12 {
c0: rng.gen(),
c1: rng.gen(),
}
}
}
impl Fq12 {
pub fn conjugate(&mut self) {
self.c1.negate();
}
pub fn mul_by_014(&mut self, c0: &Fq2, c1: &Fq2, c4: &Fq2) {
let mut aa = self.c0;
aa.mul_by_01(c0, c1);
let mut bb = self.c1;
bb.mul_by_1(c4);
let mut o = *c1;
o.add_assign(c4);
self.c1.add_assign(&self.c0);
self.c1.mul_by_01(c0, &o);
self.c1.sub_assign(&aa);
self.c1.sub_assign(&bb);
self.c0 = bb;
self.c0.mul_by_nonresidue();
self.c0.add_assign(&aa);
}
}
impl Field for Fq12 {
fn zero() -> Self {
Fq12 {
c0: Fq6::zero(),
c1: Fq6::zero(),
}
}
fn one() -> Self {
Fq12 {
c0: Fq6::one(),
c1: Fq6::zero(),
}
}
fn is_zero(&self) -> bool {
self.c0.is_zero() && self.c1.is_zero()
}
fn double(&mut self) {
self.c0.double();
self.c1.double();
}
fn negate(&mut self) {
self.c0.negate();
self.c1.negate();
}
fn add_assign(&mut self, other: &Self) {
self.c0.add_assign(&other.c0);
self.c1.add_assign(&other.c1);
}
fn sub_assign(&mut self, other: &Self) {
self.c0.sub_assign(&other.c0);
self.c1.sub_assign(&other.c1);
}
fn frobenius_map(&mut self, power: usize) {
self.c0.frobenius_map(power);
self.c1.frobenius_map(power);
self.c1.c0.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]);
self.c1.c1.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]);
self.c1.c2.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]);
}
fn square(&mut self) {
let mut ab = self.c0;
ab.mul_assign(&self.c1);
let mut c0c1 = self.c0;
c0c1.add_assign(&self.c1);
let mut c0 = self.c1;
c0.mul_by_nonresidue();
c0.add_assign(&self.c0);
c0.mul_assign(&c0c1);
c0.sub_assign(&ab);
self.c1 = ab;
self.c1.add_assign(&ab);
ab.mul_by_nonresidue();
c0.sub_assign(&ab);
self.c0 = c0;
}
fn mul_assign(&mut self, other: &Self) {
let mut aa = self.c0;
aa.mul_assign(&other.c0);
let mut bb = self.c1;
bb.mul_assign(&other.c1);
let mut o = other.c0;
o.add_assign(&other.c1);
self.c1.add_assign(&self.c0);
self.c1.mul_assign(&o);
self.c1.sub_assign(&aa);
self.c1.sub_assign(&bb);
self.c0 = bb;
self.c0.mul_by_nonresidue();
self.c0.add_assign(&aa);
}
fn inverse(&self) -> Option<Self> {
let mut c0s = self.c0;
c0s.square();
let mut c1s = self.c1;
c1s.square();
c1s.mul_by_nonresidue();
c0s.sub_assign(&c1s);
c0s.inverse().map(|t| {
let mut tmp = Fq12 { c0: t, c1: t };
tmp.c0.mul_assign(&self.c0);
tmp.c1.mul_assign(&self.c1);
tmp.c1.negate();
tmp
})
}
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
#[test]
fn test_fq12_mul_by_014() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let c0 = Fq2::rand(&mut rng);
let c1 = Fq2::rand(&mut rng);
let c5 = Fq2::rand(&mut rng);
let mut a = Fq12::rand(&mut rng);
let mut b = a;
a.mul_by_014(&c0, &c1, &c5);
b.mul_assign(&Fq12 {
c0: Fq6 {
c0: c0,
c1: c1,
c2: Fq2::zero(),
},
c1: Fq6 {
c0: Fq2::zero(),
c1: c5,
c2: Fq2::zero(),
},
});
assert_eq!(a, b);
}
}
#[test]
fn fq12_field_tests() {
use PrimeField;
::tests::field::random_field_tests::<Fq12>();
::tests::field::random_frobenius_tests::<Fq12, _>(super::fq::Fq::char(), 13);
}

@ -0,0 +1,908 @@
use super::fq::{FROBENIUS_COEFF_FQ2_C1, Fq, NEGATIVE_ONE};
use rand::{Rand, Rng};
use {Field, SqrtField};
use std::cmp::Ordering;
/// An element of Fq2, represented by c0 + c1 * u.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Fq2 {
pub c0: Fq,
pub c1: Fq,
}
impl ::std::fmt::Display for Fq2 {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Fq2({} + {} * u)", self.c0, self.c1)
}
}
/// `Fq2` elements are ordered lexicographically.
impl Ord for Fq2 {
#[inline(always)]
fn cmp(&self, other: &Fq2) -> Ordering {
match self.c1.cmp(&other.c1) {
Ordering::Greater => Ordering::Greater,
Ordering::Less => Ordering::Less,
Ordering::Equal => self.c0.cmp(&other.c0),
}
}
}
impl PartialOrd for Fq2 {
#[inline(always)]
fn partial_cmp(&self, other: &Fq2) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Fq2 {
/// Multiply this element by the cubic and quadratic nonresidue 1 + u.
pub fn mul_by_nonresidue(&mut self) {
let t0 = self.c0;
self.c0.sub_assign(&self.c1);
self.c1.add_assign(&t0);
}
/// Norm of Fq2 as extension field in i over Fq
pub fn norm(&self) -> Fq {
let mut t0 = self.c0;
let mut t1 = self.c1;
t0.square();
t1.square();
t1.add_assign(&t0);
t1
}
}
impl Rand for Fq2 {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fq2 {
c0: rng.gen(),
c1: rng.gen(),
}
}
}
impl Field for Fq2 {
fn zero() -> Self {
Fq2 {
c0: Fq::zero(),
c1: Fq::zero(),
}
}
fn one() -> Self {
Fq2 {
c0: Fq::one(),
c1: Fq::zero(),
}
}
fn is_zero(&self) -> bool {
self.c0.is_zero() && self.c1.is_zero()
}
fn square(&mut self) {
let mut ab = self.c0;
ab.mul_assign(&self.c1);
let mut c0c1 = self.c0;
c0c1.add_assign(&self.c1);
let mut c0 = self.c1;
c0.negate();
c0.add_assign(&self.c0);
c0.mul_assign(&c0c1);
c0.sub_assign(&ab);
self.c1 = ab;
self.c1.add_assign(&ab);
c0.add_assign(&ab);
self.c0 = c0;
}
fn double(&mut self) {
self.c0.double();
self.c1.double();
}
fn negate(&mut self) {
self.c0.negate();
self.c1.negate();
}
fn add_assign(&mut self, other: &Self) {
self.c0.add_assign(&other.c0);
self.c1.add_assign(&other.c1);
}
fn sub_assign(&mut self, other: &Self) {
self.c0.sub_assign(&other.c0);
self.c1.sub_assign(&other.c1);
}
fn mul_assign(&mut self, other: &Self) {
let mut aa = self.c0;
aa.mul_assign(&other.c0);
let mut bb = self.c1;
bb.mul_assign(&other.c1);
let mut o = other.c0;
o.add_assign(&other.c1);
self.c1.add_assign(&self.c0);
self.c1.mul_assign(&o);
self.c1.sub_assign(&aa);
self.c1.sub_assign(&bb);
self.c0 = aa;
self.c0.sub_assign(&bb);
}
fn inverse(&self) -> Option<Self> {
let mut t1 = self.c1;
t1.square();
let mut t0 = self.c0;
t0.square();
t0.add_assign(&t1);
t0.inverse().map(|t| {
let mut tmp = Fq2 {
c0: self.c0,
c1: self.c1,
};
tmp.c0.mul_assign(&t);
tmp.c1.mul_assign(&t);
tmp.c1.negate();
tmp
})
}
fn frobenius_map(&mut self, power: usize) {
self.c1.mul_assign(&FROBENIUS_COEFF_FQ2_C1[power % 2]);
}
}
impl SqrtField for Fq2 {
fn legendre(&self) -> ::LegendreSymbol {
self.norm().legendre()
}
fn sqrt(&self) -> Option<Self> {
// Algorithm 9, https://eprint.iacr.org/2012/685.pdf
if self.is_zero() {
Some(Self::zero())
} else {
// a1 = self^((q - 3) / 4)
let mut a1 = self.pow([
0xee7fbfffffffeaaa,
0x7aaffffac54ffff,
0xd9cc34a83dac3d89,
0xd91dd2e13ce144af,
0x92c6e9ed90d2eb35,
0x680447a8e5ff9a6,
]);
let mut alpha = a1;
alpha.square();
alpha.mul_assign(self);
let mut a0 = alpha;
a0.frobenius_map(1);
a0.mul_assign(&alpha);
let neg1 = Fq2 {
c0: NEGATIVE_ONE,
c1: Fq::zero(),
};
if a0 == neg1 {
None
} else {
a1.mul_assign(self);
if alpha == neg1 {
a1.mul_assign(&Fq2 {
c0: Fq::zero(),
c1: Fq::one(),
});
} else {
alpha.add_assign(&Fq2::one());
// alpha = alpha^((q - 1) / 2)
alpha = alpha.pow([
0xdcff7fffffffd555,
0xf55ffff58a9ffff,
0xb39869507b587b12,
0xb23ba5c279c2895f,
0x258dd3db21a5d66b,
0xd0088f51cbff34d,
]);
a1.mul_assign(&alpha);
}
Some(a1)
}
}
}
}
#[test]
fn test_fq2_ordering() {
let mut a = Fq2 {
c0: Fq::zero(),
c1: Fq::zero(),
};
let mut b = a.clone();
assert!(a.cmp(&b) == Ordering::Equal);
b.c0.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Less);
a.c0.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Equal);
b.c1.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Less);
a.c0.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Less);
a.c1.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Greater);
b.c0.add_assign(&Fq::one());
assert!(a.cmp(&b) == Ordering::Equal);
}
#[test]
fn test_fq2_basics() {
assert_eq!(
Fq2 {
c0: Fq::zero(),
c1: Fq::zero(),
},
Fq2::zero()
);
assert_eq!(
Fq2 {
c0: Fq::one(),
c1: Fq::zero(),
},
Fq2::one()
);
assert!(Fq2::zero().is_zero());
assert!(!Fq2::one().is_zero());
assert!(!Fq2 {
c0: Fq::zero(),
c1: Fq::one(),
}.is_zero());
}
#[test]
fn test_fq2_squaring() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::one(),
c1: Fq::one(),
}; // u + 1
a.square();
assert_eq!(
a,
Fq2 {
c0: Fq::zero(),
c1: Fq::from_repr(FqRepr::from(2)).unwrap(),
}
); // 2u
let mut a = Fq2 {
c0: Fq::zero(),
c1: Fq::one(),
}; // u
a.square();
assert_eq!(a, {
let mut neg1 = Fq::one();
neg1.negate();
Fq2 {
c0: neg1,
c1: Fq::zero(),
}
}); // -1
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x9c2c6309bbf8b598,
0x4eef5c946536f602,
0x90e34aab6fb6a6bd,
0xf7f295a94e58ae7c,
0x41b76dcc1c3fbe5e,
0x7080c5fa1d8e042,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x38f473b3c870a4ab,
0x6ad3291177c8c7e5,
0xdac5a4c911a4353e,
0xbfb99020604137a0,
0xfc58a7b7be815407,
0x10d1615e75250a21,
])).unwrap(),
};
a.square();
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0xf262c28c538bcf68,
0xb9f2a66eae1073ba,
0xdc46ab8fad67ae0,
0xcb674157618da176,
0x4cf17b5893c3d327,
0x7eac81369c43361
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xc1579cf58e980cf8,
0xa23eb7e12dd54d98,
0xe75138bce4cec7aa,
0x38d0d7275a9689e1,
0x739c983042779a65,
0x1542a61c8a8db994
])).unwrap(),
}
);
}
#[test]
fn test_fq2_mul() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x85c9f989e1461f03,
0xa2e33c333449a1d6,
0x41e461154a7354a3,
0x9ee53e7e84d7532e,
0x1c202d8ed97afb45,
0x51d3f9253e2516f,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xa7348a8b511aedcf,
0x143c215d8176b319,
0x4cc48081c09b8903,
0x9533e4a9a5158be,
0x7a5e1ecb676d65f9,
0x180c3ee46656b008,
])).unwrap(),
};
a.mul_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
0xe21f9169805f537e,
0xfc87e62e179c285d,
0x27ece175be07a531,
0xcd460f9f0c23e430,
0x6c9110292bfa409,
0x2c93a72eb8af83e,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x4b1c3f936d8992d4,
0x1d2a72916dba4c8a,
0x8871c508658d1e5f,
0x57a06d3135a752ae,
0x634cd3c6c565096d,
0x19e17334d4e93558,
])).unwrap(),
});
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x95b5127e6360c7e4,
0xde29c31a19a6937e,
0xf61a96dacf5a39bc,
0x5511fe4d84ee5f78,
0x5310a202d92f9963,
0x1751afbe166e5399
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x84af0e1bd630117a,
0x6c63cd4da2c2aa7,
0x5ba6e5430e883d40,
0xc975106579c275ee,
0x33a9ac82ce4c5083,
0x1ef1a36c201589d
])).unwrap(),
}
);
}
#[test]
fn test_fq2_inverse() {
use super::fq::FqRepr;
use PrimeField;
assert!(Fq2::zero().inverse().is_none());
let a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x85c9f989e1461f03,
0xa2e33c333449a1d6,
0x41e461154a7354a3,
0x9ee53e7e84d7532e,
0x1c202d8ed97afb45,
0x51d3f9253e2516f,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xa7348a8b511aedcf,
0x143c215d8176b319,
0x4cc48081c09b8903,
0x9533e4a9a5158be,
0x7a5e1ecb676d65f9,
0x180c3ee46656b008,
])).unwrap(),
};
let a = a.inverse().unwrap();
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x70300f9bcb9e594,
0xe5ecda5fdafddbb2,
0x64bef617d2915a8f,
0xdfba703293941c30,
0xa6c3d8f9586f2636,
0x1351ef01941b70c4
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x8c39fd76a8312cb4,
0x15d7b6b95defbff0,
0x947143f89faedee9,
0xcbf651a0f367afb2,
0xdf4e54f0d3ef15a6,
0x103bdf241afb0019
])).unwrap(),
}
);
}
#[test]
fn test_fq2_addition() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
};
a.add_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
0x619a02d78dc70ef2,
0xb93adfc9119e33e8,
0x4bf0b99a9f0dca12,
0x3b88899a42a6318f,
0x986a4a62fa82a49d,
0x13ce433fa26027f5,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x66323bf80b58b9b9,
0xa1379b6facf6e596,
0x402aef1fb797e32f,
0x2236f55246d0d44d,
0x4c8c1800eb104566,
0x11d6e20e986c2085,
])).unwrap(),
});
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x8e9a7adaf6eb0eb9,
0xcb207e6b3341eaba,
0xd70b0c7b481d23ff,
0xf4ef57d604b6bca2,
0x65309427b3d5d090,
0x14c715d5553f01d2
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xfdb032e7d9079a94,
0x35a2809d15468d83,
0xfe4b23317e0796d5,
0xd62fa51334f560fa,
0x9ad265eb46e01984,
0x1303f3465112c8bc
])).unwrap(),
}
);
}
#[test]
fn test_fq2_subtraction() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
};
a.sub_assign(&Fq2 {
c0: Fq::from_repr(FqRepr([
0x619a02d78dc70ef2,
0xb93adfc9119e33e8,
0x4bf0b99a9f0dca12,
0x3b88899a42a6318f,
0x986a4a62fa82a49d,
0x13ce433fa26027f5,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x66323bf80b58b9b9,
0xa1379b6facf6e596,
0x402aef1fb797e32f,
0x2236f55246d0d44d,
0x4c8c1800eb104566,
0x11d6e20e986c2085,
])).unwrap(),
});
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x8565752bdb5c9b80,
0x7756bed7c15982e9,
0xa65a6be700b285fe,
0xe255902672ef6c43,
0x7f77a718021c342d,
0x72ba14049fe9881
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xeb4abaf7c255d1cd,
0x11df49bc6cacc256,
0xe52617930588c69a,
0xf63905f39ad8cb1f,
0x4cd5dd9fb40b3b8f,
0x957411359ba6e4c
])).unwrap(),
}
);
}
#[test]
fn test_fq2_negation() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
};
a.negate();
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x8cfe87fc96dbaae4,
0xcc6615c8fb0492d,
0xdc167fc04da19c37,
0xab107d49317487ab,
0x7e555df189f880e3,
0x19083f5486a10cbd
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x228109103250c9d0,
0x8a411ad149045812,
0xa9109e8f3041427e,
0xb07e9bc405608611,
0xfcd559cbe77bd8b8,
0x18d400b280d93e62
])).unwrap(),
}
);
}
#[test]
fn test_fq2_doubling() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
};
a.double();
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x5a00f006d247ff8e,
0x23cb3d4443476da4,
0x1634a5c1521eb3da,
0x72cd9c7784211627,
0x998c938972a657e7,
0x1f1a52b65bdb3b9
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x2efbeddf9b5dc1b6,
0x28d5ca5ad09f4fdb,
0x7c4068238cdf674b,
0x67f15f81dc49195b,
0x9c8c9bd4b79fa83d,
0x25a226f714d506e
])).unwrap(),
}
);
}
#[test]
fn test_fq2_frobenius_map() {
use super::fq::FqRepr;
use PrimeField;
let mut a = Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc,
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837,
])).unwrap(),
};
a.frobenius_map(0);
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
}
);
a.frobenius_map(1);
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x228109103250c9d0,
0x8a411ad149045812,
0xa9109e8f3041427e,
0xb07e9bc405608611,
0xfcd559cbe77bd8b8,
0x18d400b280d93e62
])).unwrap(),
}
);
a.frobenius_map(1);
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
}
);
a.frobenius_map(2);
assert_eq!(
a,
Fq2 {
c0: Fq::from_repr(FqRepr([
0x2d0078036923ffc7,
0x11e59ea221a3b6d2,
0x8b1a52e0a90f59ed,
0xb966ce3bc2108b13,
0xccc649c4b9532bf3,
0xf8d295b2ded9dc
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0x977df6efcdaee0db,
0x946ae52d684fa7ed,
0xbe203411c66fb3a5,
0xb3f8afc0ee248cad,
0x4e464dea5bcfd41e,
0x12d1137b8a6a837
])).unwrap(),
}
);
}
#[test]
fn test_fq2_sqrt() {
use super::fq::FqRepr;
use PrimeField;
assert_eq!(
Fq2 {
c0: Fq::from_repr(FqRepr([
0x476b4c309720e227,
0x34c2d04faffdab6,
0xa57e6fc1bab51fd9,
0xdb4a116b5bf74aa1,
0x1e58b2159dfe10e2,
0x7ca7da1f13606ac
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xfa8de88b7516d2c3,
0x371a75ed14f41629,
0x4cec2dca577a3eb6,
0x212611bca4e99121,
0x8ee5394d77afb3d,
0xec92336650e49d5
])).unwrap(),
}.sqrt()
.unwrap(),
Fq2 {
c0: Fq::from_repr(FqRepr([
0x40b299b2704258c5,
0x6ef7de92e8c68b63,
0x6d2ddbe552203e82,
0x8d7f1f723d02c1d3,
0x881b3e01b611c070,
0x10f6963bbad2ebc5
])).unwrap(),
c1: Fq::from_repr(FqRepr([
0xc099534fc209e752,
0x7670594665676447,
0x28a20faed211efe7,
0x6b852aeaf2afcb1b,
0xa4c93b08105d71a9,
0x8d7cfff94216330
])).unwrap(),
}
);
assert_eq!(
Fq2 {
c0: Fq::from_repr(FqRepr([
0xb9f78429d1517a6b,
0x1eabfffeb153ffff,
0x6730d2a0f6b0f624,
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
])).unwrap(),
c1: Fq::zero(),
}.sqrt()
.unwrap(),
Fq2 {
c0: Fq::zero(),
c1: Fq::from_repr(FqRepr([
0xb9fefffffd4357a3,
0x1eabfffeb153ffff,
0x6730d2a0f6b0f624,
0x64774b84f38512bf,
0x4b1ba7b6434bacd7,
0x1a0111ea397fe69a
])).unwrap(),
}
);
}
#[test]
fn test_fq2_legendre() {
use LegendreSymbol::*;
assert_eq!(Zero, Fq2::zero().legendre());
// i^2 = -1
let mut m1 = Fq2::one();
m1.negate();
assert_eq!(QuadraticResidue, m1.legendre());
m1.mul_by_nonresidue();
assert_eq!(QuadraticNonResidue, m1.legendre());
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
#[test]
fn test_fq2_mul_nonresidue() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let nqr = Fq2 {
c0: Fq::one(),
c1: Fq::one(),
};
for _ in 0..1000 {
let mut a = Fq2::rand(&mut rng);
let mut b = a;
a.mul_by_nonresidue();
b.mul_assign(&nqr);
assert_eq!(a, b);
}
}
#[test]
fn fq2_field_tests() {
use PrimeField;
::tests::field::random_field_tests::<Fq2>();
::tests::field::random_sqrt_tests::<Fq2>();
::tests::field::random_frobenius_tests::<Fq2, _>(super::fq::Fq::char(), 13);
}

@ -0,0 +1,374 @@
use super::fq::{FROBENIUS_COEFF_FQ6_C1, FROBENIUS_COEFF_FQ6_C2};
use super::fq2::Fq2;
use rand::{Rand, Rng};
use Field;
/// An element of Fq6, represented by c0 + c1 * v + c2 * v^(2).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Fq6 {
pub c0: Fq2,
pub c1: Fq2,
pub c2: Fq2,
}
impl ::std::fmt::Display for Fq6 {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Fq6({} + {} * v, {} * v^2)", self.c0, self.c1, self.c2)
}
}
impl Rand for Fq6 {
fn rand<R: Rng>(rng: &mut R) -> Self {
Fq6 {
c0: rng.gen(),
c1: rng.gen(),
c2: rng.gen(),
}
}
}
impl Fq6 {
/// Multiply by quadratic nonresidue v.
pub fn mul_by_nonresidue(&mut self) {
use std::mem::swap;
swap(&mut self.c0, &mut self.c1);
swap(&mut self.c0, &mut self.c2);
self.c0.mul_by_nonresidue();
}
pub fn mul_by_1(&mut self, c1: &Fq2) {
let mut b_b = self.c1;
b_b.mul_assign(c1);
let mut t1 = *c1;
{
let mut tmp = self.c1;
tmp.add_assign(&self.c2);
t1.mul_assign(&tmp);
t1.sub_assign(&b_b);
t1.mul_by_nonresidue();
}
let mut t2 = *c1;
{
let mut tmp = self.c0;
tmp.add_assign(&self.c1);
t2.mul_assign(&tmp);
t2.sub_assign(&b_b);
}
self.c0 = t1;
self.c1 = t2;
self.c2 = b_b;
}
pub fn mul_by_01(&mut self, c0: &Fq2, c1: &Fq2) {
let mut a_a = self.c0;
let mut b_b = self.c1;
a_a.mul_assign(c0);
b_b.mul_assign(c1);
let mut t1 = *c1;
{
let mut tmp = self.c1;
tmp.add_assign(&self.c2);
t1.mul_assign(&tmp);
t1.sub_assign(&b_b);
t1.mul_by_nonresidue();
t1.add_assign(&a_a);
}
let mut t3 = *c0;
{
let mut tmp = self.c0;
tmp.add_assign(&self.c2);
t3.mul_assign(&tmp);
t3.sub_assign(&a_a);
t3.add_assign(&b_b);
}
let mut t2 = *c0;
t2.add_assign(c1);
{
let mut tmp = self.c0;
tmp.add_assign(&self.c1);
t2.mul_assign(&tmp);
t2.sub_assign(&a_a);
t2.sub_assign(&b_b);
}
self.c0 = t1;
self.c1 = t2;
self.c2 = t3;
}
}
impl Field for Fq6 {
fn zero() -> Self {
Fq6 {
c0: Fq2::zero(),
c1: Fq2::zero(),
c2: Fq2::zero(),
}
}
fn one() -> Self {
Fq6 {
c0: Fq2::one(),
c1: Fq2::zero(),
c2: Fq2::zero(),
}
}
fn is_zero(&self) -> bool {
self.c0.is_zero() && self.c1.is_zero() && self.c2.is_zero()
}
fn double(&mut self) {
self.c0.double();
self.c1.double();
self.c2.double();
}
fn negate(&mut self) {
self.c0.negate();
self.c1.negate();
self.c2.negate();
}
fn add_assign(&mut self, other: &Self) {
self.c0.add_assign(&other.c0);
self.c1.add_assign(&other.c1);
self.c2.add_assign(&other.c2);
}
fn sub_assign(&mut self, other: &Self) {
self.c0.sub_assign(&other.c0);
self.c1.sub_assign(&other.c1);
self.c2.sub_assign(&other.c2);
}
fn frobenius_map(&mut self, power: usize) {
self.c0.frobenius_map(power);
self.c1.frobenius_map(power);
self.c2.frobenius_map(power);
self.c1.mul_assign(&FROBENIUS_COEFF_FQ6_C1[power % 6]);
self.c2.mul_assign(&FROBENIUS_COEFF_FQ6_C2[power % 6]);
}
fn square(&mut self) {
let mut s0 = self.c0;
s0.square();
let mut ab = self.c0;
ab.mul_assign(&self.c1);
let mut s1 = ab;
s1.double();
let mut s2 = self.c0;
s2.sub_assign(&self.c1);
s2.add_assign(&self.c2);
s2.square();
let mut bc = self.c1;
bc.mul_assign(&self.c2);
let mut s3 = bc;
s3.double();
let mut s4 = self.c2;
s4.square();
self.c0 = s3;
self.c0.mul_by_nonresidue();
self.c0.add_assign(&s0);
self.c1 = s4;
self.c1.mul_by_nonresidue();
self.c1.add_assign(&s1);
self.c2 = s1;
self.c2.add_assign(&s2);
self.c2.add_assign(&s3);
self.c2.sub_assign(&s0);
self.c2.sub_assign(&s4);
}
fn mul_assign(&mut self, other: &Self) {
let mut a_a = self.c0;
let mut b_b = self.c1;
let mut c_c = self.c2;
a_a.mul_assign(&other.c0);
b_b.mul_assign(&other.c1);
c_c.mul_assign(&other.c2);
let mut t1 = other.c1;
t1.add_assign(&other.c2);
{
let mut tmp = self.c1;
tmp.add_assign(&self.c2);
t1.mul_assign(&tmp);
t1.sub_assign(&b_b);
t1.sub_assign(&c_c);
t1.mul_by_nonresidue();
t1.add_assign(&a_a);
}
let mut t3 = other.c0;
t3.add_assign(&other.c2);
{
let mut tmp = self.c0;
tmp.add_assign(&self.c2);
t3.mul_assign(&tmp);
t3.sub_assign(&a_a);
t3.add_assign(&b_b);
t3.sub_assign(&c_c);
}
let mut t2 = other.c0;
t2.add_assign(&other.c1);
{
let mut tmp = self.c0;
tmp.add_assign(&self.c1);
t2.mul_assign(&tmp);
t2.sub_assign(&a_a);
t2.sub_assign(&b_b);
c_c.mul_by_nonresidue();
t2.add_assign(&c_c);
}
self.c0 = t1;
self.c1 = t2;
self.c2 = t3;
}
fn inverse(&self) -> Option<Self> {
let mut c0 = self.c2;
c0.mul_by_nonresidue();
c0.mul_assign(&self.c1);
c0.negate();
{
let mut c0s = self.c0;
c0s.square();
c0.add_assign(&c0s);
}
let mut c1 = self.c2;
c1.square();
c1.mul_by_nonresidue();
{
let mut c01 = self.c0;
c01.mul_assign(&self.c1);
c1.sub_assign(&c01);
}
let mut c2 = self.c1;
c2.square();
{
let mut c02 = self.c0;
c02.mul_assign(&self.c2);
c2.sub_assign(&c02);
}
let mut tmp1 = self.c2;
tmp1.mul_assign(&c1);
let mut tmp2 = self.c1;
tmp2.mul_assign(&c2);
tmp1.add_assign(&tmp2);
tmp1.mul_by_nonresidue();
tmp2 = self.c0;
tmp2.mul_assign(&c0);
tmp1.add_assign(&tmp2);
match tmp1.inverse() {
Some(t) => {
let mut tmp = Fq6 {
c0: t,
c1: t,
c2: t,
};
tmp.c0.mul_assign(&c0);
tmp.c1.mul_assign(&c1);
tmp.c2.mul_assign(&c2);
Some(tmp)
}
None => None,
}
}
}
#[cfg(test)]
use rand::{SeedableRng, XorShiftRng};
#[test]
fn test_fq6_mul_nonresidue() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let nqr = Fq6 {
c0: Fq2::zero(),
c1: Fq2::one(),
c2: Fq2::zero(),
};
for _ in 0..1000 {
let mut a = Fq6::rand(&mut rng);
let mut b = a;
a.mul_by_nonresidue();
b.mul_assign(&nqr);
assert_eq!(a, b);
}
}
#[test]
fn test_fq6_mul_by_1() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let c1 = Fq2::rand(&mut rng);
let mut a = Fq6::rand(&mut rng);
let mut b = a;
a.mul_by_1(&c1);
b.mul_assign(&Fq6 {
c0: Fq2::zero(),
c1: c1,
c2: Fq2::zero(),
});
assert_eq!(a, b);
}
}
#[test]
fn test_fq6_mul_by_01() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let c0 = Fq2::rand(&mut rng);
let c1 = Fq2::rand(&mut rng);
let mut a = Fq6::rand(&mut rng);
let mut b = a;
a.mul_by_01(&c0, &c1);
b.mul_assign(&Fq6 {
c0: c0,
c1: c1,
c2: Fq2::zero(),
});
assert_eq!(a, b);
}
}
#[test]
fn fq6_field_tests() {
use PrimeField;
::tests::field::random_field_tests::<Fq6>();
::tests::field::random_frobenius_tests::<Fq6, _>(super::fq::Fq::char(), 13);
}

pairing/src/bls12_381/fr.rs (1614 lines): diff suppressed because it is too large

@ -0,0 +1,364 @@
mod ec;
mod fq;
mod fq12;
mod fq2;
mod fq6;
mod fr;
#[cfg(test)]
mod tests;
pub use self::ec::{
G1, G1Affine, G1Compressed, G1Prepared, G1Uncompressed, G2, G2Affine, G2Compressed, G2Prepared,
G2Uncompressed,
};
pub use self::fq::{Fq, FqRepr};
pub use self::fq12::Fq12;
pub use self::fq2::Fq2;
pub use self::fq6::Fq6;
pub use self::fr::{Fr, FrRepr};
use super::{BitIterator, CurveAffine, Engine, Field};
// The BLS parameter x for BLS12-381 is -0xd201000000010000
const BLS_X: u64 = 0xd201000000010000;
const BLS_X_IS_NEGATIVE: bool = true;
#[derive(Clone, Debug)]
pub struct Bls12;
impl Engine for Bls12 {
type Fr = Fr;
type G1 = G1;
type G1Affine = G1Affine;
type G2 = G2;
type G2Affine = G2Affine;
type Fq = Fq;
type Fqe = Fq2;
type Fqk = Fq12;
fn miller_loop<'a, I>(i: I) -> Self::Fqk
where
I: IntoIterator<
Item = &'a (
&'a <Self::G1Affine as CurveAffine>::Prepared,
&'a <Self::G2Affine as CurveAffine>::Prepared,
),
>,
{
let mut pairs = vec![];
for &(p, q) in i {
if !p.is_zero() && !q.is_zero() {
pairs.push((p, q.coeffs.iter()));
}
}
// Twisting isomorphism from E to E'
fn ell(f: &mut Fq12, coeffs: &(Fq2, Fq2, Fq2), p: &G1Affine) {
let mut c0 = coeffs.0;
let mut c1 = coeffs.1;
c0.c0.mul_assign(&p.y);
c0.c1.mul_assign(&p.y);
c1.c0.mul_assign(&p.x);
c1.c1.mul_assign(&p.x);
// Sparse multiplication in Fq12
f.mul_by_014(&coeffs.2, &c1, &c0);
}
let mut f = Fq12::one();
let mut found_one = false;
for i in BitIterator::new(&[BLS_X >> 1]) {
if !found_one {
found_one = i;
continue;
}
for &mut (p, ref mut coeffs) in &mut pairs {
ell(&mut f, coeffs.next().unwrap(), &p.0);
}
if i {
for &mut (p, ref mut coeffs) in &mut pairs {
ell(&mut f, coeffs.next().unwrap(), &p.0);
}
}
f.square();
}
for &mut (p, ref mut coeffs) in &mut pairs {
ell(&mut f, coeffs.next().unwrap(), &p.0);
}
if BLS_X_IS_NEGATIVE {
f.conjugate();
}
f
}
fn final_exponentiation(r: &Fq12) -> Option<Fq12> {
let mut f1 = *r;
f1.conjugate();
match r.inverse() {
Some(mut f2) => {
let mut r = f1;
r.mul_assign(&f2);
f2 = r;
r.frobenius_map(2);
r.mul_assign(&f2);
fn exp_by_x(f: &mut Fq12, x: u64) {
*f = f.pow(&[x]);
if BLS_X_IS_NEGATIVE {
f.conjugate();
}
}
let mut x = BLS_X;
let mut y0 = r;
y0.square();
let mut y1 = y0;
exp_by_x(&mut y1, x);
x >>= 1;
let mut y2 = y1;
exp_by_x(&mut y2, x);
x <<= 1;
let mut y3 = r;
y3.conjugate();
y1.mul_assign(&y3);
y1.conjugate();
y1.mul_assign(&y2);
y2 = y1;
exp_by_x(&mut y2, x);
y3 = y2;
exp_by_x(&mut y3, x);
y1.conjugate();
y3.mul_assign(&y1);
y1.conjugate();
y1.frobenius_map(3);
y2.frobenius_map(2);
y1.mul_assign(&y2);
y2 = y3;
exp_by_x(&mut y2, x);
y2.mul_assign(&y0);
y2.mul_assign(&r);
y1.mul_assign(&y2);
y2 = y3;
y2.frobenius_map(1);
y1.mul_assign(&y2);
Some(y1)
}
None => None,
}
}
}
impl G2Prepared {
pub fn is_zero(&self) -> bool {
self.infinity
}
pub fn from_affine(q: G2Affine) -> Self {
if q.is_zero() {
return G2Prepared {
coeffs: vec![],
infinity: true,
};
}
fn doubling_step(r: &mut G2) -> (Fq2, Fq2, Fq2) {
// Adaptation of Algorithm 26, https://eprint.iacr.org/2010/354.pdf
let mut tmp0 = r.x;
tmp0.square();
let mut tmp1 = r.y;
tmp1.square();
let mut tmp2 = tmp1;
tmp2.square();
let mut tmp3 = tmp1;
tmp3.add_assign(&r.x);
tmp3.square();
tmp3.sub_assign(&tmp0);
tmp3.sub_assign(&tmp2);
tmp3.double();
let mut tmp4 = tmp0;
tmp4.double();
tmp4.add_assign(&tmp0);
let mut tmp6 = r.x;
tmp6.add_assign(&tmp4);
let mut tmp5 = tmp4;
tmp5.square();
let mut zsquared = r.z;
zsquared.square();
r.x = tmp5;
r.x.sub_assign(&tmp3);
r.x.sub_assign(&tmp3);
r.z.add_assign(&r.y);
r.z.square();
r.z.sub_assign(&tmp1);
r.z.sub_assign(&zsquared);
r.y = tmp3;
r.y.sub_assign(&r.x);
r.y.mul_assign(&tmp4);
tmp2.double();
tmp2.double();
tmp2.double();
r.y.sub_assign(&tmp2);
tmp3 = tmp4;
tmp3.mul_assign(&zsquared);
tmp3.double();
tmp3.negate();
tmp6.square();
tmp6.sub_assign(&tmp0);
tmp6.sub_assign(&tmp5);
tmp1.double();
tmp1.double();
tmp6.sub_assign(&tmp1);
tmp0 = r.z;
tmp0.mul_assign(&zsquared);
tmp0.double();
(tmp0, tmp3, tmp6)
}
fn addition_step(r: &mut G2, q: &G2Affine) -> (Fq2, Fq2, Fq2) {
// Adaptation of Algorithm 27, https://eprint.iacr.org/2010/354.pdf
let mut zsquared = r.z;
zsquared.square();
let mut ysquared = q.y;
ysquared.square();
let mut t0 = zsquared;
t0.mul_assign(&q.x);
let mut t1 = q.y;
t1.add_assign(&r.z);
t1.square();
t1.sub_assign(&ysquared);
t1.sub_assign(&zsquared);
t1.mul_assign(&zsquared);
let mut t2 = t0;
t2.sub_assign(&r.x);
let mut t3 = t2;
t3.square();
let mut t4 = t3;
t4.double();
t4.double();
let mut t5 = t4;
t5.mul_assign(&t2);
let mut t6 = t1;
t6.sub_assign(&r.y);
t6.sub_assign(&r.y);
let mut t9 = t6;
t9.mul_assign(&q.x);
let mut t7 = t4;
t7.mul_assign(&r.x);
r.x = t6;
r.x.square();
r.x.sub_assign(&t5);
r.x.sub_assign(&t7);
r.x.sub_assign(&t7);
r.z.add_assign(&t2);
r.z.square();
r.z.sub_assign(&zsquared);
r.z.sub_assign(&t3);
let mut t10 = q.y;
t10.add_assign(&r.z);
let mut t8 = t7;
t8.sub_assign(&r.x);
t8.mul_assign(&t6);
t0 = r.y;
t0.mul_assign(&t5);
t0.double();
r.y = t8;
r.y.sub_assign(&t0);
t10.square();
t10.sub_assign(&ysquared);
let mut ztsquared = r.z;
ztsquared.square();
t10.sub_assign(&ztsquared);
t9.double();
t9.sub_assign(&t10);
t10 = r.z;
t10.double();
t6.negate();
t1 = t6;
t1.double();
(t10, t1, t9)
}
let mut coeffs = vec![];
let mut r: G2 = q.into();
let mut found_one = false;
for i in BitIterator::new([BLS_X >> 1]) {
if !found_one {
found_one = i;
continue;
}
coeffs.push(doubling_step(&mut r));
if i {
coeffs.push(addition_step(&mut r, &q));
}
}
coeffs.push(doubling_step(&mut r));
G2Prepared {
coeffs,
infinity: false,
}
}
}
#[test]
fn bls12_engine_tests() {
::tests::engine::engine_tests::<Bls12>();
}

@ -0,0 +1,611 @@
use super::*;
use *;
#[test]
fn test_pairing_result_against_relic() {
/*
Sent to me from Diego Aranha (author of RELIC library):
1250EBD871FC0A92 A7B2D83168D0D727 272D441BEFA15C50 3DD8E90CE98DB3E7 B6D194F60839C508 A84305AACA1789B6
089A1C5B46E5110B 86750EC6A5323488 68A84045483C92B7 AF5AF689452EAFAB F1A8943E50439F1D 59882A98EAA0170F
1368BB445C7C2D20 9703F239689CE34C 0378A68E72A6B3B2 16DA0E22A5031B54 DDFF57309396B38C 881C4C849EC23E87
193502B86EDB8857 C273FA075A505129 37E0794E1E65A761 7C90D8BD66065B1F FFE51D7A579973B1 315021EC3C19934F
01B2F522473D1713 91125BA84DC4007C FBF2F8DA752F7C74 185203FCCA589AC7 19C34DFFBBAAD843 1DAD1C1FB597AAA5
018107154F25A764 BD3C79937A45B845 46DA634B8F6BE14A 8061E55CCEBA478B 23F7DACAA35C8CA7 8BEAE9624045B4B6
19F26337D205FB46 9CD6BD15C3D5A04D C88784FBB3D0B2DB DEA54D43B2B73F2C BB12D58386A8703E 0F948226E47EE89D
06FBA23EB7C5AF0D 9F80940CA771B6FF D5857BAAF222EB95 A7D2809D61BFE02E 1BFD1B68FF02F0B8 102AE1C2D5D5AB1A
11B8B424CD48BF38 FCEF68083B0B0EC5 C81A93B330EE1A67 7D0D15FF7B984E89 78EF48881E32FAC9 1B93B47333E2BA57
03350F55A7AEFCD3 C31B4FCB6CE5771C C6A0E9786AB59733 20C806AD36082910 7BA810C5A09FFDD9 BE2291A0C25A99A2
04C581234D086A99 02249B64728FFD21 A189E87935A95405 1C7CDBA7B3872629 A4FAFC05066245CB 9108F0242D0FE3EF
0F41E58663BF08CF 068672CBD01A7EC7 3BACA4D72CA93544 DEFF686BFD6DF543 D48EAA24AFE47E1E FDE449383B676631
*/
assert_eq!(Bls12::pairing(G1::one(), G2::one()), Fq12 {
c0: Fq6 {
c0: Fq2 {
c0: Fq::from_str("2819105605953691245277803056322684086884703000473961065716485506033588504203831029066448642358042597501014294104502").unwrap(),
c1: Fq::from_str("1323968232986996742571315206151405965104242542339680722164220900812303524334628370163366153839984196298685227734799").unwrap()
},
c1: Fq2 {
c0: Fq::from_str("2987335049721312504428602988447616328830341722376962214011674875969052835043875658579425548512925634040144704192135").unwrap(),
c1: Fq::from_str("3879723582452552452538684314479081967502111497413076598816163759028842927668327542875108457755966417881797966271311").unwrap()
},
c2: Fq2 {
c0: Fq::from_str("261508182517997003171385743374653339186059518494239543139839025878870012614975302676296704930880982238308326681253").unwrap(),
c1: Fq::from_str("231488992246460459663813598342448669854473942105054381511346786719005883340876032043606739070883099647773793170614").unwrap()
}
},
c1: Fq6 {
c0: Fq2 {
c0: Fq::from_str("3993582095516422658773669068931361134188738159766715576187490305611759126554796569868053818105850661142222948198557").unwrap(),
c1: Fq::from_str("1074773511698422344502264006159859710502164045911412750831641680783012525555872467108249271286757399121183508900634").unwrap()
},
c1: Fq2 {
c0: Fq::from_str("2727588299083545686739024317998512740561167011046940249988557419323068809019137624943703910267790601287073339193943").unwrap(),
c1: Fq::from_str("493643299814437640914745677854369670041080344349607504656543355799077485536288866009245028091988146107059514546594").unwrap()
},
c2: Fq2 {
c0: Fq::from_str("734401332196641441839439105942623141234148957972407782257355060229193854324927417865401895596108124443575283868655").unwrap(),
c1: Fq::from_str("2348330098288556420918672502923664952620152483128593484301759394583320358354186482723629999370241674973832318248497").unwrap()
}
}
});
}
fn test_vectors<G: CurveProjective, E: EncodedPoint<Affine = G::Affine>>(expected: &[u8]) {
let mut e = G::zero();
let mut v = vec![];
{
let mut expected = expected;
for _ in 0..1000 {
let e_affine = e.into_affine();
let encoded = E::from_affine(e_affine);
v.extend_from_slice(encoded.as_ref());
let mut decoded = E::empty();
decoded.as_mut().copy_from_slice(&expected[0..E::size()]);
expected = &expected[E::size()..];
let decoded = decoded.into_affine().unwrap();
assert_eq!(e_affine, decoded);
e.add_assign(&G::one());
}
}
assert_eq!(&v[..], expected);
}
#[test]
fn test_g1_uncompressed_valid_vectors() {
test_vectors::<G1, G1Uncompressed>(include_bytes!("g1_uncompressed_valid_test_vectors.dat"));
}
#[test]
fn test_g1_compressed_valid_vectors() {
test_vectors::<G1, G1Compressed>(include_bytes!("g1_compressed_valid_test_vectors.dat"));
}
#[test]
fn test_g2_uncompressed_valid_vectors() {
test_vectors::<G2, G2Uncompressed>(include_bytes!("g2_uncompressed_valid_test_vectors.dat"));
}
#[test]
fn test_g2_compressed_valid_vectors() {
test_vectors::<G2, G2Compressed>(include_bytes!("g2_compressed_valid_test_vectors.dat"));
}
#[test]
fn test_g1_uncompressed_invalid_vectors() {
{
let z = G1Affine::zero().into_uncompressed();
{
let mut z = z;
z.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected an uncompressed point");
}
}
{
let mut z = z;
z.as_mut()[0] |= 0b0010_0000;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the parity bit should not be set if the point is at infinity");
}
}
for i in 0..G1Uncompressed::size() {
let mut z = z;
z.as_mut()[i] |= 0b0000_0001;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the coordinates should be zeroes at the point at infinity");
}
}
}
let o = G1Affine::one().into_uncompressed();
{
let mut o = o;
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected an uncompressed point");
}
}
let m = Fq::char();
{
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
m.write_be(&mut o.as_mut()[48..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "y coordinate");
} else {
panic!("should have rejected the point")
}
}
{
let m = Fq::zero().into_repr();
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
if let Err(GroupDecodingError::NotOnCurve) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because it isn't on the curve")
}
}
{
let mut o = o;
let mut x = Fq::one();
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq::from_repr(FqRepr::from(4)).unwrap()); // TODO: perhaps expose coeff_b through API?
if let Some(y) = x3b.sqrt() {
// We know this is on the curve, but it's likely not going to be in the correct subgroup.
x.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
y.into_repr().write_be(&mut o.as_mut()[48..]).unwrap();
if let Err(GroupDecodingError::NotInSubgroup) = o.into_affine() {
break;
} else {
panic!(
"should have rejected the point because it isn't in the correct subgroup"
)
}
} else {
x.add_assign(&Fq::one());
}
}
}
}
#[test]
fn test_g2_uncompressed_invalid_vectors() {
{
let z = G2Affine::zero().into_uncompressed();
{
let mut z = z;
z.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected an uncompressed point");
}
}
{
let mut z = z;
z.as_mut()[0] |= 0b0010_0000;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the parity bit should not be set if the point is at infinity");
}
}
for i in 0..G2Uncompressed::size() {
let mut z = z;
z.as_mut()[i] |= 0b0000_0001;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the coordinates should be zeroes at the point at infinity");
}
}
}
let o = G2Affine::one().into_uncompressed();
{
let mut o = o;
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected an uncompressed point");
}
}
let m = Fq::char();
{
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate (c1)");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
m.write_be(&mut o.as_mut()[48..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate (c0)");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
m.write_be(&mut o.as_mut()[96..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "y coordinate (c1)");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
m.write_be(&mut o.as_mut()[144..]).unwrap();
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "y coordinate (c0)");
} else {
panic!("should have rejected the point")
}
}
{
let m = Fq::zero().into_repr();
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
m.write_be(&mut o.as_mut()[48..]).unwrap();
if let Err(GroupDecodingError::NotOnCurve) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because it isn't on the curve")
}
}
{
let mut o = o;
let mut x = Fq2::one();
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq2 {
c0: Fq::from_repr(FqRepr::from(4)).unwrap(),
c1: Fq::from_repr(FqRepr::from(4)).unwrap(),
}); // TODO: perhaps expose coeff_b through API?
if let Some(y) = x3b.sqrt() {
// We know this is on the curve, but it's likely not going to be in the correct subgroup.
x.c1.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
x.c0.into_repr().write_be(&mut o.as_mut()[48..]).unwrap();
y.c1.into_repr().write_be(&mut o.as_mut()[96..]).unwrap();
y.c0.into_repr().write_be(&mut o.as_mut()[144..]).unwrap();
if let Err(GroupDecodingError::NotInSubgroup) = o.into_affine() {
break;
} else {
panic!(
"should have rejected the point because it isn't in the correct subgroup"
)
}
} else {
x.add_assign(&Fq2::one());
}
}
}
}
#[test]
fn test_g1_compressed_invalid_vectors() {
{
let z = G1Affine::zero().into_compressed();
{
let mut z = z;
z.as_mut()[0] &= 0b0111_1111;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected a compressed point");
}
}
{
let mut z = z;
z.as_mut()[0] |= 0b0010_0000;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the parity bit should not be set if the point is at infinity");
}
}
for i in 0..G1Compressed::size() {
let mut z = z;
z.as_mut()[i] |= 0b0000_0001;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the coordinates should be zeroes at the point at infinity");
}
}
}
let o = G1Affine::one().into_compressed();
{
let mut o = o;
o.as_mut()[0] &= 0b0111_1111;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected a compressed point");
}
}
let m = Fq::char();
{
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
let mut x = Fq::one();
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq::from_repr(FqRepr::from(4)).unwrap()); // TODO: perhaps expose coeff_b through API?
if let Some(_) = x3b.sqrt() {
x.add_assign(&Fq::one());
} else {
x.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::NotOnCurve) = o.into_affine() {
break;
} else {
panic!("should have rejected the point because it isn't on the curve")
}
}
}
}
{
let mut o = o;
let mut x = Fq::one();
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq::from_repr(FqRepr::from(4)).unwrap()); // TODO: perhaps expose coeff_b through API?
if let Some(_) = x3b.sqrt() {
// We know this is on the curve, but it's likely not going to be in the correct subgroup.
x.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::NotInSubgroup) = o.into_affine() {
break;
} else {
panic!(
"should have rejected the point because it isn't in the correct subgroup"
)
}
} else {
x.add_assign(&Fq::one());
}
}
}
}
#[test]
fn test_g2_compressed_invalid_vectors() {
{
let z = G2Affine::zero().into_compressed();
{
let mut z = z;
z.as_mut()[0] &= 0b0111_1111;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected a compressed point");
}
}
{
let mut z = z;
z.as_mut()[0] |= 0b0010_0000;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the parity bit should not be set if the point is at infinity");
}
}
for i in 0..G2Compressed::size() {
let mut z = z;
z.as_mut()[i] |= 0b0000_0001;
if let Err(GroupDecodingError::UnexpectedInformation) = z.into_affine() {
// :)
} else {
panic!("should have rejected the point because the coordinates should be zeroes at the point at infinity");
}
}
}
let o = G2Affine::one().into_compressed();
{
let mut o = o;
o.as_mut()[0] &= 0b0111_1111;
if let Err(GroupDecodingError::UnexpectedCompressionMode) = o.into_affine() {
// :)
} else {
panic!("should have rejected the point because we expected a compressed point");
}
}
let m = Fq::char();
{
let mut o = o;
m.write_be(&mut o.as_mut()[0..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate (c1)");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
m.write_be(&mut o.as_mut()[48..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::CoordinateDecodingError(coordinate, _)) = o.into_affine() {
assert_eq!(coordinate, "x coordinate (c0)");
} else {
panic!("should have rejected the point")
}
}
{
let mut o = o;
let mut x = Fq2 {
c0: Fq::one(),
c1: Fq::one(),
};
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq2 {
c0: Fq::from_repr(FqRepr::from(4)).unwrap(),
c1: Fq::from_repr(FqRepr::from(4)).unwrap(),
}); // TODO: perhaps expose coeff_b through API?
if let Some(_) = x3b.sqrt() {
x.add_assign(&Fq2::one());
} else {
x.c1.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
x.c0.into_repr().write_be(&mut o.as_mut()[48..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::NotOnCurve) = o.into_affine() {
break;
} else {
panic!("should have rejected the point because it isn't on the curve")
}
}
}
}
{
let mut o = o;
let mut x = Fq2 {
c0: Fq::one(),
c1: Fq::one(),
};
loop {
let mut x3b = x;
x3b.square();
x3b.mul_assign(&x);
x3b.add_assign(&Fq2 {
c0: Fq::from_repr(FqRepr::from(4)).unwrap(),
c1: Fq::from_repr(FqRepr::from(4)).unwrap(),
}); // TODO: perhaps expose coeff_b through API?
if let Some(_) = x3b.sqrt() {
// We know this is on the curve, but it's likely not going to be in the correct subgroup.
x.c1.into_repr().write_be(&mut o.as_mut()[0..]).unwrap();
x.c0.into_repr().write_be(&mut o.as_mut()[48..]).unwrap();
o.as_mut()[0] |= 0b1000_0000;
if let Err(GroupDecodingError::NotInSubgroup) = o.into_affine() {
break;
} else {
panic!(
"should have rejected the point because it isn't in the correct subgroup"
)
}
} else {
x.add_assign(&Fq2::one());
}
}
}
}

pairing/src/lib.rs (758 lines)

@ -0,0 +1,758 @@
// `clippy` is a code linting tool for improving code quality by catching
// common mistakes or strange code patterns. If the `clippy` feature is
// provided, it is enabled and all compiler warnings are prohibited.
#![cfg_attr(feature = "clippy", deny(warnings))]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", allow(inline_always))]
#![cfg_attr(feature = "clippy", allow(too_many_arguments))]
#![cfg_attr(feature = "clippy", allow(unreadable_literal))]
#![cfg_attr(feature = "clippy", allow(many_single_char_names))]
#![cfg_attr(feature = "clippy", allow(new_without_default_derive))]
#![cfg_attr(feature = "clippy", allow(write_literal))]
// Force public structures to implement Debug
#![deny(missing_debug_implementations)]
extern crate byteorder;
extern crate rand;
#[cfg(test)]
pub mod tests;
pub mod bls12_381;
mod wnaf;
pub use self::wnaf::Wnaf;
use std::error::Error;
use std::fmt;
use std::io::{self, Read, Write};
/// An "engine" is a collection of types (fields, elliptic curve groups, etc.)
/// with well-defined relationships. In particular, the G1/G2 curve groups are
/// of prime order `r`, and are equipped with a bilinear pairing function.
pub trait Engine: Sized + 'static + Clone {
/// This is the scalar field of the G1/G2 groups.
type Fr: PrimeField + SqrtField;
/// The projective representation of an element in G1.
type G1: CurveProjective<
Engine = Self,
Base = Self::Fq,
Scalar = Self::Fr,
Affine = Self::G1Affine,
>
+ From<Self::G1Affine>;
/// The affine representation of an element in G1.
type G1Affine: CurveAffine<
Engine = Self,
Base = Self::Fq,
Scalar = Self::Fr,
Projective = Self::G1,
Pair = Self::G2Affine,
PairingResult = Self::Fqk,
>
+ From<Self::G1>;
/// The projective representation of an element in G2.
type G2: CurveProjective<
Engine = Self,
Base = Self::Fqe,
Scalar = Self::Fr,
Affine = Self::G2Affine,
>
+ From<Self::G2Affine>;
/// The affine representation of an element in G2.
type G2Affine: CurveAffine<
Engine = Self,
Base = Self::Fqe,
Scalar = Self::Fr,
Projective = Self::G2,
Pair = Self::G1Affine,
PairingResult = Self::Fqk,
>
+ From<Self::G2>;
/// The base field that hosts G1.
type Fq: PrimeField + SqrtField;
/// The extension field that hosts G2.
type Fqe: SqrtField;
/// The extension field that hosts the target group of the pairing.
type Fqk: Field;
/// Perform a miller loop with some number of (G1, G2) pairs.
fn miller_loop<'a, I>(i: I) -> Self::Fqk
where
I: IntoIterator<
Item = &'a (
&'a <Self::G1Affine as CurveAffine>::Prepared,
&'a <Self::G2Affine as CurveAffine>::Prepared,
),
>;
/// Perform final exponentiation of the result of a miller loop.
fn final_exponentiation(&Self::Fqk) -> Option<Self::Fqk>;
/// Performs a complete pairing operation `(p, q)`.
fn pairing<G1, G2>(p: G1, q: G2) -> Self::Fqk
where
G1: Into<Self::G1Affine>,
G2: Into<Self::G2Affine>,
{
Self::final_exponentiation(&Self::miller_loop(
[(&(p.into().prepare()), &(q.into().prepare()))].into_iter(),
)).unwrap()
}
}
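// A minimal sketch (assuming the `Bls12` engine from the crate's `bls12_381`
// module) of how the reduced pairing above decomposes into a Miller loop
// followed by final exponentiation.
#[test]
fn pairing_decomposition_sketch() {
    use bls12_381::Bls12;
    let p = <Bls12 as Engine>::G1Affine::one();
    let q = <Bls12 as Engine>::G2Affine::one();
    let reduced = Bls12::pairing(p, q);
    let manual = Bls12::final_exponentiation(&Bls12::miller_loop(
        [(&p.prepare(), &q.prepare())].into_iter(),
    )).unwrap();
    assert_eq!(reduced, manual);
}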
/// Projective representation of an elliptic curve point guaranteed to be
/// in the correct prime order subgroup.
pub trait CurveProjective:
PartialEq
+ Eq
+ Sized
+ Copy
+ Clone
+ Send
+ Sync
+ fmt::Debug
+ fmt::Display
+ rand::Rand
+ 'static
{
type Engine: Engine<Fr = Self::Scalar>;
type Scalar: PrimeField + SqrtField;
type Base: SqrtField;
type Affine: CurveAffine<Projective = Self, Scalar = Self::Scalar>;
/// Returns the additive identity.
fn zero() -> Self;
/// Returns a fixed generator of unknown exponent.
fn one() -> Self;
/// Determines if this point is the point at infinity.
fn is_zero(&self) -> bool;
/// Normalizes a slice of projective elements so that
/// conversion to affine is cheap.
fn batch_normalization(v: &mut [Self]);
/// Checks if the point is already "normalized" so that
/// cheap affine conversion is possible.
fn is_normalized(&self) -> bool;
/// Doubles this element.
fn double(&mut self);
/// Adds another element to this element.
fn add_assign(&mut self, other: &Self);
/// Subtracts another element from this element.
fn sub_assign(&mut self, other: &Self) {
let mut tmp = *other;
tmp.negate();
self.add_assign(&tmp);
}
/// Adds an affine element to this element.
fn add_assign_mixed(&mut self, other: &Self::Affine);
/// Negates this element.
fn negate(&mut self);
/// Performs scalar multiplication of this element.
fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S);
/// Converts this element into its affine representation.
fn into_affine(&self) -> Self::Affine;
/// Recommends a wNAF window table size given a scalar. Always returns a number
/// between 2 and 22, inclusive.
fn recommended_wnaf_for_scalar(scalar: <Self::Scalar as PrimeField>::Repr) -> usize;
/// Recommends a wNAF window size given the number of scalars you intend to multiply
/// a base by. Always returns a number between 2 and 22, inclusive.
fn recommended_wnaf_for_num_scalars(num_scalars: usize) -> usize;
}
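// A minimal sketch (assuming `G1` from the crate's `bls12_381` module) of the
// default `sub_assign` above: subtracting an element from itself, via
// negate-and-add, yields the additive identity.
#[test]
fn sub_assign_cancels_sketch() {
    use bls12_381::G1;
    let a = G1::one();
    let mut b = a;
    b.sub_assign(&a);
    assert!(b.is_zero());
}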
/// Affine representation of an elliptic curve point guaranteed to be
/// in the correct prime order subgroup.
pub trait CurveAffine:
Copy + Clone + Sized + Send + Sync + fmt::Debug + fmt::Display + PartialEq + Eq + 'static
{
type Engine: Engine<Fr = Self::Scalar>;
type Scalar: PrimeField + SqrtField;
type Base: SqrtField;
type Projective: CurveProjective<Affine = Self, Scalar = Self::Scalar>;
type Prepared: Clone + Send + Sync + 'static;
type Uncompressed: EncodedPoint<Affine = Self>;
type Compressed: EncodedPoint<Affine = Self>;
type Pair: CurveAffine<Pair = Self>;
type PairingResult: Field;
/// Returns the additive identity.
fn zero() -> Self;
/// Returns a fixed generator of unknown exponent.
fn one() -> Self;
/// Determines if this point represents the point at infinity; the
/// additive identity.
fn is_zero(&self) -> bool;
/// Negates this element.
fn negate(&mut self);
/// Performs scalar multiplication of this element with mixed addition.
fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, other: S) -> Self::Projective;
/// Prepares this element for pairing purposes.
fn prepare(&self) -> Self::Prepared;
/// Perform a pairing
fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult;
/// Converts this element into its projective representation.
fn into_projective(&self) -> Self::Projective;
/// Converts this element into its compressed encoding, so long as it's not
/// the point at infinity.
fn into_compressed(&self) -> Self::Compressed {
<Self::Compressed as EncodedPoint>::from_affine(*self)
}
/// Converts this element into its uncompressed encoding, so long as it's not
/// the point at infinity.
fn into_uncompressed(&self) -> Self::Uncompressed {
<Self::Uncompressed as EncodedPoint>::from_affine(*self)
}
}
/// An encoded elliptic curve point, which should essentially wrap a `[u8; N]`.
pub trait EncodedPoint:
Sized + Send + Sync + AsRef<[u8]> + AsMut<[u8]> + Clone + Copy + 'static
{
type Affine: CurveAffine;
/// Creates an empty representation.
fn empty() -> Self;
/// Returns the number of bytes consumed by this representation.
fn size() -> usize;
/// Converts an `EncodedPoint` into a `CurveAffine` element,
/// if the encoding represents a valid element.
fn into_affine(&self) -> Result<Self::Affine, GroupDecodingError>;
/// Converts an `EncodedPoint` into a `CurveAffine` element,
/// without guaranteeing that the encoding represents a valid
/// element. This is useful when the caller knows the encoding is
/// valid already.
///
/// If the encoding is invalid, this can break API invariants,
/// so caution is strongly encouraged.
fn into_affine_unchecked(&self) -> Result<Self::Affine, GroupDecodingError>;
/// Creates an `EncodedPoint` from an affine point, as long as the
/// point is not the point at infinity.
fn from_affine(affine: Self::Affine) -> Self;
}
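// A minimal sketch (assuming `G1Affine` from the crate's `bls12_381` module)
// of the encoding round trip described above: encode a point, then decode
// and validate it again.
#[test]
fn encoded_point_round_trip_sketch() {
    use bls12_381::G1Affine;
    let g = G1Affine::one();
    assert_eq!(g.into_compressed().into_affine().unwrap(), g);
    assert_eq!(g.into_uncompressed().into_affine().unwrap(), g);
}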
/// This trait represents an element of a field.
pub trait Field:
Sized + Eq + Copy + Clone + Send + Sync + fmt::Debug + fmt::Display + 'static + rand::Rand
{
/// Returns the zero element of the field, the additive identity.
fn zero() -> Self;
/// Returns the one element of the field, the multiplicative identity.
fn one() -> Self;
/// Returns true iff this element is zero.
fn is_zero(&self) -> bool;
/// Squares this element.
fn square(&mut self);
/// Doubles this element.
fn double(&mut self);
/// Negates this element.
fn negate(&mut self);
/// Adds another element to this element.
fn add_assign(&mut self, other: &Self);
/// Subtracts another element from this element.
fn sub_assign(&mut self, other: &Self);
/// Multiplies another element by this element.
fn mul_assign(&mut self, other: &Self);
/// Computes the multiplicative inverse of this element, if nonzero.
fn inverse(&self) -> Option<Self>;
/// Exponentiates this element by a power of the base prime modulus via
/// the Frobenius automorphism.
fn frobenius_map(&mut self, power: usize);
/// Exponentiates this element by a number represented with `u64` limbs,
/// least significant digit first.
fn pow<S: AsRef<[u64]>>(&self, exp: S) -> Self {
let mut res = Self::one();
let mut found_one = false;
for i in BitIterator::new(exp) {
if found_one {
res.square();
} else {
found_one = i;
}
if i {
res.mul_assign(self);
}
}
res
}
}
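// A minimal sketch (assuming `Fr` from the crate's `bls12_381` module) of the
// `pow` default method above: exponentiating by the single limb `[5]` matches
// squaring twice and multiplying once by hand.
#[test]
fn pow_small_exponent_sketch() {
    use bls12_381::Fr;
    let a = Fr::multiplicative_generator();
    let mut expected = a;
    expected.square(); // a^2
    expected.square(); // a^4
    expected.mul_assign(&a); // a^5
    assert_eq!(a.pow(&[5u64]), expected);
}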
/// This trait represents an element of a field that has a square root operation described for it.
pub trait SqrtField: Field {
/// Returns the Legendre symbol of the field element.
fn legendre(&self) -> LegendreSymbol;
/// Returns the square root of the field element, if it is a
/// quadratic residue.
fn sqrt(&self) -> Option<Self>;
}
/// This trait represents a wrapper around a biginteger which can encode any element of a particular
/// prime field. It is a smart wrapper around a sequence of `u64` limbs, least-significant digit
/// first.
pub trait PrimeFieldRepr:
Sized
+ Copy
+ Clone
+ Eq
+ Ord
+ Send
+ Sync
+ Default
+ fmt::Debug
+ fmt::Display
+ 'static
+ rand::Rand
+ AsRef<[u64]>
+ AsMut<[u64]>
+ From<u64>
{
/// Subtract another representation from this one.
fn sub_noborrow(&mut self, other: &Self);
/// Add another representation to this one.
fn add_nocarry(&mut self, other: &Self);
/// Compute the number of bits needed to encode this number. Always a
/// multiple of 64.
fn num_bits(&self) -> u32;
/// Returns true iff this number is zero.
fn is_zero(&self) -> bool;
/// Returns true iff this number is odd.
fn is_odd(&self) -> bool;
/// Returns true iff this number is even.
fn is_even(&self) -> bool;
/// Performs a rightwise bitshift of this number, effectively dividing
/// it by 2.
fn div2(&mut self);
/// Performs a rightwise bitshift of this number by some amount.
fn shr(&mut self, amt: u32);
/// Performs a leftwise bitshift of this number, effectively multiplying
/// it by 2. Overflow is ignored.
fn mul2(&mut self);
/// Performs a leftwise bitshift of this number by some amount.
fn shl(&mut self, amt: u32);
/// Writes this `PrimeFieldRepr` as a big endian integer.
fn write_be<W: Write>(&self, mut writer: W) -> io::Result<()> {
use byteorder::{BigEndian, WriteBytesExt};
for digit in self.as_ref().iter().rev() {
writer.write_u64::<BigEndian>(*digit)?;
}
Ok(())
}
/// Reads a big endian integer into this representation.
fn read_be<R: Read>(&mut self, mut reader: R) -> io::Result<()> {
use byteorder::{BigEndian, ReadBytesExt};
for digit in self.as_mut().iter_mut().rev() {
*digit = reader.read_u64::<BigEndian>()?;
}
Ok(())
}
/// Writes this `PrimeFieldRepr` as a little endian integer.
fn write_le<W: Write>(&self, mut writer: W) -> io::Result<()> {
use byteorder::{LittleEndian, WriteBytesExt};
for digit in self.as_ref().iter() {
writer.write_u64::<LittleEndian>(*digit)?;
}
Ok(())
}
/// Reads a little endian integer into this representation.
fn read_le<R: Read>(&mut self, mut reader: R) -> io::Result<()> {
use byteorder::{LittleEndian, ReadBytesExt};
for digit in self.as_mut().iter_mut() {
*digit = reader.read_u64::<LittleEndian>()?;
}
Ok(())
}
}
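// A minimal round-trip sketch (assuming `FrRepr` from the crate's `bls12_381`
// module) of the big-endian helpers above: writing a representation and
// reading it back should be lossless.
#[test]
fn repr_write_read_be_sketch() {
    use bls12_381::FrRepr;
    let r = FrRepr::from(12345u64);
    let mut buf: Vec<u8> = vec![];
    r.write_be(&mut buf).unwrap();
    let mut decoded = FrRepr::default();
    decoded.read_be(&buf[..]).unwrap();
    assert_eq!(r, decoded);
}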
#[derive(Debug, PartialEq)]
pub enum LegendreSymbol {
Zero = 0,
QuadraticResidue = 1,
QuadraticNonResidue = -1,
}
/// An error that may occur when trying to interpret a `PrimeFieldRepr` as a
/// `PrimeField` element.
#[derive(Debug)]
pub enum PrimeFieldDecodingError {
/// The encoded value is not in the field
NotInField(String),
}
impl Error for PrimeFieldDecodingError {
fn description(&self) -> &str {
match *self {
PrimeFieldDecodingError::NotInField(..) => "not an element of the field",
}
}
}
impl fmt::Display for PrimeFieldDecodingError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
PrimeFieldDecodingError::NotInField(ref repr) => {
write!(f, "{} is not an element of the field", repr)
}
}
}
}
/// An error that may occur when trying to decode an `EncodedPoint`.
#[derive(Debug)]
pub enum GroupDecodingError {
/// The coordinate(s) do not lie on the curve.
NotOnCurve,
/// The element is not part of the r-order subgroup.
NotInSubgroup,
/// One of the coordinates could not be decoded
CoordinateDecodingError(&'static str, PrimeFieldDecodingError),
/// The compression mode of the encoded element was not as expected
UnexpectedCompressionMode,
/// The encoding contained bits that should not have been set
UnexpectedInformation,
}
impl Error for GroupDecodingError {
fn description(&self) -> &str {
match *self {
GroupDecodingError::NotOnCurve => "coordinate(s) do not lie on the curve",
GroupDecodingError::NotInSubgroup => "the element is not part of an r-order subgroup",
GroupDecodingError::CoordinateDecodingError(..) => "coordinate(s) could not be decoded",
GroupDecodingError::UnexpectedCompressionMode => {
"encoding has unexpected compression mode"
}
GroupDecodingError::UnexpectedInformation => "encoding has unexpected information",
}
}
}
impl fmt::Display for GroupDecodingError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
GroupDecodingError::CoordinateDecodingError(description, ref err) => {
write!(f, "{} decoding error: {}", description, err)
}
_ => write!(f, "{}", self.description()),
}
}
}
/// This represents an element of a prime field.
pub trait PrimeField: Field {
/// The prime field can be converted back and forth into this biginteger
/// representation.
type Repr: PrimeFieldRepr + From<Self>;
/// Interpret a string of numbers as a (congruent) prime field element.
/// Does not accept unnecessary leading zeroes or a blank string.
fn from_str(s: &str) -> Option<Self> {
if s.is_empty() {
return None;
}
if s == "0" {
return Some(Self::zero());
}
let mut res = Self::zero();
let ten = Self::from_repr(Self::Repr::from(10)).unwrap();
let mut first_digit = true;
for c in s.chars() {
match c.to_digit(10) {
Some(c) => {
if first_digit {
if c == 0 {
return None;
}
first_digit = false;
}
res.mul_assign(&ten);
res.add_assign(&Self::from_repr(Self::Repr::from(u64::from(c))).unwrap());
}
None => {
return None;
}
}
}
Some(res)
}
/// Convert a biginteger representation into a prime field element, if
/// the number is an element of the field.
fn from_repr(Self::Repr) -> Result<Self, PrimeFieldDecodingError>;
/// Convert this prime field element into a biginteger representation.
fn into_repr(&self) -> Self::Repr;
/// Returns the field characteristic; the modulus.
fn char() -> Self::Repr;
/// How many bits are needed to represent an element of this field.
const NUM_BITS: u32;
/// How many bits of information can be reliably stored in the field element.
const CAPACITY: u32;
/// Returns the multiplicative generator of `char()` - 1 order. This element
/// must also be a quadratic nonresidue.
fn multiplicative_generator() -> Self;
/// 2^s * t = `char()` - 1 with t odd.
const S: u32;
/// Returns the 2^s root of unity computed by exponentiating the `multiplicative_generator()`
/// by t.
fn root_of_unity() -> Self;
}
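// A minimal sketch (assuming `Fr` from the crate's `bls12_381` module) of the
// relationship documented above: the 2^S root of unity, squared S times
// (i.e. raised to the 2^S power), is the multiplicative identity.
#[test]
fn root_of_unity_order_sketch() {
    use bls12_381::Fr;
    let mut a = Fr::root_of_unity();
    for _ in 0..Fr::S {
        a.square();
    }
    assert_eq!(a, Fr::one());
}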
#[derive(Debug)]
pub struct BitIterator<E> {
t: E,
n: usize,
}
impl<E: AsRef<[u64]>> BitIterator<E> {
pub fn new(t: E) -> Self {
let n = t.as_ref().len() * 64;
BitIterator { t, n }
}
}
impl<E: AsRef<[u64]>> Iterator for BitIterator<E> {
type Item = bool;
fn next(&mut self) -> Option<bool> {
if self.n == 0 {
None
} else {
self.n -= 1;
let part = self.n / 64;
let bit = self.n - (64 * part);
Some(self.t.as_ref()[part] & (1 << bit) > 0)
}
}
}
#[test]
fn test_bit_iterator() {
let mut a = BitIterator::new([0xa953d79b83f6ab59, 0x6dea2059e200bd39]);
let expected = "01101101111010100010000001011001111000100000000010111101001110011010100101010011110101111001101110000011111101101010101101011001";
for e in expected.chars() {
assert!(a.next().unwrap() == (e == '1'));
}
assert!(a.next().is_none());
let expected = "1010010101111110101010000101101011101000011101110101001000011001100100100011011010001011011011010001011011101100110100111011010010110001000011110100110001100110011101101000101100011100100100100100001010011101010111110011101011000011101000111011011101011001";
let mut a = BitIterator::new([
0x429d5f3ac3a3b759,
0xb10f4c66768b1c92,
0x92368b6d16ecd3b4,
0xa57ea85ae8775219,
]);
for e in expected.chars() {
assert!(a.next().unwrap() == (e == '1'));
}
assert!(a.next().is_none());
}
#[cfg(not(feature = "expose-arith"))]
use self::arith_impl::*;
#[cfg(feature = "expose-arith")]
pub use self::arith_impl::*;
#[cfg(feature = "u128-support")]
mod arith_impl {
/// Calculate a - b - borrow, returning the result and modifying
/// the borrow value.
#[inline(always)]
pub fn sbb(a: u64, b: u64, borrow: &mut u64) -> u64 {
let tmp = (1u128 << 64) + u128::from(a) - u128::from(b) - u128::from(*borrow);
*borrow = if tmp >> 64 == 0 { 1 } else { 0 };
tmp as u64
}
/// Calculate a + b + carry, returning the sum and modifying the
/// carry value.
#[inline(always)]
pub fn adc(a: u64, b: u64, carry: &mut u64) -> u64 {
let tmp = u128::from(a) + u128::from(b) + u128::from(*carry);
*carry = (tmp >> 64) as u64;
tmp as u64
}
/// Calculate a + (b * c) + carry, returning the least significant digit
/// and setting carry to the most significant digit.
#[inline(always)]
pub fn mac_with_carry(a: u64, b: u64, c: u64, carry: &mut u64) -> u64 {
let tmp = (u128::from(a)) + u128::from(b) * u128::from(c) + u128::from(*carry);
*carry = (tmp >> 64) as u64;
tmp as u64
}
}
#[cfg(not(feature = "u128-support"))]
mod arith_impl {
#[inline(always)]
fn split_u64(i: u64) -> (u64, u64) {
(i >> 32, i & 0xFFFFFFFF)
}
#[inline(always)]
fn combine_u64(hi: u64, lo: u64) -> u64 {
(hi << 32) | lo
}
/// Calculate a - b - borrow, returning the result and modifying
/// the borrow value.
#[inline(always)]
pub fn sbb(a: u64, b: u64, borrow: &mut u64) -> u64 {
let (a_hi, a_lo) = split_u64(a);
let (b_hi, b_lo) = split_u64(b);
let (b, r0) = split_u64((1 << 32) + a_lo - b_lo - *borrow);
let (b, r1) = split_u64((1 << 32) + a_hi - b_hi - ((b == 0) as u64));
*borrow = (b == 0) as u64;
combine_u64(r1, r0)
}
/// Calculate a + b + carry, returning the sum and modifying the
/// carry value.
#[inline(always)]
pub fn adc(a: u64, b: u64, carry: &mut u64) -> u64 {
let (a_hi, a_lo) = split_u64(a);
let (b_hi, b_lo) = split_u64(b);
let (carry_hi, carry_lo) = split_u64(*carry);
let (t, r0) = split_u64(a_lo + b_lo + carry_lo);
let (t, r1) = split_u64(t + a_hi + b_hi + carry_hi);
*carry = t;
combine_u64(r1, r0)
}
/// Calculate a + (b * c) + carry, returning the least significant digit
/// and setting carry to the most significant digit.
#[inline(always)]
pub fn mac_with_carry(a: u64, b: u64, c: u64, carry: &mut u64) -> u64 {
/*
[ b_hi | b_lo ]
[ c_hi | c_lo ] *
-------------------------------------------
[ b_lo * c_lo ] <-- w
[ b_hi * c_lo ] <-- x
[ b_lo * c_hi ] <-- y
[ b_hi * c_hi ] <-- z
[ a_hi | a_lo ]
[ C_hi | C_lo ]
*/
let (a_hi, a_lo) = split_u64(a);
let (b_hi, b_lo) = split_u64(b);
let (c_hi, c_lo) = split_u64(c);
let (carry_hi, carry_lo) = split_u64(*carry);
let (w_hi, w_lo) = split_u64(b_lo * c_lo);
let (x_hi, x_lo) = split_u64(b_hi * c_lo);
let (y_hi, y_lo) = split_u64(b_lo * c_hi);
let (z_hi, z_lo) = split_u64(b_hi * c_hi);
let (t, r0) = split_u64(w_lo + a_lo + carry_lo);
let (t, r1) = split_u64(t + w_hi + x_lo + y_lo + a_hi + carry_hi);
let (t, r2) = split_u64(t + x_hi + y_hi + z_lo);
let (_, r3) = split_u64(t + z_hi);
*carry = combine_u64(r3, r2);
combine_u64(r1, r0)
}
}
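// A small sanity sketch for the limb helpers above (whichever `arith_impl`
// backend is active): an addition that overflows the low word wraps it and
// sets the carry, and `mac_with_carry` returns the low digit of a + b*c + carry.
#[test]
fn limb_arithmetic_sketch() {
    let mut carry = 0;
    assert_eq!(adc(u64::max_value(), 1, &mut carry), 0);
    assert_eq!(carry, 1);
    let mut carry = 4;
    assert_eq!(mac_with_carry(1, 2, 3, &mut carry), 11);
    assert_eq!(carry, 0);
}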

420
pairing/src/tests/curve.rs Normal file
View File

@ -0,0 +1,420 @@
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use {CurveAffine, CurveProjective, EncodedPoint, Field};
pub fn curve_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// Negation edge case with zero.
{
let mut z = G::zero();
z.negate();
assert!(z.is_zero());
}
// Doubling edge case with zero.
{
let mut z = G::zero();
z.double();
assert!(z.is_zero());
}
// Addition edge cases with zero
{
let mut r = G::rand(&mut rng);
let rcopy = r;
r.add_assign(&G::zero());
assert_eq!(r, rcopy);
r.add_assign_mixed(&G::Affine::zero());
assert_eq!(r, rcopy);
let mut z = G::zero();
z.add_assign(&G::zero());
assert!(z.is_zero());
z.add_assign_mixed(&G::Affine::zero());
assert!(z.is_zero());
let mut z2 = z;
z2.add_assign(&r);
z.add_assign_mixed(&r.into_affine());
assert_eq!(z, z2);
assert_eq!(z, r);
}
// Transformations
{
let a = G::rand(&mut rng);
let b = a.into_affine().into_projective();
let c = a
.into_affine()
.into_projective()
.into_affine()
.into_projective();
assert_eq!(a, b);
assert_eq!(b, c);
}
random_addition_tests::<G>();
random_multiplication_tests::<G>();
random_doubling_tests::<G>();
random_negation_tests::<G>();
random_transformation_tests::<G>();
random_wnaf_tests::<G>();
random_encoding_tests::<G::Affine>();
}
fn random_wnaf_tests<G: CurveProjective>() {
use wnaf::*;
use PrimeField;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
{
let mut table = vec![];
let mut wnaf = vec![];
for w in 2..14 {
for _ in 0..100 {
let g = G::rand(&mut rng);
let s = G::Scalar::rand(&mut rng).into_repr();
let mut g1 = g;
g1.mul_assign(s);
wnaf_table(&mut table, g, w);
wnaf_form(&mut wnaf, s, w);
let g2 = wnaf_exp(&table, &wnaf);
assert_eq!(g1, g2);
}
}
}
{
fn only_compiles_if_send<S: Send>(_: &S) {}
for _ in 0..100 {
let g = G::rand(&mut rng);
let s = G::Scalar::rand(&mut rng).into_repr();
let mut g1 = g;
g1.mul_assign(s);
let g2 = {
let mut wnaf = Wnaf::new();
wnaf.base(g, 1).scalar(s)
};
let g3 = {
let mut wnaf = Wnaf::new();
wnaf.scalar(s).base(g)
};
let g4 = {
let mut wnaf = Wnaf::new();
let mut shared = wnaf.base(g, 1).shared();
only_compiles_if_send(&shared);
shared.scalar(s)
};
let g5 = {
let mut wnaf = Wnaf::new();
let mut shared = wnaf.scalar(s).shared();
only_compiles_if_send(&shared);
shared.base(g)
};
let g6 = {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
}
wnaf.base(g, 1).scalar(s)
};
let g7 = {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
}
wnaf.scalar(s).base(g)
};
let g8 = {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
}
let mut shared = wnaf.base(g, 1).shared();
only_compiles_if_send(&shared);
shared.scalar(s)
};
let g9 = {
let mut wnaf = Wnaf::new();
{
// Populate the vectors.
wnaf.base(rng.gen(), 1).scalar(rng.gen());
}
let mut shared = wnaf.scalar(s).shared();
only_compiles_if_send(&shared);
shared.base(g)
};
assert_eq!(g1, g2);
assert_eq!(g1, g3);
assert_eq!(g1, g4);
assert_eq!(g1, g5);
assert_eq!(g1, g6);
assert_eq!(g1, g7);
assert_eq!(g1, g8);
assert_eq!(g1, g9);
}
}
}
fn random_negation_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let r = G::rand(&mut rng);
let s = G::Scalar::rand(&mut rng);
let mut sneg = s;
sneg.negate();
let mut t1 = r;
t1.mul_assign(s);
let mut t2 = r;
t2.mul_assign(sneg);
let mut t3 = t1;
t3.add_assign(&t2);
assert!(t3.is_zero());
let mut t4 = t1;
t4.add_assign_mixed(&t2.into_affine());
assert!(t4.is_zero());
t1.negate();
assert_eq!(t1, t2);
}
}
fn random_doubling_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let mut a = G::rand(&mut rng);
let mut b = G::rand(&mut rng);
// 2(a + b)
let mut tmp1 = a;
tmp1.add_assign(&b);
tmp1.double();
// 2a + 2b
a.double();
b.double();
let mut tmp2 = a;
tmp2.add_assign(&b);
let mut tmp3 = a;
tmp3.add_assign_mixed(&b.into_affine());
assert_eq!(tmp1, tmp2);
assert_eq!(tmp1, tmp3);
}
}
fn random_multiplication_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let mut a = G::rand(&mut rng);
let mut b = G::rand(&mut rng);
let a_affine = a.into_affine();
let b_affine = b.into_affine();
let s = G::Scalar::rand(&mut rng);
// s ( a + b )
let mut tmp1 = a;
tmp1.add_assign(&b);
tmp1.mul_assign(s);
// sa + sb
a.mul_assign(s);
b.mul_assign(s);
let mut tmp2 = a;
tmp2.add_assign(&b);
// Affine multiplication
let mut tmp3 = a_affine.mul(s);
tmp3.add_assign(&b_affine.mul(s));
assert_eq!(tmp1, tmp2);
assert_eq!(tmp1, tmp3);
}
}
fn random_addition_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let a = G::rand(&mut rng);
let b = G::rand(&mut rng);
let c = G::rand(&mut rng);
let a_affine = a.into_affine();
let b_affine = b.into_affine();
let c_affine = c.into_affine();
// a + a should equal the doubling
{
let mut aplusa = a;
aplusa.add_assign(&a);
let mut aplusamixed = a;
aplusamixed.add_assign_mixed(&a.into_affine());
let mut adouble = a;
adouble.double();
assert_eq!(aplusa, adouble);
assert_eq!(aplusa, aplusamixed);
}
let mut tmp = vec![G::zero(); 6];
// (a + b) + c
tmp[0] = a;
tmp[0].add_assign(&b);
tmp[0].add_assign(&c);
// a + (b + c)
tmp[1] = b;
tmp[1].add_assign(&c);
tmp[1].add_assign(&a);
// (a + c) + b
tmp[2] = a;
tmp[2].add_assign(&c);
tmp[2].add_assign(&b);
// Mixed addition
// (a + b) + c
tmp[3] = a_affine.into_projective();
tmp[3].add_assign_mixed(&b_affine);
tmp[3].add_assign_mixed(&c_affine);
// a + (b + c)
tmp[4] = b_affine.into_projective();
tmp[4].add_assign_mixed(&c_affine);
tmp[4].add_assign_mixed(&a_affine);
// (a + c) + b
tmp[5] = a_affine.into_projective();
tmp[5].add_assign_mixed(&c_affine);
tmp[5].add_assign_mixed(&b_affine);
// Comparisons
for i in 0..6 {
for j in 0..6 {
assert_eq!(tmp[i], tmp[j]);
assert_eq!(tmp[i].into_affine(), tmp[j].into_affine());
}
assert!(tmp[i] != a);
assert!(tmp[i] != b);
assert!(tmp[i] != c);
assert!(a != tmp[i]);
assert!(b != tmp[i]);
assert!(c != tmp[i]);
}
}
}
fn random_transformation_tests<G: CurveProjective>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let g = G::rand(&mut rng);
let g_affine = g.into_affine();
let g_projective = g_affine.into_projective();
assert_eq!(g, g_projective);
}
// Batch normalization
for _ in 0..10 {
let mut v = (0..1000).map(|_| G::rand(&mut rng)).collect::<Vec<_>>();
for i in &v {
assert!(!i.is_normalized());
}
use rand::distributions::{IndependentSample, Range};
let between = Range::new(0, 1000);
// Sprinkle in some normalized points
for _ in 0..5 {
v[between.ind_sample(&mut rng)] = G::zero();
}
for _ in 0..5 {
let s = between.ind_sample(&mut rng);
v[s] = v[s].into_affine().into_projective();
}
let expected_v = v
.iter()
.map(|v| v.into_affine().into_projective())
.collect::<Vec<_>>();
G::batch_normalization(&mut v);
for i in &v {
assert!(i.is_normalized());
}
assert_eq!(v, expected_v);
}
}
fn random_encoding_tests<G: CurveAffine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
assert_eq!(
G::zero().into_uncompressed().into_affine().unwrap(),
G::zero()
);
assert_eq!(
G::zero().into_compressed().into_affine().unwrap(),
G::zero()
);
for _ in 0..1000 {
let mut r = G::Projective::rand(&mut rng).into_affine();
let uncompressed = r.into_uncompressed();
let de_uncompressed = uncompressed.into_affine().unwrap();
assert_eq!(de_uncompressed, r);
let compressed = r.into_compressed();
let de_compressed = compressed.into_affine().unwrap();
assert_eq!(de_compressed, r);
r.negate();
let compressed = r.into_compressed();
let de_compressed = compressed.into_affine().unwrap();
assert_eq!(de_compressed, r);
}
}

126
pairing/src/tests/engine.rs Normal file
View File

@ -0,0 +1,126 @@
use rand::{Rand, SeedableRng, XorShiftRng};
use {CurveAffine, CurveProjective, Engine, Field, PrimeField};
pub fn engine_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..10 {
let a = E::G1::rand(&mut rng).into_affine();
let b = E::G2::rand(&mut rng).into_affine();
assert!(a.pairing_with(&b) == b.pairing_with(&a));
assert!(a.pairing_with(&b) == E::pairing(a, b));
}
for _ in 0..1000 {
let z1 = E::G1Affine::zero().prepare();
let z2 = E::G2Affine::zero().prepare();
let a = E::G1::rand(&mut rng).into_affine().prepare();
let b = E::G2::rand(&mut rng).into_affine().prepare();
let c = E::G1::rand(&mut rng).into_affine().prepare();
let d = E::G2::rand(&mut rng).into_affine().prepare();
assert_eq!(
E::Fqk::one(),
E::final_exponentiation(&E::miller_loop(&[(&z1, &b)])).unwrap()
);
assert_eq!(
E::Fqk::one(),
E::final_exponentiation(&E::miller_loop(&[(&a, &z2)])).unwrap()
);
assert_eq!(
E::final_exponentiation(&E::miller_loop(&[(&z1, &b), (&c, &d)])).unwrap(),
E::final_exponentiation(&E::miller_loop(&[(&a, &z2), (&c, &d)])).unwrap()
);
assert_eq!(
E::final_exponentiation(&E::miller_loop(&[(&a, &b), (&z1, &d)])).unwrap(),
E::final_exponentiation(&E::miller_loop(&[(&a, &b), (&c, &z2)])).unwrap()
);
}
random_bilinearity_tests::<E>();
random_miller_loop_tests::<E>();
}
fn random_miller_loop_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// Exercise the miller loop for a reduced pairing
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let p2 = E::pairing(a, b);
let a = a.into_affine().prepare();
let b = b.into_affine().prepare();
let p1 = E::final_exponentiation(&E::miller_loop(&[(&a, &b)])).unwrap();
assert_eq!(p1, p2);
}
// Exercise a double miller loop
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let c = E::G1::rand(&mut rng);
let d = E::G2::rand(&mut rng);
let ab = E::pairing(a, b);
let cd = E::pairing(c, d);
let mut abcd = ab;
abcd.mul_assign(&cd);
let a = a.into_affine().prepare();
let b = b.into_affine().prepare();
let c = c.into_affine().prepare();
let d = d.into_affine().prepare();
let abcd_with_double_loop =
E::final_exponentiation(&E::miller_loop(&[(&a, &b), (&c, &d)])).unwrap();
assert_eq!(abcd, abcd_with_double_loop);
}
}
fn random_bilinearity_tests<E: Engine>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let a = E::G1::rand(&mut rng);
let b = E::G2::rand(&mut rng);
let c = E::Fr::rand(&mut rng);
let d = E::Fr::rand(&mut rng);
let mut ac = a;
ac.mul_assign(c);
let mut ad = a;
ad.mul_assign(d);
let mut bc = b;
bc.mul_assign(c);
let mut bd = b;
bd.mul_assign(d);
let acbd = E::pairing(ac, bd);
let adbc = E::pairing(ad, bc);
let mut cd = c;
cd.mul_assign(&d);
let abcd = E::pairing(a, b).pow(cd.into_repr());
assert_eq!(acbd, adbc);
assert_eq!(acbd, abcd);
}
}

266
pairing/src/tests/field.rs Normal file
View File

@ -0,0 +1,266 @@
use rand::{Rng, SeedableRng, XorShiftRng};
use {Field, LegendreSymbol, PrimeField, SqrtField};
pub fn random_frobenius_tests<F: Field, C: AsRef<[u64]>>(characteristic: C, maxpower: usize) {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
for i in 0..(maxpower + 1) {
let mut a = F::rand(&mut rng);
let mut b = a;
for _ in 0..i {
a = a.pow(&characteristic);
}
b.frobenius_map(i);
assert_eq!(a, b);
}
}
}
pub fn random_sqrt_tests<F: SqrtField>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..10000 {
let a = F::rand(&mut rng);
let mut b = a;
b.square();
assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue);
let b = b.sqrt().unwrap();
let mut negb = b;
negb.negate();
assert!(a == b || a == negb);
}
let mut c = F::one();
for _ in 0..10000 {
let mut b = c;
b.square();
assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue);
b = b.sqrt().unwrap();
if b != c {
b.negate();
}
assert_eq!(b, c);
c.add_assign(&F::one());
}
}
pub fn random_field_tests<F: Field>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
random_multiplication_tests::<F, _>(&mut rng);
random_addition_tests::<F, _>(&mut rng);
random_subtraction_tests::<F, _>(&mut rng);
random_negation_tests::<F, _>(&mut rng);
random_doubling_tests::<F, _>(&mut rng);
random_squaring_tests::<F, _>(&mut rng);
random_inversion_tests::<F, _>(&mut rng);
random_expansion_tests::<F, _>(&mut rng);
assert!(F::zero().is_zero());
{
let mut z = F::zero();
z.negate();
assert!(z.is_zero());
}
assert!(F::zero().inverse().is_none());
// Multiplication by zero
{
let mut a = F::rand(&mut rng);
a.mul_assign(&F::zero());
assert!(a.is_zero());
}
// Addition by zero
{
let mut a = F::rand(&mut rng);
let copy = a;
a.add_assign(&F::zero());
assert_eq!(a, copy);
}
}
pub fn from_str_tests<F: PrimeField>() {
{
let a = "84395729384759238745923745892374598234705297301958723458712394587103249587213984572934750213947582345792304758273458972349582734958273495872304598234";
let b = "38495729084572938457298347502349857029384609283450692834058293405982304598230458230495820394850293845098234059823049582309485203948502938452093482039";
let c = "3248875134290623212325429203829831876024364170316860259933542844758450336418538569901990710701240661702808867062612075657861768196242274635305077449545396068598317421057721935408562373834079015873933065667961469731886739181625866970316226171512545167081793907058686908697431878454091011239990119126";
let mut a = F::from_str(a).unwrap();
let b = F::from_str(b).unwrap();
let c = F::from_str(c).unwrap();
a.mul_assign(&b);
assert_eq!(a, c);
}
{
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let n: u64 = rng.gen();
let a = F::from_str(&format!("{}", n)).unwrap();
let b = F::from_repr(n.into()).unwrap();
assert_eq!(a, b);
}
}
assert!(F::from_str("").is_none());
assert!(F::from_str("0").unwrap().is_zero());
assert!(F::from_str("00").is_none());
assert!(F::from_str("00000000000").is_none());
}
fn random_multiplication_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let mut t0 = a; // (a * b) * c
t0.mul_assign(&b);
t0.mul_assign(&c);
let mut t1 = a; // (a * c) * b
t1.mul_assign(&c);
t1.mul_assign(&b);
let mut t2 = b; // (b * c) * a
t2.mul_assign(&c);
t2.mul_assign(&a);
assert_eq!(t0, t1);
assert_eq!(t1, t2);
}
}
fn random_addition_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let mut t0 = a; // (a + b) + c
t0.add_assign(&b);
t0.add_assign(&c);
let mut t1 = a; // (a + c) + b
t1.add_assign(&c);
t1.add_assign(&b);
let mut t2 = b; // (b + c) + a
t2.add_assign(&c);
t2.add_assign(&a);
assert_eq!(t0, t1);
assert_eq!(t1, t2);
}
}
fn random_subtraction_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let b = F::rand(rng);
let mut t0 = a; // (a - b)
t0.sub_assign(&b);
let mut t1 = b; // (b - a)
t1.sub_assign(&a);
let mut t2 = t0; // (a - b) + (b - a) = 0
t2.add_assign(&t1);
assert!(t2.is_zero());
}
}
fn random_negation_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let a = F::rand(rng);
let mut b = a;
b.negate();
b.add_assign(&a);
assert!(b.is_zero());
}
}
fn random_doubling_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let mut a = F::rand(rng);
let mut b = a;
a.add_assign(&b);
b.double();
assert_eq!(a, b);
}
}
fn random_squaring_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
let mut a = F::rand(rng);
let mut b = a;
a.mul_assign(&b);
b.square();
assert_eq!(a, b);
}
}
fn random_inversion_tests<F: Field, R: Rng>(rng: &mut R) {
assert!(F::zero().inverse().is_none());
for _ in 0..10000 {
let mut a = F::rand(rng);
let b = a.inverse().unwrap(); // probabilistically nonzero
a.mul_assign(&b);
assert_eq!(a, F::one());
}
}
fn random_expansion_tests<F: Field, R: Rng>(rng: &mut R) {
for _ in 0..10000 {
// Compare (a + b)(c + d) and (a*c + b*c + a*d + b*d)
let a = F::rand(rng);
let b = F::rand(rng);
let c = F::rand(rng);
let d = F::rand(rng);
let mut t0 = a;
t0.add_assign(&b);
let mut t1 = c;
t1.add_assign(&d);
t0.mul_assign(&t1);
let mut t2 = a;
t2.mul_assign(&c);
let mut t3 = b;
t3.mul_assign(&c);
let mut t4 = a;
t4.mul_assign(&d);
let mut t5 = b;
t5.mul_assign(&d);
t2.add_assign(&t3);
t2.add_assign(&t4);
t2.add_assign(&t5);
assert_eq!(t0, t2);
}
}

4
pairing/src/tests/mod.rs Normal file
View File

@ -0,0 +1,4 @@
pub mod curve;
pub mod engine;
pub mod field;
pub mod repr;

98
pairing/src/tests/repr.rs Normal file
View File

@ -0,0 +1,98 @@
use rand::{SeedableRng, XorShiftRng};
use PrimeFieldRepr;
pub fn random_repr_tests<R: PrimeFieldRepr>() {
random_encoding_tests::<R>();
random_shl_tests::<R>();
random_shr_tests::<R>();
}
fn random_encoding_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let r = R::rand(&mut rng);
// Big endian
{
let mut rdecoded = R::default();
let mut v: Vec<u8> = vec![];
r.write_be(&mut v).unwrap();
rdecoded.read_be(&v[0..]).unwrap();
assert_eq!(r, rdecoded);
}
// Little endian
{
let mut rdecoded = R::default();
let mut v: Vec<u8> = vec![];
r.write_le(&mut v).unwrap();
rdecoded.read_le(&v[0..]).unwrap();
assert_eq!(r, rdecoded);
}
{
let mut rdecoded_le = R::default();
let mut rdecoded_be_flip = R::default();
let mut v: Vec<u8> = vec![];
r.write_le(&mut v).unwrap();
// This reads in little-endian, so we are done.
rdecoded_le.read_le(&v[..]).unwrap();
// This reads in big-endian, so we perform a swap of the
// bytes beforehand.
let v: Vec<u8> = v.into_iter().rev().collect();
rdecoded_be_flip.read_be(&v[..]).unwrap();
assert_eq!(rdecoded_le, rdecoded_be_flip);
}
}
}
fn random_shl_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
let r = R::rand(&mut rng);
for shift in 0..(r.num_bits() + 1) {
let mut r1 = r;
let mut r2 = r;
for _ in 0..shift {
r1.mul2();
}
r2.shl(shift);
assert_eq!(r1, r2);
}
}
}
fn random_shr_tests<R: PrimeFieldRepr>() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
let r = R::rand(&mut rng);
for shift in 0..(r.num_bits() + 1) {
let mut r1 = r;
let mut r2 = r;
for _ in 0..shift {
r1.div2();
}
r2.shr(shift);
assert_eq!(r1, r2);
}
}
}

179
pairing/src/wnaf.rs Normal file
View File

@ -0,0 +1,179 @@
use super::{CurveProjective, PrimeField, PrimeFieldRepr};
/// Replaces the contents of `table` with a w-NAF window table for the given window size.
pub(crate) fn wnaf_table<G: CurveProjective>(table: &mut Vec<G>, mut base: G, window: usize) {
table.truncate(0);
table.reserve(1 << (window - 1));
let mut dbl = base;
dbl.double();
for _ in 0..(1 << (window - 1)) {
table.push(base);
base.add_assign(&dbl);
}
}
/// Replaces the contents of `wnaf` with the w-NAF representation of a scalar.
pub(crate) fn wnaf_form<S: PrimeFieldRepr>(wnaf: &mut Vec<i64>, mut c: S, window: usize) {
wnaf.truncate(0);
while !c.is_zero() {
let mut u;
if c.is_odd() {
u = (c.as_ref()[0] % (1 << (window + 1))) as i64;
if u > (1 << window) {
u -= 1 << (window + 1);
}
if u > 0 {
c.sub_noborrow(&S::from(u as u64));
} else {
c.add_nocarry(&S::from((-u) as u64));
}
} else {
u = 0;
}
wnaf.push(u);
c.div2();
}
}
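// A minimal sketch of the recoding above (assuming `FrRepr` from the crate's
// `bls12_381` module): with a window of 2, the scalar 7 is recoded as
// 2^3 - 1, i.e. the signed digits [-1, 0, 0, 1], least significant first.
#[test]
fn wnaf_form_sketch() {
    use bls12_381::FrRepr;
    let mut wnaf = vec![];
    wnaf_form(&mut wnaf, FrRepr::from(7u64), 2);
    assert_eq!(wnaf, vec![-1i64, 0, 0, 1]);
}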
/// Performs w-NAF exponentiation with the provided window table and w-NAF form scalar.
///
/// This function must be provided a `table` and `wnaf` that were constructed with
/// the same window size; otherwise, it may panic or produce invalid results.
pub(crate) fn wnaf_exp<G: CurveProjective>(table: &[G], wnaf: &[i64]) -> G {
let mut result = G::zero();
let mut found_one = false;
for n in wnaf.iter().rev() {
if found_one {
result.double();
}
if *n != 0 {
found_one = true;
if *n > 0 {
result.add_assign(&table[(n / 2) as usize]);
} else {
result.sub_assign(&table[((-n) / 2) as usize]);
}
}
}
result
}
/// A "w-ary non-adjacent form" exponentiation context.
#[derive(Debug)]
pub struct Wnaf<W, B, S> {
base: B,
scalar: S,
window_size: W,
}
impl<G: CurveProjective> Wnaf<(), Vec<G>, Vec<i64>> {
/// Construct a new wNAF context without allocating.
pub fn new() -> Self {
Wnaf {
base: vec![],
scalar: vec![],
window_size: (),
}
}
/// Given a base and a number of scalars, compute a window table and return a `Wnaf` object that
/// can perform exponentiations with `.scalar(..)`.
pub fn base(&mut self, base: G, num_scalars: usize) -> Wnaf<usize, &[G], &mut Vec<i64>> {
// Compute the appropriate window size based on the number of scalars.
let window_size = G::recommended_wnaf_for_num_scalars(num_scalars);
// Compute a wNAF table for the provided base and window size.
wnaf_table(&mut self.base, base, window_size);
// Return a Wnaf object that immutably borrows the computed base storage location,
// but mutably borrows the scalar storage location.
Wnaf {
base: &self.base[..],
scalar: &mut self.scalar,
window_size,
}
}
/// Given a scalar, compute its wNAF representation and return a `Wnaf` object that can perform
/// exponentiations with `.base(..)`.
pub fn scalar(
&mut self,
scalar: <<G as CurveProjective>::Scalar as PrimeField>::Repr,
) -> Wnaf<usize, &mut Vec<G>, &[i64]> {
// Compute the appropriate window size for the scalar.
let window_size = G::recommended_wnaf_for_scalar(scalar);
// Compute the wNAF form of the scalar.
wnaf_form(&mut self.scalar, scalar, window_size);
// Return a Wnaf object that mutably borrows the base storage location, but
// immutably borrows the computed wNAF form scalar location.
Wnaf {
base: &mut self.base,
scalar: &self.scalar[..],
window_size,
}
}
}
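// A minimal usage sketch (assuming `G1` and `FrRepr` from the crate's
// `bls12_381` module): build a window table once with `base(..)`, then
// multiply by a scalar and compare against plain scalar multiplication.
#[test]
fn wnaf_context_sketch() {
    use bls12_381::{FrRepr, G1};
    let g = G1::one();
    let s = FrRepr::from(12345u64);
    let mut expected = g;
    expected.mul_assign(s);
    let mut wnaf = Wnaf::new();
    assert_eq!(wnaf.base(g, 1).scalar(s), expected);
}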
impl<'a, G: CurveProjective> Wnaf<usize, &'a [G], &'a mut Vec<i64>> {
/// Constructs new space for the scalar representation while borrowing
/// the computed window table, for sending the window table across threads.
pub fn shared(&self) -> Wnaf<usize, &'a [G], Vec<i64>> {
Wnaf {
base: self.base,
scalar: vec![],
window_size: self.window_size,
}
}
}
impl<'a, G: CurveProjective> Wnaf<usize, &'a mut Vec<G>, &'a [i64]> {
/// Constructs new space for the window table while borrowing
/// the computed scalar representation, for sending the scalar representation
/// across threads.
pub fn shared(&self) -> Wnaf<usize, Vec<G>, &'a [i64]> {
Wnaf {
base: vec![],
scalar: self.scalar,
window_size: self.window_size,
}
}
}
impl<B, S: AsRef<[i64]>> Wnaf<usize, B, S> {
/// Performs exponentiation given a base.
pub fn base<G: CurveProjective>(&mut self, base: G) -> G
where
B: AsMut<Vec<G>>,
{
wnaf_table(self.base.as_mut(), base, self.window_size);
wnaf_exp(self.base.as_mut(), self.scalar.as_ref())
}
}
impl<B, S: AsMut<Vec<i64>>> Wnaf<usize, B, S> {
/// Performs exponentiation given a scalar.
pub fn scalar<G: CurveProjective>(
&mut self,
scalar: <<G as CurveProjective>::Scalar as PrimeField>::Repr,
) -> G
where
B: AsRef<[G]>,
{
wnaf_form(self.scalar.as_mut(), scalar, self.window_size);
wnaf_exp(self.base.as_ref(), self.scalar.as_mut())
}
}

3
sapling-crypto/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
/target/
**/*.rs.bk
Cargo.lock

14
sapling-crypto/COPYRIGHT Normal file
View File

@ -0,0 +1,14 @@
Copyrights in the "sapling-crypto" library are retained by their contributors. No
copyright assignment is required to contribute to the "sapling-crypto" library.
The "sapling-crypto" library is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

31
sapling-crypto/Cargo.toml Normal file
View File

@ -0,0 +1,31 @@
[package]
authors = ["Sean Bowe <sean@z.cash>"]
description = "Cryptographic library for Zcash Sapling"
documentation = "https://github.com/zcash-hackworks/sapling"
homepage = "https://github.com/zcash-hackworks/sapling"
license = "MIT/Apache-2.0"
name = "sapling-crypto"
repository = "https://github.com/zcash-hackworks/sapling"
version = "0.0.1"
[dependencies.pairing]
path = "../pairing"
features = ["expose-arith"]
[dependencies]
bellman = { path = "../bellman" }
rand = "0.4"
digest = "0.7"
byteorder = "1"
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
[dev-dependencies]
hex-literal = "0.1"
rust-crypto = "0.2"
[features]
default = ["u128-support"]
u128-support = ["pairing/u128-support"]

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

23
sapling-crypto/README.md Normal file
View File

@ -0,0 +1,23 @@
# sapling-crypto
This repository contains a (work-in-progress) implementation of Zcash's "Sapling" cryptography.
## Security Warnings
This library is currently under development and has not been reviewed.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

View File

@ -0,0 +1,23 @@
#![feature(test)]
extern crate rand;
extern crate test;
extern crate pairing;
extern crate sapling_crypto;
use rand::{Rand, thread_rng};
use pairing::bls12_381::Bls12;
use sapling_crypto::jubjub::JubjubBls12;
use sapling_crypto::pedersen_hash::{pedersen_hash, Personalization};
#[bench]
fn bench_pedersen_hash(b: &mut test::Bencher) {
let params = JubjubBls12::new();
let rng = &mut thread_rng();
let bits = (0..510).map(|_| bool::rand(rng)).collect::<Vec<_>>();
let personalization = Personalization::MerkleTree(31);
b.iter(|| {
pedersen_hash::<Bls12, _>(personalization, bits.clone(), &params)
});
}

View File

@ -0,0 +1,102 @@
extern crate sapling_crypto;
extern crate bellman;
extern crate rand;
extern crate pairing;
use std::time::{Duration, Instant};
use sapling_crypto::jubjub::{
JubjubBls12,
edwards,
fs,
};
use sapling_crypto::circuit::sapling::{
Spend
};
use sapling_crypto::primitives::{
Diversifier,
ProofGenerationKey,
ValueCommitment
};
use bellman::groth16::*;
use rand::{XorShiftRng, SeedableRng, Rng};
use pairing::bls12_381::{Bls12, Fr};
const TREE_DEPTH: usize = 32;
fn main() {
let jubjub_params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
println!("Creating sample parameters...");
let groth_params = generate_random_parameters::<Bls12, _, _>(
Spend {
params: jubjub_params,
value_commitment: None,
proof_generation_key: None,
payment_address: None,
commitment_randomness: None,
ar: None,
auth_path: vec![None; TREE_DEPTH],
anchor: None
},
rng
).unwrap();
const SAMPLES: u32 = 50;
let mut total_time = Duration::new(0, 0);
for _ in 0..SAMPLES {
let value_commitment = ValueCommitment {
value: 1,
randomness: rng.gen()
};
let nsk: fs::Fs = rng.gen();
let ak = edwards::Point::rand(rng, jubjub_params).mul_by_cofactor(jubjub_params);
let proof_generation_key = ProofGenerationKey {
ak: ak.clone(),
nsk: nsk.clone()
};
let viewing_key = proof_generation_key.into_viewing_key(jubjub_params);
let payment_address;
loop {
let diversifier = Diversifier(rng.gen());
if let Some(p) = viewing_key.into_payment_address(
diversifier,
jubjub_params
)
{
payment_address = p;
break;
}
}
let commitment_randomness: fs::Fs = rng.gen();
let auth_path = vec![Some((rng.gen(), rng.gen())); TREE_DEPTH];
let ar: fs::Fs = rng.gen();
let anchor: Fr = rng.gen();
let start = Instant::now();
let _ = create_random_proof(Spend {
params: jubjub_params,
value_commitment: Some(value_commitment),
proof_generation_key: Some(proof_generation_key),
payment_address: Some(payment_address),
commitment_randomness: Some(commitment_randomness),
ar: Some(ar),
auth_path: auth_path,
anchor: Some(anchor)
}, &groth_params, rng).unwrap();
total_time += start.elapsed();
}
let avg = total_time / SAMPLES;
let avg = avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (avg.as_secs() as f64);
println!("Average proving time (in seconds): {}", avg);
}
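
For reference, a minimal standalone sketch of the seconds conversion used in the averaging above; to_seconds is a hypothetical helper, and on newer toolchains Duration::as_secs_f64 computes the same value.

use std::time::Duration;

// Convert a Duration into fractional seconds, as the averaging code above does.
fn to_seconds(d: Duration) -> f64 {
    d.subsec_nanos() as f64 / 1_000_000_000f64 + d.as_secs() as f64
}

fn main() {
    assert_eq!(to_seconds(Duration::new(2, 500_000_000)), 2.5);
}
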

View File

@ -0,0 +1,438 @@
use pairing::{
Engine,
};
use bellman::{
SynthesisError,
ConstraintSystem
};
use super::boolean::{
Boolean
};
use super::uint32::{
UInt32
};
use super::multieq::MultiEq;
/*
2.1. Parameters
The following table summarizes various parameters and their ranges:
| BLAKE2b | BLAKE2s |
--------------+------------------+------------------+
Bits in word | w = 64 | w = 32 |
Rounds in F | r = 12 | r = 10 |
Block bytes | bb = 128 | bb = 64 |
Hash bytes | 1 <= nn <= 64 | 1 <= nn <= 32 |
Key bytes | 0 <= kk <= 64 | 0 <= kk <= 32 |
Input bytes | 0 <= ll < 2**128 | 0 <= ll < 2**64 |
--------------+------------------+------------------+
G Rotation | (R1, R2, R3, R4) | (R1, R2, R3, R4) |
constants = | (32, 24, 16, 63) | (16, 12, 8, 7) |
--------------+------------------+------------------+
*/
const R1: usize = 16;
const R2: usize = 12;
const R3: usize = 8;
const R4: usize = 7;
/*
Round | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
----------+-------------------------------------------------+
SIGMA[0] | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
SIGMA[1] | 14 10 4 8 9 15 13 6 1 12 0 2 11 7 5 3 |
SIGMA[2] | 11 8 12 0 5 2 15 13 10 14 3 6 7 1 9 4 |
SIGMA[3] | 7 9 3 1 13 12 11 14 2 6 5 10 4 0 15 8 |
SIGMA[4] | 9 0 5 7 2 4 10 15 14 1 11 12 6 8 3 13 |
SIGMA[5] | 2 12 6 10 0 11 8 3 4 13 7 5 15 14 1 9 |
SIGMA[6] | 12 5 1 15 14 13 4 10 0 7 6 3 9 2 8 11 |
SIGMA[7] | 13 11 7 14 12 1 3 9 5 0 15 4 8 6 2 10 |
SIGMA[8] | 6 15 14 9 11 3 0 8 12 2 13 7 1 4 10 5 |
SIGMA[9] | 10 2 8 4 7 6 1 5 15 11 9 14 3 12 13 0 |
----------+-------------------------------------------------+
*/
const SIGMA: [[usize; 16]; 10] = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3],
[11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4],
[7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8],
[9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13],
[2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9],
[12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11],
[13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10],
[6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5],
[10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0]
];
/*
3.1. Mixing Function G
The G primitive function mixes two input words, "x" and "y", into
four words indexed by "a", "b", "c", and "d" in the working vector
v[0..15]. The full modified vector is returned. The rotation
constants (R1, R2, R3, R4) are given in Section 2.1.
FUNCTION G( v[0..15], a, b, c, d, x, y )
|
| v[a] := (v[a] + v[b] + x) mod 2**w
| v[d] := (v[d] ^ v[a]) >>> R1
| v[c] := (v[c] + v[d]) mod 2**w
| v[b] := (v[b] ^ v[c]) >>> R2
| v[a] := (v[a] + v[b] + y) mod 2**w
| v[d] := (v[d] ^ v[a]) >>> R3
| v[c] := (v[c] + v[d]) mod 2**w
| v[b] := (v[b] ^ v[c]) >>> R4
|
| RETURN v[0..15]
|
END FUNCTION.
*/
fn mixing_g<E: Engine, CS: ConstraintSystem<E>, M>(
mut cs: M,
v: &mut [UInt32],
a: usize,
b: usize,
c: usize,
d: usize,
x: &UInt32,
y: &UInt32
) -> Result<(), SynthesisError>
where M: ConstraintSystem<E, Root=MultiEq<E, CS>>
{
v[a] = UInt32::addmany(cs.namespace(|| "mixing step 1"), &[v[a].clone(), v[b].clone(), x.clone()])?;
v[d] = v[d].xor(cs.namespace(|| "mixing step 2"), &v[a])?.rotr(R1);
v[c] = UInt32::addmany(cs.namespace(|| "mixing step 3"), &[v[c].clone(), v[d].clone()])?;
v[b] = v[b].xor(cs.namespace(|| "mixing step 4"), &v[c])?.rotr(R2);
v[a] = UInt32::addmany(cs.namespace(|| "mixing step 5"), &[v[a].clone(), v[b].clone(), y.clone()])?;
v[d] = v[d].xor(cs.namespace(|| "mixing step 6"), &v[a])?.rotr(R3);
v[c] = UInt32::addmany(cs.namespace(|| "mixing step 7"), &[v[c].clone(), v[d].clone()])?;
v[b] = v[b].xor(cs.namespace(|| "mixing step 8"), &v[c])?.rotr(R4);
Ok(())
}
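
For intuition, a minimal sketch of the same G mixing on plain u32 words outside the constraint system, using the R1..R4 rotation constants defined above; illustrative only, not part of this file.

// Plain-u32 BLAKE2s G mixing step from RFC 7693, mirroring the gadget above:
// additions are mod 2**32 and all rotations are to the right.
fn g_plain(v: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize, x: u32, y: u32) {
    v[a] = v[a].wrapping_add(v[b]).wrapping_add(x);
    v[d] = (v[d] ^ v[a]).rotate_right(16); // R1
    v[c] = v[c].wrapping_add(v[d]);
    v[b] = (v[b] ^ v[c]).rotate_right(12); // R2
    v[a] = v[a].wrapping_add(v[b]).wrapping_add(y);
    v[d] = (v[d] ^ v[a]).rotate_right(8);  // R3
    v[c] = v[c].wrapping_add(v[d]);
    v[b] = (v[b] ^ v[c]).rotate_right(7);  // R4
}
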
/*
3.2. Compression Function F
Compression function F takes as an argument the state vector "h",
message block vector "m" (last block is padded with zeros to full
block size, if required), 2w-bit offset counter "t", and final block
indicator flag "f". Local vector v[0..15] is used in processing. F
returns a new state vector. The number of rounds, "r", is 12 for
BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1.
FUNCTION F( h[0..7], m[0..15], t, f )
|
| // Initialize local work vector v[0..15]
| v[0..7] := h[0..7] // First half from state.
| v[8..15] := IV[0..7] // Second half from IV.
|
| v[12] := v[12] ^ (t mod 2**w) // Low word of the offset.
| v[13] := v[13] ^ (t >> w) // High word.
|
| IF f = TRUE THEN // last block flag?
| | v[14] := v[14] ^ 0xFF..FF // Invert all bits.
| END IF.
|
| // Cryptographic mixing
| FOR i = 0 TO r - 1 DO // Ten or twelve rounds.
| |
| | // Message word selection permutation for this round.
| | s[0..15] := SIGMA[i mod 10][0..15]
| |
| | v := G( v, 0, 4, 8, 12, m[s[ 0]], m[s[ 1]] )
| | v := G( v, 1, 5, 9, 13, m[s[ 2]], m[s[ 3]] )
| | v := G( v, 2, 6, 10, 14, m[s[ 4]], m[s[ 5]] )
| | v := G( v, 3, 7, 11, 15, m[s[ 6]], m[s[ 7]] )
| |
| | v := G( v, 0, 5, 10, 15, m[s[ 8]], m[s[ 9]] )
| | v := G( v, 1, 6, 11, 12, m[s[10]], m[s[11]] )
| | v := G( v, 2, 7, 8, 13, m[s[12]], m[s[13]] )
| | v := G( v, 3, 4, 9, 14, m[s[14]], m[s[15]] )
| |
| END FOR
|
| FOR i = 0 TO 7 DO // XOR the two halves.
| | h[i] := h[i] ^ v[i] ^ v[i + 8]
| END FOR.
|
| RETURN h[0..7] // New state.
|
END FUNCTION.
*/
fn blake2s_compression<E: Engine, CS: ConstraintSystem<E>>(
mut cs: CS,
h: &mut [UInt32],
m: &[UInt32],
t: u64,
f: bool
) -> Result<(), SynthesisError>
{
assert_eq!(h.len(), 8);
assert_eq!(m.len(), 16);
/*
static const uint32_t blake2s_iv[8] =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
*/
let mut v = Vec::with_capacity(16);
v.extend_from_slice(h);
v.push(UInt32::constant(0x6A09E667));
v.push(UInt32::constant(0xBB67AE85));
v.push(UInt32::constant(0x3C6EF372));
v.push(UInt32::constant(0xA54FF53A));
v.push(UInt32::constant(0x510E527F));
v.push(UInt32::constant(0x9B05688C));
v.push(UInt32::constant(0x1F83D9AB));
v.push(UInt32::constant(0x5BE0CD19));
assert_eq!(v.len(), 16);
v[12] = v[12].xor(cs.namespace(|| "first xor"), &UInt32::constant(t as u32))?;
v[13] = v[13].xor(cs.namespace(|| "second xor"), &UInt32::constant((t >> 32) as u32))?;
if f {
v[14] = v[14].xor(cs.namespace(|| "third xor"), &UInt32::constant(u32::max_value()))?;
}
{
let mut cs = MultiEq::new(&mut cs);
for i in 0..10 {
let mut cs = cs.namespace(|| format!("round {}", i));
let s = SIGMA[i % 10];
mixing_g(cs.namespace(|| "mixing invocation 1"), &mut v, 0, 4, 8, 12, &m[s[ 0]], &m[s[ 1]])?;
mixing_g(cs.namespace(|| "mixing invocation 2"), &mut v, 1, 5, 9, 13, &m[s[ 2]], &m[s[ 3]])?;
mixing_g(cs.namespace(|| "mixing invocation 3"), &mut v, 2, 6, 10, 14, &m[s[ 4]], &m[s[ 5]])?;
mixing_g(cs.namespace(|| "mixing invocation 4"), &mut v, 3, 7, 11, 15, &m[s[ 6]], &m[s[ 7]])?;
mixing_g(cs.namespace(|| "mixing invocation 5"), &mut v, 0, 5, 10, 15, &m[s[ 8]], &m[s[ 9]])?;
mixing_g(cs.namespace(|| "mixing invocation 6"), &mut v, 1, 6, 11, 12, &m[s[10]], &m[s[11]])?;
mixing_g(cs.namespace(|| "mixing invocation 7"), &mut v, 2, 7, 8, 13, &m[s[12]], &m[s[13]])?;
mixing_g(cs.namespace(|| "mixing invocation 8"), &mut v, 3, 4, 9, 14, &m[s[14]], &m[s[15]])?;
}
}
for i in 0..8 {
let mut cs = cs.namespace(|| format!("h[{i}] ^ v[{i}] ^ v[{i} + 8]", i=i));
h[i] = h[i].xor(cs.namespace(|| "first xor"), &v[i])?;
h[i] = h[i].xor(cs.namespace(|| "second xor"), &v[i + 8])?;
}
Ok(())
}
/*
FUNCTION BLAKE2( d[0..dd-1], ll, kk, nn )
|
| h[0..7] := IV[0..7] // Initialization Vector.
|
| // Parameter block p[0]
| h[0] := h[0] ^ 0x01010000 ^ (kk << 8) ^ nn
|
| // Process padded key and data blocks
| IF dd > 1 THEN
| | FOR i = 0 TO dd - 2 DO
| | | h := F( h, d[i], (i + 1) * bb, FALSE )
| | END FOR.
| END IF.
|
| // Final block.
| IF kk = 0 THEN
| | h := F( h, d[dd - 1], ll, TRUE )
| ELSE
| | h := F( h, d[dd - 1], ll + bb, TRUE )
| END IF.
|
| RETURN first "nn" bytes from little-endian word array h[].
|
END FUNCTION.
*/
pub fn blake2s<E: Engine, CS: ConstraintSystem<E>>(
mut cs: CS,
input: &[Boolean],
personalization: &[u8]
) -> Result<Vec<Boolean>, SynthesisError>
{
use byteorder::{ByteOrder, LittleEndian};
assert_eq!(personalization.len(), 8);
assert!(input.len() % 8 == 0);
let mut h = Vec::with_capacity(8);
h.push(UInt32::constant(0x6A09E667 ^ 0x01010000 ^ 32));
h.push(UInt32::constant(0xBB67AE85));
h.push(UInt32::constant(0x3C6EF372));
h.push(UInt32::constant(0xA54FF53A));
h.push(UInt32::constant(0x510E527F));
h.push(UInt32::constant(0x9B05688C));
// Personalization is stored here
h.push(UInt32::constant(0x1F83D9AB ^ LittleEndian::read_u32(&personalization[0..4])));
h.push(UInt32::constant(0x5BE0CD19 ^ LittleEndian::read_u32(&personalization[4..8])));
let mut blocks: Vec<Vec<UInt32>> = vec![];
for block in input.chunks(512) {
let mut this_block = Vec::with_capacity(16);
for word in block.chunks(32) {
let mut tmp = word.to_vec();
while tmp.len() < 32 {
tmp.push(Boolean::constant(false));
}
this_block.push(UInt32::from_bits(&tmp));
}
while this_block.len() < 16 {
this_block.push(UInt32::constant(0));
}
blocks.push(this_block);
}
if blocks.len() == 0 {
blocks.push((0..16).map(|_| UInt32::constant(0)).collect());
}
for (i, block) in blocks[0..blocks.len() - 1].iter().enumerate() {
let cs = cs.namespace(|| format!("block {}", i));
blake2s_compression(cs, &mut h, block, ((i as u64) + 1) * 64, false)?;
}
{
let cs = cs.namespace(|| "final block");
blake2s_compression(cs, &mut h, &blocks[blocks.len() - 1], (input.len() / 8) as u64, true)?;
}
Ok(h.iter().flat_map(|b| b.into_bits()).collect())
}
#[cfg(test)]
mod test {
use rand::{XorShiftRng, SeedableRng, Rng};
use pairing::bls12_381::{Bls12};
use ::circuit::boolean::{Boolean, AllocatedBit};
use ::circuit::test::TestConstraintSystem;
use super::blake2s;
use bellman::{ConstraintSystem};
use blake2_rfc::blake2s::Blake2s;
#[test]
fn test_blank_hash() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bits = vec![];
let out = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 0);
// >>> from hashlib import blake2s
// >>> h = blake2s(digest_size=32, person=b'12345678')
// >>> h.hexdigest()
let expected = hex!("c59f682376d137f3f255e671e207d1f2374ebe504e9314208a52d9f88d69e8c8");
let mut out = out.into_iter();
for b in expected.into_iter() {
for i in 0..8 {
let c = out.next().unwrap().get_value().unwrap();
assert_eq!(c, (b >> i) & 1u8 == 1u8);
}
}
}
#[test]
fn test_blake2s_constraints() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bits: Vec<_> = (0..512).map(|i| AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true)).unwrap().into()).collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
}
#[test]
fn test_blake2s_precomp_constraints() {
// Test that 512 fixed leading bits (constants)
// don't result in more constraints.
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let input_bits: Vec<_> = (0..512)
.map(|_| Boolean::constant(rng.gen()))
.chain((0..512)
.map(|i| AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true)).unwrap().into()))
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
}
#[test]
fn test_blake2s_constant_constraints() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let input_bits: Vec<_> = (0..512).map(|_| Boolean::constant(rng.gen())).collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert_eq!(cs.num_constraints(), 0);
}
#[test]
fn test_blake2s() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0))
{
let mut h = Blake2s::with_params(32, &[], &[], b"12345678");
let data: Vec<u8> = (0..input_len).map(|_| rng.gen()).collect();
h.update(&data);
let hash_result = h.finalize();
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8)).unwrap().into());
}
}
let r = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
let mut s = hash_result.as_ref().iter()
.flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
},
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
},
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);
}
}
}
}
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,307 @@
use pairing::{Engine, Field};
use super::*;
use super::num::{
AllocatedNum,
Num
};
use super::boolean::Boolean;
use bellman::{
ConstraintSystem
};
// Synthesize the constants for each base pattern.
fn synth<'a, E: Engine, I>(
window_size: usize,
constants: I,
assignment: &mut [E::Fr]
)
where I: IntoIterator<Item=&'a E::Fr>
{
assert_eq!(assignment.len(), 1 << window_size);
for (i, constant) in constants.into_iter().enumerate() {
let mut cur = assignment[i];
cur.negate();
cur.add_assign(constant);
assignment[i] = cur;
for (j, eval) in assignment.iter_mut().enumerate().skip(i + 1) {
if j & i == i {
eval.add_assign(&cur);
}
}
}
}
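
For intuition, a minimal standalone sketch of the invariant synth establishes, specialized to plain i64 instead of field elements: after the transform, summing the coefficients over all bit-subsets of an index recovers the original constant, which is the same property test_synth below checks over Fr.

// Same update rule as `synth`, over i64 for illustration only.
fn synth_i64(constants: &[i64], coeffs: &mut [i64]) {
    assert_eq!(constants.len(), coeffs.len());
    for i in 0..constants.len() {
        let cur = constants[i] - coeffs[i];
        coeffs[i] = cur;
        for j in (i + 1)..coeffs.len() {
            if j & i == i {
                coeffs[j] += cur;
            }
        }
    }
}

fn main() {
    let constants = [5i64, 7, 11, 2]; // window_size = 2, so 1 << 2 constants
    let mut coeffs = [0i64; 4];
    synth_i64(&constants, &mut coeffs);
    for b in 0..4usize {
        // Sum coeffs[j] over every j that is a bit-subset of b.
        let acc: i64 = (0..4usize).filter(|&j| j & b == j).map(|j| coeffs[j]).sum();
        assert_eq!(acc, constants[b]);
    }
}
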
/// Performs a 3-bit window table lookup. `bits` is in
/// little-endian order.
pub fn lookup3_xy<E: Engine, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(E::Fr, E::Fr)]
) -> Result<(AllocatedNum<E>, AllocatedNum<E>), SynthesisError>
where CS: ConstraintSystem<E>
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 8);
// Calculate the index into `coords`
let i =
match (bits[0].get_value(), bits[1].get_value(), bits[2].get_value()) {
(Some(a_value), Some(b_value), Some(c_value)) => {
let mut tmp = 0;
if a_value {
tmp += 1;
}
if b_value {
tmp += 2;
}
if c_value {
tmp += 4;
}
Some(tmp)
},
_ => None
};
// Allocate the x-coordinate resulting from the lookup
let res_x = AllocatedNum::alloc(
cs.namespace(|| "x"),
|| {
Ok(coords[*i.get()?].0)
}
)?;
// Allocate the y-coordinate resulting from the lookup
let res_y = AllocatedNum::alloc(
cs.namespace(|| "y"),
|| {
Ok(coords[*i.get()?].1)
}
)?;
// Compute the coefficients for the lookup constraints
let mut x_coeffs = [E::Fr::zero(); 8];
let mut y_coeffs = [E::Fr::zero(); 8];
synth::<E, _>(3, coords.iter().map(|c| &c.0), &mut x_coeffs);
synth::<E, _>(3, coords.iter().map(|c| &c.1), &mut y_coeffs);
let precomp = Boolean::and(cs.namespace(|| "precomp"), &bits[1], &bits[2])?;
let one = CS::one();
cs.enforce(
|| "x-coordinate lookup",
|lc| lc + (x_coeffs[0b001], one)
+ &bits[1].lc::<E>(one, x_coeffs[0b011])
+ &bits[2].lc::<E>(one, x_coeffs[0b101])
+ &precomp.lc::<E>(one, x_coeffs[0b111]),
|lc| lc + &bits[0].lc::<E>(one, E::Fr::one()),
|lc| lc + res_x.get_variable()
- (x_coeffs[0b000], one)
- &bits[1].lc::<E>(one, x_coeffs[0b010])
- &bits[2].lc::<E>(one, x_coeffs[0b100])
- &precomp.lc::<E>(one, x_coeffs[0b110]),
);
cs.enforce(
|| "y-coordinate lookup",
|lc| lc + (y_coeffs[0b001], one)
+ &bits[1].lc::<E>(one, y_coeffs[0b011])
+ &bits[2].lc::<E>(one, y_coeffs[0b101])
+ &precomp.lc::<E>(one, y_coeffs[0b111]),
|lc| lc + &bits[0].lc::<E>(one, E::Fr::one()),
|lc| lc + res_y.get_variable()
- (y_coeffs[0b000], one)
- &bits[1].lc::<E>(one, y_coeffs[0b010])
- &bits[2].lc::<E>(one, y_coeffs[0b100])
- &precomp.lc::<E>(one, y_coeffs[0b110]),
);
Ok((res_x, res_y))
}
/// Performs a 3-bit window table lookup, where
/// one of the bits is a sign bit.
pub fn lookup3_xy_with_conditional_negation<E: Engine, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(E::Fr, E::Fr)]
) -> Result<(Num<E>, Num<E>), SynthesisError>
where CS: ConstraintSystem<E>
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 4);
// Calculate the index into `coords`
let i =
match (bits[0].get_value(), bits[1].get_value()) {
(Some(a_value), Some(b_value)) => {
let mut tmp = 0;
if a_value {
tmp += 1;
}
if b_value {
tmp += 2;
}
Some(tmp)
},
_ => None
};
// Allocate the y-coordinate resulting from the lookup
// and conditional negation
let y = AllocatedNum::alloc(
cs.namespace(|| "y"),
|| {
let mut tmp = coords[*i.get()?].1;
if *bits[2].get_value().get()? {
tmp.negate();
}
Ok(tmp)
}
)?;
let one = CS::one();
// Compute the coefficients for the lookup constraints
let mut x_coeffs = [E::Fr::zero(); 4];
let mut y_coeffs = [E::Fr::zero(); 4];
synth::<E, _>(2, coords.iter().map(|c| &c.0), &mut x_coeffs);
synth::<E, _>(2, coords.iter().map(|c| &c.1), &mut y_coeffs);
let precomp = Boolean::and(cs.namespace(|| "precomp"), &bits[0], &bits[1])?;
let x = Num::zero()
.add_bool_with_coeff(one, &Boolean::constant(true), x_coeffs[0b00])
.add_bool_with_coeff(one, &bits[0], x_coeffs[0b01])
.add_bool_with_coeff(one, &bits[1], x_coeffs[0b10])
.add_bool_with_coeff(one, &precomp, x_coeffs[0b11]);
let y_lc = precomp.lc::<E>(one, y_coeffs[0b11]) +
&bits[1].lc::<E>(one, y_coeffs[0b10]) +
&bits[0].lc::<E>(one, y_coeffs[0b01]) +
(y_coeffs[0b00], one);
cs.enforce(
|| "y-coordinate lookup",
|lc| lc + &y_lc + &y_lc,
|lc| lc + &bits[2].lc::<E>(one, E::Fr::one()),
|lc| lc + &y_lc - y.get_variable()
);
Ok((x, y.into()))
}
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rand, Rng, XorShiftRng};
use super::*;
use ::circuit::test::*;
use ::circuit::boolean::{Boolean, AllocatedBit};
use pairing::bls12_381::{Bls12, Fr};
#[test]
fn test_lookup3_xy() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0656]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a_val = rng.gen();
let a = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap()
);
let b_val = rng.gen();
let b = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap()
);
let c_val = rng.gen();
let c = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap()
);
let bits = vec![a, b, c];
let points: Vec<(Fr, Fr)> = (0..8).map(|_| (rng.gen(), rng.gen())).collect();
let res = lookup3_xy(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val { index += 1 }
if b_val { index += 2 }
if c_val { index += 4 }
assert_eq!(res.0.get_value().unwrap(), points[index].0);
assert_eq!(res.1.get_value().unwrap(), points[index].1);
}
}
#[test]
fn test_lookup3_xy_with_conditional_negation() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a_val = rng.gen();
let a = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap()
);
let b_val = rng.gen();
let b = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap()
);
let c_val = rng.gen();
let c = Boolean::from(
AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap()
);
let bits = vec![a, b, c];
let points: Vec<(Fr, Fr)> = (0..4).map(|_| (rng.gen(), rng.gen())).collect();
let res = lookup3_xy_with_conditional_negation(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val { index += 1 }
if b_val { index += 2 }
assert_eq!(res.0.get_value().unwrap(), points[index].0);
let mut tmp = points[index].1;
if c_val { tmp.negate() }
assert_eq!(res.1.get_value().unwrap(), tmp);
}
}
#[test]
fn test_synth() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let window_size = 4;
let mut assignment = vec![Fr::zero(); 1 << window_size];
let constants: Vec<_> = (0..(1 << window_size)).map(|_| Fr::rand(&mut rng)).collect();
synth::<Bls12, _>(window_size, &constants, &mut assignment);
for b in 0..(1 << window_size) {
let mut acc = Fr::zero();
for j in 0..(1 << window_size) {
if j & b == j {
acc.add_assign(&assignment[j]);
}
}
assert_eq!(acc, constants[b]);
}
}
}

View File

@ -0,0 +1,39 @@
#[cfg(test)]
pub mod test;
pub mod boolean;
pub mod multieq;
pub mod uint32;
pub mod blake2s;
pub mod num;
pub mod lookup;
pub mod ecc;
pub mod pedersen_hash;
pub mod multipack;
pub mod sha256;
pub mod sapling;
pub mod sprout;
use bellman::{
SynthesisError
};
// TODO: This should probably be removed and we
// should use existing helper methods on `Option`
// for mapping with an error.
/// This basically is just an extension to `Option`
/// which allows for a convenient mapping to an
/// error on `None`.
trait Assignment<T> {
fn get(&self) -> Result<&T, SynthesisError>;
}
impl<T> Assignment<T> for Option<T> {
fn get(&self) -> Result<&T, SynthesisError> {
match *self {
Some(ref v) => Ok(v),
None => Err(SynthesisError::AssignmentMissing)
}
}
}
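
A minimal usage sketch, assuming it sits in a scope where this trait and bellman's SynthesisError are visible (as in the allocation closures throughout this module): get turns a missing witness into SynthesisError::AssignmentMissing, which the ? operator then propagates out of the closure.

// Hypothetical helper, for illustration only.
fn witness_or_error(value: Option<u64>) -> Result<u64, SynthesisError> {
    // Ok(v) when the assignment is present, Err(AssignmentMissing) otherwise.
    Ok(*value.get()?)
}
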

View File

@ -0,0 +1,137 @@
use pairing::{
Engine,
Field,
PrimeField
};
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination,
Variable
};
pub struct MultiEq<E: Engine, CS: ConstraintSystem<E>>{
cs: CS,
ops: usize,
bits_used: usize,
lhs: LinearCombination<E>,
rhs: LinearCombination<E>,
}
impl<E: Engine, CS: ConstraintSystem<E>> MultiEq<E, CS> {
pub fn new(cs: CS) -> Self {
MultiEq {
cs: cs,
ops: 0,
bits_used: 0,
lhs: LinearCombination::zero(),
rhs: LinearCombination::zero()
}
}
fn accumulate(&mut self)
{
let ops = self.ops;
let lhs = self.lhs.clone();
let rhs = self.rhs.clone();
self.cs.enforce(
|| format!("multieq {}", ops),
|_| lhs,
|lc| lc + CS::one(),
|_| rhs
);
self.lhs = LinearCombination::zero();
self.rhs = LinearCombination::zero();
self.bits_used = 0;
self.ops += 1;
}
pub fn enforce_equal(
&mut self,
num_bits: usize,
lhs: &LinearCombination<E>,
rhs: &LinearCombination<E>
)
{
// Check if we will exceed the capacity
if (E::Fr::CAPACITY as usize) <= (self.bits_used + num_bits) {
self.accumulate();
}
assert!((E::Fr::CAPACITY as usize) > (self.bits_used + num_bits));
let coeff = E::Fr::from_str("2").unwrap().pow(&[self.bits_used as u64]);
self.lhs = self.lhs.clone() + (coeff, lhs);
self.rhs = self.rhs.clone() + (coeff, rhs);
self.bits_used += num_bits;
}
}
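
For intuition, a minimal standalone sketch of the packing trick enforce_equal relies on, using plain integers rather than field elements and ignoring the CAPACITY bookkeeping: each small equality is shifted into its own bit range, so a single wide equality check covers all of them at once.

// Pack pairs of 32-bit values into disjoint bit ranges of a u128 and compare
// once, analogous to MultiEq scaling each linear combination by 2^bits_used.
// (At most four pairs fit before the u128 runs out of room.)
fn packed_equal(pairs: &[(u32, u32)]) -> bool {
    assert!(pairs.len() <= 4);
    let mut lhs: u128 = 0;
    let mut rhs: u128 = 0;
    let mut shift = 0u32;
    for &(l, r) in pairs {
        lhs += (l as u128) << shift;
        rhs += (r as u128) << shift;
        shift += 32;
    }
    lhs == rhs
}

fn main() {
    assert!(packed_equal(&[(7, 7), (42, 42)]));
    assert!(!packed_equal(&[(7, 7), (42, 43)]));
}
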
impl<E: Engine, CS: ConstraintSystem<E>> Drop for MultiEq<E, CS> {
fn drop(&mut self) {
if self.bits_used > 0 {
self.accumulate();
}
}
}
impl<E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for MultiEq<E, CS>
{
type Root = Self;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.cs.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.cs.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
self.cs.enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
{
self.cs.get_root().push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
self.cs.get_root().pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
self
}
}

View File

@ -0,0 +1,113 @@
use pairing::{Engine, Field, PrimeField};
use bellman::{ConstraintSystem, SynthesisError};
use super::boolean::{Boolean};
use super::num::Num;
use super::Assignment;
/// Takes a sequence of booleans and exposes them as compact
/// public inputs
pub fn pack_into_inputs<E, CS>(
mut cs: CS,
bits: &[Boolean]
) -> Result<(), SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
for (i, bits) in bits.chunks(E::Fr::CAPACITY as usize).enumerate()
{
let mut num = Num::<E>::zero();
let mut coeff = E::Fr::one();
for bit in bits {
num = num.add_bool_with_coeff(CS::one(), bit, coeff);
coeff.double();
}
let input = cs.alloc_input(|| format!("input {}", i), || {
Ok(*num.get_value().get()?)
})?;
// num * 1 = input
cs.enforce(
|| format!("packing constraint {}", i),
|_| num.lc(E::Fr::one()),
|lc| lc + CS::one(),
|lc| lc + input
);
}
Ok(())
}
pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool>
{
bytes.iter()
.flat_map(|&v| (0..8).rev().map(move |i| (v >> i) & 1 == 1))
.collect()
}
pub fn bytes_to_bits_le(bytes: &[u8]) -> Vec<bool>
{
bytes.iter()
.flat_map(|&v| (0..8).map(move |i| (v >> i) & 1 == 1))
.collect()
}
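
A small worked example of the two bit orders produced by the helpers above (assuming bytes_to_bits and bytes_to_bits_le are in scope); the distinction matters when matching circuit bits against byte-oriented hash output elsewhere in this crate.

fn main() {
    // 0b0000_0011 == 3, most significant bit first:
    assert_eq!(
        bytes_to_bits(&[3]),
        vec![false, false, false, false, false, false, true, true]
    );
    // ... and least significant bit first:
    assert_eq!(
        bytes_to_bits_le(&[3]),
        vec![true, true, false, false, false, false, false, false]
    );
}
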
pub fn compute_multipacking<E: Engine>(
bits: &[bool]
) -> Vec<E::Fr>
{
let mut result = vec![];
for bits in bits.chunks(E::Fr::CAPACITY as usize)
{
let mut cur = E::Fr::zero();
let mut coeff = E::Fr::one();
for bit in bits {
if *bit {
cur.add_assign(&coeff);
}
coeff.double();
}
result.push(cur);
}
result
}
#[test]
fn test_multipacking() {
use rand::{SeedableRng, Rng, XorShiftRng};
use bellman::{ConstraintSystem};
use pairing::bls12_381::{Bls12};
use ::circuit::test::*;
use super::boolean::{AllocatedBit, Boolean};
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for num_bits in 0..1500 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let bits: Vec<bool> = (0..num_bits).map(|_| rng.gen()).collect();
let circuit_bits = bits.iter().enumerate()
.map(|(i, &b)| {
Boolean::from(
AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
Some(b)
).unwrap()
)
})
.collect::<Vec<_>>();
let expected_inputs = compute_multipacking::<Bls12>(&bits);
pack_into_inputs(cs.namespace(|| "pack"), &circuit_bits).unwrap();
assert!(cs.is_satisfied());
assert!(cs.verify(&expected_inputs));
}
}

View File

@ -0,0 +1,622 @@
use pairing::{
Engine,
Field,
PrimeField,
PrimeFieldRepr,
BitIterator
};
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination,
Variable
};
use super::{
Assignment
};
use super::boolean::{
self,
Boolean,
AllocatedBit
};
pub struct AllocatedNum<E: Engine> {
value: Option<E::Fr>,
variable: Variable
}
impl<E: Engine> Clone for AllocatedNum<E> {
fn clone(&self) -> Self {
AllocatedNum {
value: self.value,
variable: self.variable
}
}
}
impl<E: Engine> AllocatedNum<E> {
pub fn alloc<CS, F>(
mut cs: CS,
value: F,
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>,
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let mut new_value = None;
let var = cs.alloc(|| "num", || {
let tmp = value()?;
new_value = Some(tmp);
Ok(tmp)
})?;
Ok(AllocatedNum {
value: new_value,
variable: var
})
}
pub fn inputize<CS>(
&self,
mut cs: CS
) -> Result<(), SynthesisError>
where CS: ConstraintSystem<E>
{
let input = cs.alloc_input(
|| "input variable",
|| {
Ok(*self.value.get()?)
}
)?;
cs.enforce(
|| "enforce input is correct",
|lc| lc + input,
|lc| lc + CS::one(),
|lc| lc + self.variable
);
Ok(())
}
/// Deconstructs this allocated number into its
/// boolean representation in little-endian bit
/// order, requiring that the representation
/// strictly exists "in the field" (i.e., a
/// congruency is not allowed.)
pub fn into_bits_le_strict<CS>(
&self,
mut cs: CS
) -> Result<Vec<Boolean>, SynthesisError>
where CS: ConstraintSystem<E>
{
pub fn kary_and<E, CS>(
mut cs: CS,
v: &[AllocatedBit]
) -> Result<AllocatedBit, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
{
assert!(v.len() > 0);
// Let's keep this simple for now and just AND them all
// manually
let mut cur = None;
for (i, v) in v.iter().enumerate() {
if cur.is_none() {
cur = Some(v.clone());
} else {
cur = Some(AllocatedBit::and(
cs.namespace(|| format!("and {}", i)),
cur.as_ref().unwrap(),
v
)?);
}
}
Ok(cur.expect("v.len() > 0"))
}
// We want to ensure that the bit representation of a is
// less than or equal to r - 1.
let mut a = self.value.map(|e| BitIterator::new(e.into_repr()));
let mut b = E::Fr::char();
b.sub_noborrow(&1.into());
let mut result = vec![];
// Runs of ones in r
let mut last_run = None;
let mut current_run = vec![];
let mut found_one = false;
let mut i = 0;
for b in BitIterator::new(b) {
let a_bit = a.as_mut().map(|e| e.next().unwrap());
// Skip over unset bits at the beginning
found_one |= b;
if !found_one {
// a_bit should also be false
a_bit.map(|e| assert!(!e));
continue;
}
if b {
// This is part of a run of ones. Let's just
// allocate the boolean with the expected value.
let a_bit = AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
a_bit
)?;
// ... and add it to the current run of ones.
current_run.push(a_bit.clone());
result.push(a_bit);
} else {
if current_run.len() > 0 {
// This is the start of a run of zeros, but we need
// to k-ary AND against `last_run` first.
if last_run.is_some() {
current_run.push(last_run.clone().unwrap());
}
last_run = Some(kary_and(
cs.namespace(|| format!("run ending at {}", i)),
&current_run
)?);
current_run.truncate(0);
}
// If `last_run` is true, `a` must be false, or it would
// not be in the field.
//
// If `last_run` is false, `a` can be true or false.
let a_bit = AllocatedBit::alloc_conditionally(
cs.namespace(|| format!("bit {}", i)),
a_bit,
&last_run.as_ref().expect("char always starts with a one")
)?;
result.push(a_bit);
}
i += 1;
}
// char is prime, so we'll always end on
// a run of zeros.
assert_eq!(current_run.len(), 0);
// Now, we have `result` in big-endian order.
// However, now we have to unpack self!
let mut lc = LinearCombination::zero();
let mut coeff = E::Fr::one();
for bit in result.iter().rev() {
lc = lc + (coeff, bit.get_variable());
coeff.double();
}
lc = lc - self.variable;
cs.enforce(
|| "unpacking constraint",
|lc| lc,
|lc| lc,
|_| lc
);
// Convert into booleans, and reverse for little-endian bit order
Ok(result.into_iter().map(|b| Boolean::from(b)).rev().collect())
}
/// Convert the allocated number into its little-endian representation.
/// Note that this does not strongly enforce that the bit representation is
/// "in the field."
pub fn into_bits_le<CS>(
&self,
mut cs: CS
) -> Result<Vec<Boolean>, SynthesisError>
where CS: ConstraintSystem<E>
{
let bits = boolean::field_into_allocated_bits_le(
&mut cs,
self.value
)?;
let mut lc = LinearCombination::zero();
let mut coeff = E::Fr::one();
for bit in bits.iter() {
lc = lc + (coeff, bit.get_variable());
coeff.double();
}
lc = lc - self.variable;
cs.enforce(
|| "unpacking constraint",
|lc| lc,
|lc| lc,
|_| lc
);
Ok(bits.into_iter().map(|b| Boolean::from(b)).collect())
}
pub fn mul<CS>(
&self,
mut cs: CS,
other: &Self
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>
{
let mut value = None;
let var = cs.alloc(|| "product num", || {
let mut tmp = *self.value.get()?;
tmp.mul_assign(other.value.get()?);
value = Some(tmp);
Ok(tmp)
})?;
// Constrain: a * b = ab
cs.enforce(
|| "multiplication constraint",
|lc| lc + self.variable,
|lc| lc + other.variable,
|lc| lc + var
);
Ok(AllocatedNum {
value: value,
variable: var
})
}
pub fn square<CS>(
&self,
mut cs: CS
) -> Result<Self, SynthesisError>
where CS: ConstraintSystem<E>
{
let mut value = None;
let var = cs.alloc(|| "squared num", || {
let mut tmp = *self.value.get()?;
tmp.square();
value = Some(tmp);
Ok(tmp)
})?;
// Constrain: a * a = aa
cs.enforce(
|| "squaring constraint",
|lc| lc + self.variable,
|lc| lc + self.variable,
|lc| lc + var
);
Ok(AllocatedNum {
value: value,
variable: var
})
}
pub fn assert_nonzero<CS>(
&self,
mut cs: CS
) -> Result<(), SynthesisError>
where CS: ConstraintSystem<E>
{
let inv = cs.alloc(|| "ephemeral inverse", || {
let tmp = *self.value.get()?;
if tmp.is_zero() {
Err(SynthesisError::DivisionByZero)
} else {
Ok(tmp.inverse().unwrap())
}
})?;
// Constrain a * inv = 1, which is only valid
// iff a has a multiplicative inverse, untrue
// for zero.
cs.enforce(
|| "nonzero assertion constraint",
|lc| lc + self.variable,
|lc| lc + inv,
|lc| lc + CS::one()
);
Ok(())
}
/// Takes two allocated numbers (a, b) and returns
/// (b, a) if the condition is true, and (a, b)
/// otherwise.
pub fn conditionally_reverse<CS>(
mut cs: CS,
a: &Self,
b: &Self,
condition: &Boolean
) -> Result<(Self, Self), SynthesisError>
where CS: ConstraintSystem<E>
{
let c = Self::alloc(
cs.namespace(|| "conditional reversal result 1"),
|| {
if *condition.get_value().get()? {
Ok(*b.value.get()?)
} else {
Ok(*a.value.get()?)
}
}
)?;
cs.enforce(
|| "first conditional reversal",
|lc| lc + a.variable - b.variable,
|_| condition.lc(CS::one(), E::Fr::one()),
|lc| lc + a.variable - c.variable
);
let d = Self::alloc(
cs.namespace(|| "conditional reversal result 2"),
|| {
if *condition.get_value().get()? {
Ok(*a.value.get()?)
} else {
Ok(*b.value.get()?)
}
}
)?;
cs.enforce(
|| "second conditional reversal",
|lc| lc + b.variable - a.variable,
|_| condition.lc(CS::one(), E::Fr::one()),
|lc| lc + b.variable - d.variable
);
Ok((c, d))
}
pub fn get_value(&self) -> Option<E::Fr> {
self.value
}
pub fn get_variable(&self) -> Variable {
self.variable
}
}
pub struct Num<E: Engine> {
value: Option<E::Fr>,
lc: LinearCombination<E>
}
impl<E: Engine> From<AllocatedNum<E>> for Num<E> {
fn from(num: AllocatedNum<E>) -> Num<E> {
Num {
value: num.value,
lc: LinearCombination::<E>::zero() + num.variable
}
}
}
impl<E: Engine> Num<E> {
pub fn zero() -> Self {
Num {
value: Some(E::Fr::zero()),
lc: LinearCombination::zero()
}
}
pub fn get_value(&self) -> Option<E::Fr> {
self.value
}
pub fn lc(&self, coeff: E::Fr) -> LinearCombination<E> {
LinearCombination::zero() + (coeff, &self.lc)
}
pub fn add_bool_with_coeff(
self,
one: Variable,
bit: &Boolean,
coeff: E::Fr
) -> Self
{
let newval = match (self.value, bit.get_value()) {
(Some(mut curval), Some(bval)) => {
if bval {
curval.add_assign(&coeff);
}
Some(curval)
},
_ => None
};
Num {
value: newval,
lc: self.lc + &bit.lc(one, coeff)
}
}
}
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rand, Rng, XorShiftRng};
use bellman::{ConstraintSystem};
use pairing::bls12_381::{Bls12, Fr};
use pairing::{Field, PrimeField, BitIterator};
use ::circuit::test::*;
use super::{AllocatedNum, Boolean};
#[test]
fn test_allocated_num() {
let mut cs = TestConstraintSystem::<Bls12>::new();
AllocatedNum::alloc(&mut cs, || Ok(Fr::one())).unwrap();
assert!(cs.get("num") == Fr::one());
}
#[test]
fn test_num_squaring() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(Fr::from_str("3").unwrap())).unwrap();
let n2 = n.square(&mut cs).unwrap();
assert!(cs.is_satisfied());
assert!(cs.get("squared num") == Fr::from_str("9").unwrap());
assert!(n2.value.unwrap() == Fr::from_str("9").unwrap());
cs.set("squared num", Fr::from_str("10").unwrap());
assert!(!cs.is_satisfied());
}
#[test]
fn test_num_multiplication() {
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Fr::from_str("12").unwrap())).unwrap();
let n2 = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Fr::from_str("10").unwrap())).unwrap();
let n3 = n.mul(&mut cs, &n2).unwrap();
assert!(cs.is_satisfied());
assert!(cs.get("product num") == Fr::from_str("120").unwrap());
assert!(n3.value.unwrap() == Fr::from_str("120").unwrap());
cs.set("product num", Fr::from_str("121").unwrap());
assert!(!cs.is_satisfied());
}
#[test]
fn test_num_conditional_reversal() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(rng.gen())).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(rng.gen())).unwrap();
let condition = Boolean::constant(false);
let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();
assert!(cs.is_satisfied());
assert_eq!(a.value.unwrap(), c.value.unwrap());
assert_eq!(b.value.unwrap(), d.value.unwrap());
}
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let a = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(rng.gen())).unwrap();
let b = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(rng.gen())).unwrap();
let condition = Boolean::constant(true);
let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();
assert!(cs.is_satisfied());
assert_eq!(a.value.unwrap(), d.value.unwrap());
assert_eq!(b.value.unwrap(), c.value.unwrap());
}
}
#[test]
fn test_num_nonzero() {
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(Fr::from_str("3").unwrap())).unwrap();
n.assert_nonzero(&mut cs).unwrap();
assert!(cs.is_satisfied());
cs.set("ephemeral inverse", Fr::from_str("3").unwrap());
assert!(cs.which_is_unsatisfied() == Some("nonzero assertion constraint"));
}
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(Fr::zero())).unwrap();
assert!(n.assert_nonzero(&mut cs).is_err());
}
}
#[test]
fn test_into_bits_strict() {
let mut negone = Fr::one();
negone.negate();
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(negone)).unwrap();
n.into_bits_le_strict(&mut cs).unwrap();
assert!(cs.is_satisfied());
// make the bit representation the characteristic
cs.set("bit 254/boolean", Fr::one());
// this makes the conditional boolean constraint fail
assert_eq!(cs.which_is_unsatisfied().unwrap(), "bit 254/boolean constraint");
}
#[test]
fn test_into_bits() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for i in 0..200 {
let r = Fr::rand(&mut rng);
let mut cs = TestConstraintSystem::<Bls12>::new();
let n = AllocatedNum::alloc(&mut cs, || Ok(r)).unwrap();
let bits = if i % 2 == 0 {
n.into_bits_le(&mut cs).unwrap()
} else {
n.into_bits_le_strict(&mut cs).unwrap()
};
assert!(cs.is_satisfied());
for (b, a) in BitIterator::new(r.into_repr()).skip(1).zip(bits.iter().rev()) {
if let &Boolean::Is(ref a) = a {
assert_eq!(b, a.get_value().unwrap());
} else {
unreachable!()
}
}
cs.set("num", Fr::rand(&mut rng));
assert!(!cs.is_satisfied());
cs.set("num", r);
assert!(cs.is_satisfied());
for i in 0..Fr::NUM_BITS {
let name = format!("bit {}/boolean", i);
let cur = cs.get(&name);
let mut tmp = Fr::one();
tmp.sub_assign(&cur);
cs.set(&name, tmp);
assert!(!cs.is_satisfied());
cs.set(&name, cur);
assert!(cs.is_satisfied());
}
}
}
}

View File

@ -0,0 +1,194 @@
use super::*;
use super::ecc::{
MontgomeryPoint,
EdwardsPoint
};
use super::boolean::Boolean;
use ::jubjub::*;
use bellman::{
ConstraintSystem
};
use super::lookup::*;
pub use pedersen_hash::Personalization;
impl Personalization {
fn get_constant_bools(&self) -> Vec<Boolean> {
self.get_bits()
.into_iter()
.map(|e| Boolean::constant(e))
.collect()
}
}
pub fn pedersen_hash<E: JubjubEngine, CS>(
mut cs: CS,
personalization: Personalization,
bits: &[Boolean],
params: &E::Params
) -> Result<EdwardsPoint<E>, SynthesisError>
where CS: ConstraintSystem<E>
{
let personalization = personalization.get_constant_bools();
assert_eq!(personalization.len(), 6);
let mut edwards_result = None;
let mut bits = personalization.iter().chain(bits.iter());
let mut segment_generators = params.pedersen_circuit_generators().iter();
let boolean_false = Boolean::constant(false);
let mut segment_i = 0;
loop {
let mut segment_result = None;
let mut segment_windows = &segment_generators.next()
.expect("enough segments")[..];
let mut window_i = 0;
while let Some(a) = bits.next() {
let b = bits.next().unwrap_or(&boolean_false);
let c = bits.next().unwrap_or(&boolean_false);
let tmp = lookup3_xy_with_conditional_negation(
cs.namespace(|| format!("segment {}, window {}", segment_i, window_i)),
&[a.clone(), b.clone(), c.clone()],
&segment_windows[0]
)?;
let tmp = MontgomeryPoint::interpret_unchecked(tmp.0, tmp.1);
match segment_result {
None => {
segment_result = Some(tmp);
},
Some(ref mut segment_result) => {
*segment_result = tmp.add(
cs.namespace(|| format!("addition of segment {}, window {}", segment_i, window_i)),
segment_result,
params
)?;
}
}
segment_windows = &segment_windows[1..];
if segment_windows.len() == 0 {
break;
}
window_i += 1;
}
match segment_result {
Some(segment_result) => {
// Convert this segment into twisted Edwards form.
let segment_result = segment_result.into_edwards(
cs.namespace(|| format!("conversion of segment {} into edwards", segment_i)),
params
)?;
match edwards_result {
Some(ref mut edwards_result) => {
*edwards_result = segment_result.add(
cs.namespace(|| format!("addition of segment {} to accumulator", segment_i)),
edwards_result,
params
)?;
},
None => {
edwards_result = Some(segment_result);
}
}
},
None => {
// We didn't process any new bits.
break;
}
}
segment_i += 1;
}
Ok(edwards_result.unwrap())
}
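
For intuition, a minimal standalone sketch of how the loop above consumes its input: the personalization and message bits are taken three at a time, with the final window padded using false, and each 3-bit chunk then drives one windowed lookup (segment boundaries and the circuit types are omitted here).

// Group bits into 3-bit windows, padding the last one with `false`,
// mirroring the a / b / c pulls from the bit iterator above.
fn three_bit_windows(bits: &[bool]) -> Vec<[bool; 3]> {
    bits.chunks(3)
        .map(|c| [
            *c.get(0).unwrap_or(&false),
            *c.get(1).unwrap_or(&false),
            *c.get(2).unwrap_or(&false),
        ])
        .collect()
}

fn main() {
    assert_eq!(
        three_bit_windows(&[true, false, true, true]),
        vec![[true, false, true], [true, false, false]]
    );
}
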
#[cfg(test)]
mod test {
use rand::{SeedableRng, Rng, XorShiftRng};
use super::*;
use ::circuit::test::*;
use ::circuit::boolean::{Boolean, AllocatedBit};
use pairing::bls12_381::{Bls12, Fr};
use pairing::PrimeField;
#[test]
fn test_pedersen_hash_constraints() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let params = &JubjubBls12::new();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input: Vec<bool> = (0..(Fr::NUM_BITS * 2)).map(|_| rng.gen()).collect();
let input_bools: Vec<Boolean> = input.iter().enumerate().map(|(i, b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("input {}", i)), Some(*b)).unwrap()
)
}).collect();
pedersen_hash(
cs.namespace(|| "pedersen hash"),
Personalization::NoteCommitment,
&input_bools,
params
).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 1377);
}
#[test]
fn test_pedersen_hash() {
let mut rng = XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let params = &JubjubBls12::new();
for length in 0..751 {
for _ in 0..5 {
let mut input: Vec<bool> = (0..length).map(|_| rng.gen()).collect();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bools: Vec<Boolean> = input.iter().enumerate().map(|(i, b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("input {}", i)), Some(*b)).unwrap()
)
}).collect();
let res = pedersen_hash(
cs.namespace(|| "pedersen hash"),
Personalization::MerkleTree(1),
&input_bools,
params
).unwrap();
assert!(cs.is_satisfied());
let expected = ::pedersen_hash::pedersen_hash::<Bls12, _>(
Personalization::MerkleTree(1),
input.clone().into_iter(),
params
).into_xy();
assert_eq!(res.get_x().get_value().unwrap(), expected.0);
assert_eq!(res.get_y().get_value().unwrap(), expected.1);
// Test against the output of a different personalization
let unexpected = ::pedersen_hash::pedersen_hash::<Bls12, _>(
Personalization::MerkleTree(0),
input.into_iter(),
params
).into_xy();
assert!(res.get_x().get_value().unwrap() != unexpected.0);
assert!(res.get_y().get_value().unwrap() != unexpected.1);
}
}
}
}

View File

@ -0,0 +1,815 @@
use pairing::{
PrimeField,
PrimeFieldRepr,
Field,
};
use bellman::{
SynthesisError,
ConstraintSystem,
Circuit
};
use jubjub::{
JubjubEngine,
FixedGenerators
};
use constants;
use primitives::{
ValueCommitment,
ProofGenerationKey,
PaymentAddress
};
use super::Assignment;
use super::boolean;
use super::ecc;
use super::pedersen_hash;
use super::blake2s;
use super::num;
use super::multipack;
/// This is an instance of the `Spend` circuit.
pub struct Spend<'a, E: JubjubEngine> {
pub params: &'a E::Params,
/// Pedersen commitment to the value being spent
pub value_commitment: Option<ValueCommitment<E>>,
/// Key required to construct proofs for spending notes
/// for a particular spending key
pub proof_generation_key: Option<ProofGenerationKey<E>>,
/// The payment address associated with the note
pub payment_address: Option<PaymentAddress<E>>,
/// The randomness of the note commitment
pub commitment_randomness: Option<E::Fs>,
/// Re-randomization of the public key
pub ar: Option<E::Fs>,
/// The authentication path of the commitment in the tree
pub auth_path: Vec<Option<(E::Fr, bool)>>,
/// The anchor; the root of the tree. If the note being
/// spent is zero-value, this can be anything.
pub anchor: Option<E::Fr>
}
/// This is an output circuit instance.
pub struct Output<'a, E: JubjubEngine> {
pub params: &'a E::Params,
/// Pedersen commitment to the value being spent
pub value_commitment: Option<ValueCommitment<E>>,
/// The payment address of the recipient
pub payment_address: Option<PaymentAddress<E>>,
/// The randomness used to hide the note commitment data
pub commitment_randomness: Option<E::Fs>,
/// The ephemeral secret key for DH with recipient
pub esk: Option<E::Fs>
}
/// Exposes a Pedersen commitment to the value as an
/// input to the circuit
fn expose_value_commitment<E, CS>(
mut cs: CS,
value_commitment: Option<ValueCommitment<E>>,
params: &E::Params
) -> Result<Vec<boolean::Boolean>, SynthesisError>
where E: JubjubEngine,
CS: ConstraintSystem<E>
{
// Booleanize the value into little-endian bit order
let value_bits = boolean::u64_into_boolean_vec_le(
cs.namespace(|| "value"),
value_commitment.as_ref().map(|c| c.value)
)?;
// Compute the note value in the exponent
let value = ecc::fixed_base_multiplication(
cs.namespace(|| "compute the value in the exponent"),
FixedGenerators::ValueCommitmentValue,
&value_bits,
params
)?;
// Booleanize the randomness. This does not ensure
// the bit representation is "in the field" because
// it doesn't matter for security.
let rcv = boolean::field_into_boolean_vec_le(
cs.namespace(|| "rcv"),
value_commitment.as_ref().map(|c| c.randomness)
)?;
// Compute the randomness in the exponent
let rcv = ecc::fixed_base_multiplication(
cs.namespace(|| "computation of rcv"),
FixedGenerators::ValueCommitmentRandomness,
&rcv,
params
)?;
// Compute the Pedersen commitment to the value
let cv = value.add(
cs.namespace(|| "computation of cv"),
&rcv,
params
)?;
// Expose the commitment as an input to the circuit
cv.inputize(cs.namespace(|| "commitment point"))?;
Ok(value_bits)
}
impl<'a, E: JubjubEngine> Circuit<E> for Spend<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError>
{
// Prover witnesses ak (ensures that it's on the curve)
let ak = ecc::EdwardsPoint::witness(
cs.namespace(|| "ak"),
self.proof_generation_key.as_ref().map(|k| k.ak.clone()),
self.params
)?;
// There are no sensible attacks on small order points
// of ak (that we're aware of!) but it's a cheap check,
// so we do it.
ak.assert_not_small_order(
cs.namespace(|| "ak not small order"),
self.params
)?;
// Rerandomize ak and expose it as an input to the circuit
{
let ar = boolean::field_into_boolean_vec_le(
cs.namespace(|| "ar"),
self.ar
)?;
// Compute the randomness in the exponent
let ar = ecc::fixed_base_multiplication(
cs.namespace(|| "computation of randomization for the signing key"),
FixedGenerators::SpendingKeyGenerator,
&ar,
self.params
)?;
let rk = ak.add(
cs.namespace(|| "computation of rk"),
&ar,
self.params
)?;
rk.inputize(cs.namespace(|| "rk"))?;
}
// Compute nk = [nsk] ProofGenerationKey
let nk;
{
// Witness nsk as bits
let nsk = boolean::field_into_boolean_vec_le(
cs.namespace(|| "nsk"),
self.proof_generation_key.as_ref().map(|k| k.nsk.clone())
)?;
// NB: We don't ensure that the bit representation of nsk
// is "in the field" (Fs) because it's not used except to
// demonstrate the prover knows it. If they know a
// congruency then that's equivalent.
// Compute nk = [nsk] ProofGenerationKey
nk = ecc::fixed_base_multiplication(
cs.namespace(|| "computation of nk"),
FixedGenerators::ProofGenerationKey,
&nsk,
self.params
)?;
}
// This is the "viewing key" preimage for CRH^ivk
let mut ivk_preimage = vec![];
// Place ak in the preimage for CRH^ivk
ivk_preimage.extend(
ak.repr(cs.namespace(|| "representation of ak"))?
);
// This is the nullifier preimage for PRF^nf
let mut nf_preimage = vec![];
// Extend ivk and nf preimages with the representation of
// nk.
{
let repr_nk = nk.repr(
cs.namespace(|| "representation of nk")
)?;
ivk_preimage.extend(repr_nk.iter().cloned());
nf_preimage.extend(repr_nk);
}
assert_eq!(ivk_preimage.len(), 512);
assert_eq!(nf_preimage.len(), 256);
// Compute the incoming viewing key ivk
let mut ivk = blake2s::blake2s(
cs.namespace(|| "computation of ivk"),
&ivk_preimage,
constants::CRH_IVK_PERSONALIZATION
)?;
// drop_5 to ensure it's in the field
ivk.truncate(E::Fs::CAPACITY as usize);
// Witness g_d, checking that it's on the curve.
let g_d = {
// This binding is to avoid a weird edge case in Rust's
// ownership/borrowing rules. self is partially moved
// above, but the closure for and_then will have to
// move self (or a reference to self) to reference
// self.params, so we have to copy self.params here.
let params = self.params;
ecc::EdwardsPoint::witness(
cs.namespace(|| "witness g_d"),
self.payment_address.as_ref().and_then(|a| a.g_d(params)),
self.params
)?
};
// Check that g_d is not small order. Technically, this check
// is already done in the Output circuit, and this proof ensures
// g_d is bound to a product of that check, but for defense in
// depth let's check it anyway. It's cheap.
g_d.assert_not_small_order(
cs.namespace(|| "g_d not small order"),
self.params
)?;
// Compute pk_d = g_d^ivk
let pk_d = g_d.mul(
cs.namespace(|| "compute pk_d"),
&ivk,
self.params
)?;
// Compute note contents:
// value (in big endian) followed by g_d and pk_d
let mut note_contents = vec![];
// Handle the value; we'll need it later for the
// dummy input check.
let mut value_num = num::Num::zero();
{
// Get the value in little-endian bit order
let value_bits = expose_value_commitment(
cs.namespace(|| "value commitment"),
self.value_commitment,
self.params
)?;
// Compute the note's value as a linear combination
// of the bits.
let mut coeff = E::Fr::one();
for bit in &value_bits {
value_num = value_num.add_bool_with_coeff(
CS::one(),
bit,
coeff
);
coeff.double();
}
// Place the value in the note
note_contents.extend(value_bits);
}
// Place g_d in the note
note_contents.extend(
g_d.repr(cs.namespace(|| "representation of g_d"))?
);
// Place pk_d in the note
note_contents.extend(
pk_d.repr(cs.namespace(|| "representation of pk_d"))?
);
assert_eq!(
note_contents.len(),
64 + // value
256 + // g_d
256 // pk_d
);
// Compute the hash of the note contents
let mut cm = pedersen_hash::pedersen_hash(
cs.namespace(|| "note content hash"),
pedersen_hash::Personalization::NoteCommitment,
&note_contents,
self.params
)?;
{
// Booleanize the randomness for the note commitment
let rcm = boolean::field_into_boolean_vec_le(
cs.namespace(|| "rcm"),
self.commitment_randomness
)?;
// Compute the note commitment randomness in the exponent
let rcm = ecc::fixed_base_multiplication(
cs.namespace(|| "computation of commitment randomness"),
FixedGenerators::NoteCommitmentRandomness,
&rcm,
self.params
)?;
// Randomize the note commitment. Pedersen hashes are not
// themselves hiding commitments.
cm = cm.add(
cs.namespace(|| "randomization of note commitment"),
&rcm,
self.params
)?;
}
// This will store (least significant bit first)
// the position of the note in the tree, for use
// in nullifier computation.
let mut position_bits = vec![];
// This is an injective encoding, as cur is a
// point in the prime order subgroup.
let mut cur = cm.get_x().clone();
// Ascend the merkle tree authentication path
for (i, e) in self.auth_path.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("merkle tree hash {}", i));
// Determines if the current subtree is the "right" leaf at this
// depth of the tree.
let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(
cs.namespace(|| "position bit"),
e.map(|e| e.1)
)?);
// Push this boolean for nullifier computation later
position_bits.push(cur_is_right.clone());
// Witness the authentication path element adjacent
// at this depth.
let path_element = num::AllocatedNum::alloc(
cs.namespace(|| "path element"),
|| {
Ok(e.get()?.0)
}
)?;
// Swap the two if the current subtree is on the right
let (xl, xr) = num::AllocatedNum::conditionally_reverse(
cs.namespace(|| "conditional reversal of preimage"),
&cur,
&path_element,
&cur_is_right
)?;
// We don't need to be strict, because the function is
// collision-resistant. If the prover witnesses a congruency,
// they will be unable to find an authentication path in the
// tree with high probability.
let mut preimage = vec![];
preimage.extend(xl.into_bits_le(cs.namespace(|| "xl into bits"))?);
preimage.extend(xr.into_bits_le(cs.namespace(|| "xr into bits"))?);
// Compute the new subtree value
cur = pedersen_hash::pedersen_hash(
cs.namespace(|| "computation of pedersen hash"),
pedersen_hash::Personalization::MerkleTree(i),
&preimage,
self.params
)?.get_x().clone(); // Injective encoding
}
{
let real_anchor_value = self.anchor;
// Allocate the "real" anchor that will be exposed.
let rt = num::AllocatedNum::alloc(
cs.namespace(|| "conditional anchor"),
|| {
Ok(*real_anchor_value.get()?)
}
)?;
// (cur - rt) * value = 0
// if value is zero, cur and rt can be different
// if value is nonzero, they must be equal
cs.enforce(
|| "conditionally enforce correct root",
|lc| lc + cur.get_variable() - rt.get_variable(),
|lc| lc + &value_num.lc(E::Fr::one()),
|lc| lc
);
// Expose the anchor
rt.inputize(cs.namespace(|| "anchor"))?;
}
// Compute the cm + g^position for preventing
// faerie gold attacks
let mut rho = cm;
{
// Compute the position in the exponent
let position = ecc::fixed_base_multiplication(
cs.namespace(|| "g^position"),
FixedGenerators::NullifierPosition,
&position_bits,
self.params
)?;
// Add the position to the commitment
rho = rho.add(
cs.namespace(|| "faerie gold prevention"),
&position,
self.params
)?;
}
// Let's compute nf = BLAKE2s(nk || rho)
nf_preimage.extend(
rho.repr(cs.namespace(|| "representation of rho"))?
);
assert_eq!(nf_preimage.len(), 512);
// Compute nf
let nf = blake2s::blake2s(
cs.namespace(|| "nf computation"),
&nf_preimage,
constants::PRF_NF_PERSONALIZATION
)?;
multipack::pack_into_inputs(cs.namespace(|| "pack nullifier"), &nf)
}
}
impl<'a, E: JubjubEngine> Circuit<E> for Output<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError>
{
// Let's start to construct our note, which contains
// value (big endian)
let mut note_contents = vec![];
// Expose the value commitment and place the value
// in the note.
note_contents.extend(expose_value_commitment(
cs.namespace(|| "value commitment"),
self.value_commitment,
self.params
)?);
// Let's deal with g_d
{
let params = self.params;
// Prover witnesses g_d, ensuring it's on the
// curve.
let g_d = ecc::EdwardsPoint::witness(
cs.namespace(|| "witness g_d"),
self.payment_address.as_ref().and_then(|a| a.g_d(params)),
self.params
)?;
// g_d is ensured to be large order. The relationship
// between g_d and pk_d ultimately binds ivk to the
// note. If this were a small order point, it would
// not do this correctly, and the prover could
// double-spend by finding random ivk's that satisfy
// the relationship.
//
// Further, if it were small order, epk would be
// small order too!
g_d.assert_not_small_order(
cs.namespace(|| "g_d not small order"),
self.params
)?;
// Extend our note contents with the representation of
// g_d.
note_contents.extend(
g_d.repr(cs.namespace(|| "representation of g_d"))?
);
// Booleanize our ephemeral secret key
let esk = boolean::field_into_boolean_vec_le(
cs.namespace(|| "esk"),
self.esk
)?;
// Create the ephemeral public key from g_d.
let epk = g_d.mul(
cs.namespace(|| "epk computation"),
&esk,
self.params
)?;
// Expose epk publicly.
epk.inputize(cs.namespace(|| "epk"))?;
}
// Now let's deal with pk_d. We don't do any checks and
// essentially allow the prover to witness any 256 bits
// they would like.
{
// Just grab pk_d from the witness
let pk_d = self.payment_address.as_ref().map(|e| e.pk_d.into_xy());
// Witness the y-coordinate, encoded as little
// endian bits (to match the representation)
let y_contents = boolean::field_into_boolean_vec_le(
cs.namespace(|| "pk_d bits of y"),
pk_d.map(|e| e.1)
)?;
// Witness the sign bit
let sign_bit = boolean::Boolean::from(boolean::AllocatedBit::alloc(
cs.namespace(|| "pk_d bit of x"),
pk_d.map(|e| e.0.into_repr().is_odd())
)?);
// Extend the note with pk_d representation
note_contents.extend(y_contents);
note_contents.push(sign_bit);
}
assert_eq!(
note_contents.len(),
64 + // value
256 + // g_d
256 // pk_d
);
// Compute the hash of the note contents
let mut cm = pedersen_hash::pedersen_hash(
cs.namespace(|| "note content hash"),
pedersen_hash::Personalization::NoteCommitment,
&note_contents,
self.params
)?;
{
// Booleanize the randomness
let rcm = boolean::field_into_boolean_vec_le(
cs.namespace(|| "rcm"),
self.commitment_randomness
)?;
// Compute the note commitment randomness in the exponent
let rcm = ecc::fixed_base_multiplication(
cs.namespace(|| "computation of commitment randomness"),
FixedGenerators::NoteCommitmentRandomness,
&rcm,
self.params
)?;
// Randomize our note commitment
cm = cm.add(
cs.namespace(|| "randomization of note commitment"),
&rcm,
self.params
)?;
}
// Only the x-coordinate of the output is revealed,
// since we know it is prime order, and we know that
// the x-coordinate is an injective encoding for
// prime-order elements.
cm.get_x().inputize(cs.namespace(|| "commitment"))?;
Ok(())
}
}
#[test]
fn test_input_circuit_with_bls12_381() {
use pairing::{Field, BitIterator};
use pairing::bls12_381::*;
use rand::{SeedableRng, Rng, XorShiftRng};
use ::circuit::test::*;
use jubjub::{JubjubBls12, fs, edwards};
let params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let tree_depth = 32;
for _ in 0..10 {
let value_commitment = ValueCommitment {
value: rng.gen(),
randomness: rng.gen()
};
let nsk: fs::Fs = rng.gen();
let ak = edwards::Point::rand(rng, params).mul_by_cofactor(params);
let proof_generation_key = ::primitives::ProofGenerationKey {
ak: ak.clone(),
nsk: nsk.clone()
};
let viewing_key = proof_generation_key.into_viewing_key(params);
let payment_address;
loop {
let diversifier = ::primitives::Diversifier(rng.gen());
if let Some(p) = viewing_key.into_payment_address(
diversifier,
params
)
{
payment_address = p;
break;
}
}
let g_d = payment_address.diversifier.g_d(params).unwrap();
let commitment_randomness: fs::Fs = rng.gen();
let auth_path = vec![Some((rng.gen(), rng.gen())); tree_depth];
let ar: fs::Fs = rng.gen();
{
let rk = viewing_key.rk(ar, params).into_xy();
let expected_value_cm = value_commitment.cm(params).into_xy();
let note = ::primitives::Note {
value: value_commitment.value,
g_d: g_d.clone(),
pk_d: payment_address.pk_d.clone(),
r: commitment_randomness.clone()
};
let mut position = 0u64;
let cm: Fr = note.cm(params);
let mut cur = cm.clone();
for (i, val) in auth_path.clone().into_iter().enumerate()
{
let (uncle, b) = val.unwrap();
let mut lhs = cur;
let mut rhs = uncle;
if b {
::std::mem::swap(&mut lhs, &mut rhs);
}
let mut lhs: Vec<bool> = BitIterator::new(lhs.into_repr()).collect();
let mut rhs: Vec<bool> = BitIterator::new(rhs.into_repr()).collect();
lhs.reverse();
rhs.reverse();
cur = ::pedersen_hash::pedersen_hash::<Bls12, _>(
::pedersen_hash::Personalization::MerkleTree(i),
lhs.into_iter()
.take(Fr::NUM_BITS as usize)
.chain(rhs.into_iter().take(Fr::NUM_BITS as usize)),
params
).into_xy().0;
if b {
position |= 1 << i;
}
}
let expected_nf = note.nf(&viewing_key, position, params);
let expected_nf = multipack::bytes_to_bits_le(&expected_nf);
let expected_nf = multipack::compute_multipacking::<Bls12>(&expected_nf);
assert_eq!(expected_nf.len(), 2);
let mut cs = TestConstraintSystem::<Bls12>::new();
let instance = Spend {
params: params,
value_commitment: Some(value_commitment.clone()),
proof_generation_key: Some(proof_generation_key.clone()),
payment_address: Some(payment_address.clone()),
commitment_randomness: Some(commitment_randomness),
ar: Some(ar),
auth_path: auth_path.clone(),
anchor: Some(cur)
};
instance.synthesize(&mut cs).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 98777);
assert_eq!(cs.hash(), "d37c738e83df5d9b0bb6495ac96abf21bcb2697477e2c15c2c7916ff7a3b6a89");
assert_eq!(cs.get("randomization of note commitment/x3/num"), cm);
assert_eq!(cs.num_inputs(), 8);
assert_eq!(cs.get_input(0, "ONE"), Fr::one());
assert_eq!(cs.get_input(1, "rk/x/input variable"), rk.0);
assert_eq!(cs.get_input(2, "rk/y/input variable"), rk.1);
assert_eq!(cs.get_input(3, "value commitment/commitment point/x/input variable"), expected_value_cm.0);
assert_eq!(cs.get_input(4, "value commitment/commitment point/y/input variable"), expected_value_cm.1);
assert_eq!(cs.get_input(5, "anchor/input variable"), cur);
assert_eq!(cs.get_input(6, "pack nullifier/input 0"), expected_nf[0]);
assert_eq!(cs.get_input(7, "pack nullifier/input 1"), expected_nf[1]);
}
}
}
#[test]
fn test_output_circuit_with_bls12_381() {
use pairing::{Field};
use pairing::bls12_381::*;
use rand::{SeedableRng, Rng, XorShiftRng};
use ::circuit::test::*;
use jubjub::{JubjubBls12, fs, edwards};
let params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
let value_commitment = ValueCommitment {
value: rng.gen(),
randomness: rng.gen()
};
let nsk: fs::Fs = rng.gen();
let ak = edwards::Point::rand(rng, params).mul_by_cofactor(params);
let proof_generation_key = ::primitives::ProofGenerationKey {
ak: ak.clone(),
nsk: nsk.clone()
};
let viewing_key = proof_generation_key.into_viewing_key(params);
let payment_address;
loop {
let diversifier = ::primitives::Diversifier(rng.gen());
if let Some(p) = viewing_key.into_payment_address(
diversifier,
params
)
{
payment_address = p;
break;
}
}
let commitment_randomness: fs::Fs = rng.gen();
let esk: fs::Fs = rng.gen();
{
let mut cs = TestConstraintSystem::<Bls12>::new();
let instance = Output {
params: params,
value_commitment: Some(value_commitment.clone()),
payment_address: Some(payment_address.clone()),
commitment_randomness: Some(commitment_randomness),
esk: Some(esk.clone())
};
instance.synthesize(&mut cs).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 7827);
assert_eq!(cs.hash(), "c26d5cdfe6ccd65c03390902c02e11393ea6bb96aae32a7f2ecb12eb9103faee");
let expected_cm = payment_address.create_note(
value_commitment.value,
commitment_randomness,
params
).expect("should be valid").cm(params);
let expected_value_cm = value_commitment.cm(params).into_xy();
let expected_epk = payment_address.g_d(params).expect("should be valid").mul(esk, params);
let expected_epk_xy = expected_epk.into_xy();
assert_eq!(cs.num_inputs(), 6);
assert_eq!(cs.get_input(0, "ONE"), Fr::one());
assert_eq!(cs.get_input(1, "value commitment/commitment point/x/input variable"), expected_value_cm.0);
assert_eq!(cs.get_input(2, "value commitment/commitment point/y/input variable"), expected_value_cm.1);
assert_eq!(cs.get_input(3, "epk/x/input variable"), expected_epk_xy.0);
assert_eq!(cs.get_input(4, "epk/y/input variable"), expected_epk_xy.1);
assert_eq!(cs.get_input(5, "commitment/input variable"), expected_cm);
}
}
}



@ -0,0 +1,417 @@
use super::uint32::UInt32;
use super::multieq::MultiEq;
use super::boolean::Boolean;
use bellman::{ConstraintSystem, SynthesisError};
use pairing::Engine;
const ROUND_CONSTANTS: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
const IV: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
];
pub fn sha256_block_no_padding<E, CS>(
mut cs: CS,
input: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
assert_eq!(input.len(), 512);
Ok(sha256_compression_function(
&mut cs,
&input,
&get_sha256_iv()
)?
.into_iter()
.flat_map(|e| e.into_bits_be())
.collect())
}
pub fn sha256<E, CS>(
mut cs: CS,
input: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
assert!(input.len() % 8 == 0);
let mut padded = input.to_vec();
let plen = padded.len() as u64;
// append a single '1' bit
padded.push(Boolean::constant(true));
// append K '0' bits, where K is the minimum number >= 0 such that L + 1 + K + 64 is a multiple of 512
while (padded.len() + 64) % 512 != 0 {
padded.push(Boolean::constant(false));
}
// append L as a 64-bit big-endian integer, making the total post-processed length a multiple of 512 bits
for b in (0..64).rev().map(|i| (plen >> i) & 1 == 1) {
padded.push(Boolean::constant(b));
}
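// For example, a 512-bit input gets 1 + 447 padding bits plus the
// 64-bit length, for a padded total of 1024 bits (two blocks).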
assert!(padded.len() % 512 == 0);
let mut cur = get_sha256_iv();
for (i, block) in padded.chunks(512).enumerate() {
cur = sha256_compression_function(
cs.namespace(|| format!("block {}", i)),
block,
&cur
)?;
}
Ok(cur.into_iter()
.flat_map(|e| e.into_bits_be())
.collect())
}
fn get_sha256_iv() -> Vec<UInt32> {
IV.iter().map(|&v| UInt32::constant(v)).collect()
}
fn sha256_compression_function<E, CS>(
cs: CS,
input: &[Boolean],
current_hash_value: &[UInt32]
) -> Result<Vec<UInt32>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
assert_eq!(input.len(), 512);
assert_eq!(current_hash_value.len(), 8);
let mut w = input.chunks(32)
.map(|e| UInt32::from_bits_be(e))
.collect::<Vec<_>>();
// We can save some constraints by batching the equality checks
// of several u32 additions together via MultiEq
let mut cs = MultiEq::new(cs);
for i in 16..64 {
let cs = &mut cs.namespace(|| format!("w extension {}", i));
// s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3)
let mut s0 = w[i-15].rotr(7);
s0 = s0.xor(
cs.namespace(|| "first xor for s0"),
&w[i-15].rotr(18)
)?;
s0 = s0.xor(
cs.namespace(|| "second xor for s0"),
&w[i-15].shr(3)
)?;
// s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10)
let mut s1 = w[i-2].rotr(17);
s1 = s1.xor(
cs.namespace(|| "first xor for s1"),
&w[i-2].rotr(19)
)?;
s1 = s1.xor(
cs.namespace(|| "second xor for s1"),
&w[i-2].shr(10)
)?;
let tmp = UInt32::addmany(
cs.namespace(|| "computation of w[i]"),
&[w[i-16].clone(), s0, w[i-7].clone(), s1]
)?;
// w[i] := w[i-16] + s0 + w[i-7] + s1
w.push(tmp);
}
assert_eq!(w.len(), 64);
enum Maybe {
Deferred(Vec<UInt32>),
Concrete(UInt32)
}
impl Maybe {
fn compute<E, CS, M>(
self,
cs: M,
others: &[UInt32]
) -> Result<UInt32, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root=MultiEq<E, CS>>
{
Ok(match self {
Maybe::Concrete(ref v) => {
return Ok(v.clone())
},
Maybe::Deferred(mut v) => {
v.extend(others.into_iter().cloned());
UInt32::addmany(
cs,
&v
)?
}
})
}
}
let mut a = Maybe::Concrete(current_hash_value[0].clone());
let mut b = current_hash_value[1].clone();
let mut c = current_hash_value[2].clone();
let mut d = current_hash_value[3].clone();
let mut e = Maybe::Concrete(current_hash_value[4].clone());
let mut f = current_hash_value[5].clone();
let mut g = current_hash_value[6].clone();
let mut h = current_hash_value[7].clone();
for i in 0..64 {
let cs = &mut cs.namespace(|| format!("compression round {}", i));
// S1 := (e rightrotate 6) xor (e rightrotate 11) xor (e rightrotate 25)
let new_e = e.compute(cs.namespace(|| "deferred e computation"), &[])?;
let mut s1 = new_e.rotr(6);
s1 = s1.xor(
cs.namespace(|| "first xor for s1"),
&new_e.rotr(11)
)?;
s1 = s1.xor(
cs.namespace(|| "second xor for s1"),
&new_e.rotr(25)
)?;
// ch := (e and f) xor ((not e) and g)
let ch = UInt32::sha256_ch(
cs.namespace(|| "ch"),
&new_e,
&f,
&g
)?;
// temp1 := h + S1 + ch + k[i] + w[i]
let temp1 = vec![
h.clone(),
s1,
ch,
UInt32::constant(ROUND_CONSTANTS[i]),
w[i].clone()
];
// S0 := (a rightrotate 2) xor (a rightrotate 13) xor (a rightrotate 22)
let new_a = a.compute(cs.namespace(|| "deferred a computation"), &[])?;
let mut s0 = new_a.rotr(2);
s0 = s0.xor(
cs.namespace(|| "first xor for s0"),
&new_a.rotr(13)
)?;
s0 = s0.xor(
cs.namespace(|| "second xor for s0"),
&new_a.rotr(22)
)?;
// maj := (a and b) xor (a and c) xor (b and c)
let maj = UInt32::sha256_maj(
cs.namespace(|| "maj"),
&new_a,
&b,
&c
)?;
// temp2 := S0 + maj
let temp2 = vec![s0, maj];
/*
h := g
g := f
f := e
e := d + temp1
d := c
c := b
b := a
a := temp1 + temp2
*/
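// `e` and `a` are left as deferred sums: the additions `d + temp1`
// and `temp1 + temp2` are folded into the next round's `addmany`
// (or into the final h0/h4 computation below), saving constraints.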
h = g;
g = f;
f = new_e;
e = Maybe::Deferred(temp1.iter().cloned().chain(Some(d)).collect::<Vec<_>>());
d = c;
c = b;
b = new_a;
a = Maybe::Deferred(temp1.iter().cloned().chain(temp2.iter().cloned()).collect::<Vec<_>>());
}
/*
Add the compressed chunk to the current hash value:
h0 := h0 + a
h1 := h1 + b
h2 := h2 + c
h3 := h3 + d
h4 := h4 + e
h5 := h5 + f
h6 := h6 + g
h7 := h7 + h
*/
let h0 = a.compute(
cs.namespace(|| "deferred h0 computation"),
&[current_hash_value[0].clone()]
)?;
let h1 = UInt32::addmany(
cs.namespace(|| "new h1"),
&[current_hash_value[1].clone(), b]
)?;
let h2 = UInt32::addmany(
cs.namespace(|| "new h2"),
&[current_hash_value[2].clone(), c]
)?;
let h3 = UInt32::addmany(
cs.namespace(|| "new h3"),
&[current_hash_value[3].clone(), d]
)?;
let h4 = e.compute(
cs.namespace(|| "deferred h4 computation"),
&[current_hash_value[4].clone()]
)?;
let h5 = UInt32::addmany(
cs.namespace(|| "new h5"),
&[current_hash_value[5].clone(), f]
)?;
let h6 = UInt32::addmany(
cs.namespace(|| "new h6"),
&[current_hash_value[6].clone(), g]
)?;
let h7 = UInt32::addmany(
cs.namespace(|| "new h7"),
&[current_hash_value[7].clone(), h]
)?;
Ok(vec![h0, h1, h2, h3, h4, h5, h6, h7])
}
#[cfg(test)]
mod test {
use super::*;
use circuit::boolean::AllocatedBit;
use pairing::bls12_381::Bls12;
use circuit::test::TestConstraintSystem;
use rand::{XorShiftRng, SeedableRng, Rng};
#[test]
fn test_blank_hash() {
let iv = get_sha256_iv();
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut input_bits: Vec<_> = (0..512).map(|_| Boolean::Constant(false)).collect();
input_bits[0] = Boolean::Constant(true);
let out = sha256_compression_function(
&mut cs,
&input_bits,
&iv
).unwrap();
let out_bits: Vec<_> = out.into_iter().flat_map(|e| e.into_bits_be()).collect();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 0);
let expected = hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
let mut out = out_bits.into_iter();
for b in expected.into_iter() {
for i in (0..8).rev() {
let c = out.next().unwrap().get_value().unwrap();
assert_eq!(c, (b >> i) & 1u8 == 1u8);
}
}
}
#[test]
fn test_full_block() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let iv = get_sha256_iv();
let mut cs = TestConstraintSystem::<Bls12>::new();
let input_bits: Vec<_> = (0..512).map(|i| {
Boolean::from(
AllocatedBit::alloc(
cs.namespace(|| format!("input bit {}", i)),
Some(rng.gen())
).unwrap()
)
}).collect();
sha256_compression_function(
cs.namespace(|| "sha256"),
&input_bits,
&iv
).unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints() - 512, 25840);
}
#[test]
fn test_against_vectors() {
use crypto::sha2::Sha256;
use crypto::digest::Digest;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0))
{
let mut h = Sha256::new();
let data: Vec<u8> = (0..input_len).map(|_| rng.gen()).collect();
h.input(&data);
let mut hash_result = [0u8; 32];
h.result(&mut hash_result[..]);
let mut cs = TestConstraintSystem::<Bls12>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in (0..8).rev() {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8)).unwrap().into());
}
}
let r = sha256(&mut cs, &input_bits).unwrap();
assert!(cs.is_satisfied());
let mut s = hash_result.as_ref().iter()
.flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
},
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
},
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);
}
}
}
}
}
}


@ -0,0 +1,42 @@
use pairing::{Engine};
use bellman::{ConstraintSystem, SynthesisError};
use circuit::sha256::{
sha256
};
use circuit::boolean::{
Boolean
};
pub fn note_comm<E, CS>(
cs: CS,
a_pk: &[Boolean],
value: &[Boolean],
rho: &[Boolean],
r: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
assert_eq!(a_pk.len(), 256);
assert_eq!(value.len(), 64);
assert_eq!(rho.len(), 256);
assert_eq!(r.len(), 256);
let mut image = vec![];
image.push(Boolean::constant(true));
image.push(Boolean::constant(false));
image.push(Boolean::constant(true));
image.push(Boolean::constant(true));
image.push(Boolean::constant(false));
image.push(Boolean::constant(false));
image.push(Boolean::constant(false));
image.push(Boolean::constant(false));
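// In big-endian bit order these eight constant bits form the
// leading byte 0xb0 of the commitment preimage.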
image.extend(a_pk.iter().cloned());
image.extend(value.iter().cloned());
image.extend(rho.iter().cloned());
image.extend(r.iter().cloned());
sha256(
cs,
&image
)
}


@ -0,0 +1,226 @@
use pairing::{Engine};
use bellman::{ConstraintSystem, SynthesisError};
use circuit::sha256::{
sha256_block_no_padding
};
use circuit::boolean::{
AllocatedBit,
Boolean
};
use super::*;
use super::prfs::*;
use super::commitment::note_comm;
pub struct InputNote {
pub nf: Vec<Boolean>,
pub mac: Vec<Boolean>,
}
impl InputNote {
pub fn compute<E, CS>(
mut cs: CS,
a_sk: Option<SpendingKey>,
rho: Option<UniqueRandomness>,
r: Option<CommitmentRandomness>,
value: &NoteValue,
h_sig: &[Boolean],
nonce: bool,
auth_path: [Option<([u8; 32], bool)>; TREE_DEPTH],
rt: &[Boolean]
) -> Result<InputNote, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
let a_sk = witness_u252(
cs.namespace(|| "a_sk"),
a_sk.as_ref().map(|a_sk| &a_sk.0[..])
)?;
let rho = witness_u256(
cs.namespace(|| "rho"),
rho.as_ref().map(|rho| &rho.0[..])
)?;
let r = witness_u256(
cs.namespace(|| "r"),
r.as_ref().map(|r| &r.0[..])
)?;
let a_pk = prf_a_pk(
cs.namespace(|| "a_pk computation"),
&a_sk
)?;
let nf = prf_nf(
cs.namespace(|| "nf computation"),
&a_sk,
&rho
)?;
let mac = prf_pk(
cs.namespace(|| "mac computation"),
&a_sk,
h_sig,
nonce
)?;
let cm = note_comm(
cs.namespace(|| "cm computation"),
&a_pk,
&value.bits_le(),
&rho,
&r
)?;
// Witness into the merkle tree
let mut cur = cm.clone();
for (i, layer) in auth_path.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("layer {}", i));
let cur_is_right = AllocatedBit::alloc(
cs.namespace(|| "cur is right"),
layer.as_ref().map(|&(_, p)| p)
)?;
let lhs = cur;
let rhs = witness_u256(
cs.namespace(|| "sibling"),
layer.as_ref().map(|&(ref sibling, _)| &sibling[..])
)?;
// Conditionally swap if cur is right
let preimage = conditionally_swap_u256(
cs.namespace(|| "conditional swap"),
&lhs[..],
&rhs[..],
&cur_is_right
)?;
cur = sha256_block_no_padding(
cs.namespace(|| "hash of this layer"),
&preimage
)?;
}
// enforce must be true if the value is nonzero
let enforce = AllocatedBit::alloc(
cs.namespace(|| "enforce"),
value.get_value().map(|n| n != 0)
)?;
// value * (1 - enforce) = 0
// If `value` is zero, `enforce` _can_ be zero.
// If `value` is nonzero, `enforce` _must_ be one.
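// (A prover may also set `enforce` to one for a zero-valued note;
// that only adds the stricter root check below, so soundness is
// unaffected.)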
cs.enforce(
|| "enforce validity",
|_| value.lc(),
|lc| lc + CS::one() - enforce.get_variable(),
|lc| lc
);
assert_eq!(cur.len(), rt.len());
// Check that the anchor (exposed as a public input)
// is equal to the merkle tree root that we calculated
// for this note
for (i, (cur, rt)) in cur.into_iter().zip(rt.iter()).enumerate() {
// (cur - rt) * enforce = 0
// if enforce is zero, cur and rt can be different
// if enforce is one, they must be equal
cs.enforce(
|| format!("conditionally enforce correct root for bit {}", i),
|_| cur.lc(CS::one(), E::Fr::one()) - &rt.lc(CS::one(), E::Fr::one()),
|lc| lc + enforce.get_variable(),
|lc| lc
);
}
Ok(InputNote {
mac: mac,
nf: nf
})
}
}
/// Swaps two 256-bit blobs conditionally, returning the
/// 512-bit concatenation.
pub fn conditionally_swap_u256<E, CS>(
mut cs: CS,
lhs: &[Boolean],
rhs: &[Boolean],
condition: &AllocatedBit
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
assert_eq!(lhs.len(), 256);
assert_eq!(rhs.len(), 256);
let mut new_lhs = vec![];
let mut new_rhs = vec![];
for (i, (lhs, rhs)) in lhs.iter().zip(rhs.iter()).enumerate() {
let cs = &mut cs.namespace(|| format!("bit {}", i));
let x = Boolean::from(AllocatedBit::alloc(
cs.namespace(|| "x"),
condition.get_value().and_then(|v| {
if v {
rhs.get_value()
} else {
lhs.get_value()
}
})
)?);
// x = (1-condition)lhs + (condition)rhs
// x = lhs - lhs(condition) + rhs(condition)
// x - lhs = condition (rhs - lhs)
// if condition is zero, we don't swap, so
// x - lhs = 0
// x = lhs
// if condition is one, we do swap, so
// x - lhs = rhs - lhs
// x = rhs
cs.enforce(
|| "conditional swap for x",
|lc| lc + &rhs.lc(CS::one(), E::Fr::one())
- &lhs.lc(CS::one(), E::Fr::one()),
|lc| lc + condition.get_variable(),
|lc| lc + &x.lc(CS::one(), E::Fr::one())
- &lhs.lc(CS::one(), E::Fr::one())
);
let y = Boolean::from(AllocatedBit::alloc(
cs.namespace(|| "y"),
condition.get_value().and_then(|v| {
if v {
lhs.get_value()
} else {
rhs.get_value()
}
})
)?);
// y = (1-condition)rhs + (condition)lhs
// y - rhs = condition (lhs - rhs)
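// if condition is zero, we don't swap, so
//   y - rhs = 0
//   y = rhs
// if condition is one, we do swap, so
//   y - rhs = lhs - rhs
//   y = lhs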
cs.enforce(
|| "conditional swap for y",
|lc| lc + &lhs.lc(CS::one(), E::Fr::one())
- &rhs.lc(CS::one(), E::Fr::one()),
|lc| lc + condition.get_variable(),
|lc| lc + &y.lc(CS::one(), E::Fr::one())
- &rhs.lc(CS::one(), E::Fr::one())
);
new_lhs.push(x);
new_rhs.push(y);
}
let mut f = new_lhs;
f.extend(new_rhs);
assert_eq!(f.len(), 512);
Ok(f)
}


@ -0,0 +1,488 @@
use pairing::{Engine, Field};
use bellman::{ConstraintSystem, SynthesisError, Circuit, LinearCombination};
use circuit::boolean::{
AllocatedBit,
Boolean
};
use circuit::multipack::pack_into_inputs;
mod prfs;
mod commitment;
mod input;
mod output;
use self::input::*;
use self::output::*;
pub const TREE_DEPTH: usize = 29;
pub struct SpendingKey(pub [u8; 32]);
pub struct PayingKey(pub [u8; 32]);
pub struct UniqueRandomness(pub [u8; 32]);
pub struct CommitmentRandomness(pub [u8; 32]);
pub struct JoinSplit {
pub vpub_old: Option<u64>,
pub vpub_new: Option<u64>,
pub h_sig: Option<[u8; 32]>,
pub phi: Option<[u8; 32]>,
pub inputs: Vec<JSInput>,
pub outputs: Vec<JSOutput>,
pub rt: Option<[u8; 32]>,
}
pub struct JSInput {
pub value: Option<u64>,
pub a_sk: Option<SpendingKey>,
pub rho: Option<UniqueRandomness>,
pub r: Option<CommitmentRandomness>,
pub auth_path: [Option<([u8; 32], bool)>; TREE_DEPTH]
}
pub struct JSOutput {
pub value: Option<u64>,
pub a_pk: Option<PayingKey>,
pub r: Option<CommitmentRandomness>
}
impl<E: Engine> Circuit<E> for JoinSplit {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
assert_eq!(self.inputs.len(), 2);
assert_eq!(self.outputs.len(), 2);
// vpub_old is the value entering the
// JoinSplit from the "outside" value
// pool
let vpub_old = NoteValue::new(
cs.namespace(|| "vpub_old"),
self.vpub_old
)?;
// vpub_new is the value leaving the
// JoinSplit into the "outside" value
// pool
let vpub_new = NoteValue::new(
cs.namespace(|| "vpub_new"),
self.vpub_new
)?;
// The left hand side of the balance equation
// vpub_old + inputs[0].value + inputs[1].value
let mut lhs = vpub_old.lc();
// The right hand side of the balance equation
// vpub_new + outputs[0].value + outputs[1].value
let mut rhs = vpub_new.lc();
// Witness rt (merkle tree root)
let rt = witness_u256(
cs.namespace(|| "rt"),
self.rt.as_ref().map(|v| &v[..])
).unwrap();
// Witness h_sig
let h_sig = witness_u256(
cs.namespace(|| "h_sig"),
self.h_sig.as_ref().map(|v| &v[..])
).unwrap();
// Witness phi
let phi = witness_u252(
cs.namespace(|| "phi"),
self.phi.as_ref().map(|v| &v[..])
).unwrap();
let mut input_notes = vec![];
let mut lhs_total = self.vpub_old;
// Iterate over the JoinSplit inputs
for (i, input) in self.inputs.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("input {}", i));
// Accumulate the value of the left hand side
if let Some(value) = input.value {
lhs_total = lhs_total.map(|v| v.wrapping_add(value));
}
// Allocate the value of the note
let value = NoteValue::new(
cs.namespace(|| "value"),
input.value
)?;
// Compute the nonce (for PRF inputs) which is false
// for the first input, and true for the second input.
let nonce = match i {
0 => false,
1 => true,
_ => unreachable!()
};
// Perform input note computations
input_notes.push(InputNote::compute(
cs.namespace(|| "note"),
input.a_sk,
input.rho,
input.r,
&value,
&h_sig,
nonce,
input.auth_path,
&rt
)?);
// Add the note value to the left hand side of
// the balance equation
lhs = lhs + &value.lc();
}
// Rebind lhs so that it isn't mutable anymore
let lhs = lhs;
// See zcash/zcash/issues/854
{
// Expected sum of the left hand side of the balance
// equation, expressed as a 64-bit unsigned integer
let lhs_total = NoteValue::new(
cs.namespace(|| "total value of left hand side"),
lhs_total
)?;
// Enforce that the left hand side can be expressed as a 64-bit
// integer
cs.enforce(
|| "left hand side can be expressed as a 64-bit unsigned integer",
|_| lhs.clone(),
|lc| lc + CS::one(),
|_| lhs_total.lc()
);
}
let mut output_notes = vec![];
// Iterate over the JoinSplit outputs
for (i, output) in self.outputs.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("output {}", i));
let value = NoteValue::new(
cs.namespace(|| "value"),
output.value
)?;
// Compute the nonce (for PRF inputs) which is false
// for the first output, and true for the second output.
let nonce = match i {
0 => false,
1 => true,
_ => unreachable!()
};
// Perform output note computations
output_notes.push(OutputNote::compute(
cs.namespace(|| "note"),
output.a_pk,
&value,
output.r,
&phi,
&h_sig,
nonce
)?);
// Add the note value to the right hand side of
// the balance equation
rhs = rhs + &value.lc();
}
// Enforce that balance is equal
cs.enforce(
|| "balance equation",
|_| lhs.clone(),
|lc| lc + CS::one(),
|_| rhs
);
let mut public_inputs = vec![];
public_inputs.extend(rt);
public_inputs.extend(h_sig);
for note in input_notes {
public_inputs.extend(note.nf);
public_inputs.extend(note.mac);
}
for note in output_notes {
public_inputs.extend(note.cm);
}
public_inputs.extend(vpub_old.bits_le());
public_inputs.extend(vpub_new.bits_le());
pack_into_inputs(cs.namespace(|| "input packing"), &public_inputs)
}
}
pub struct NoteValue {
value: Option<u64>,
// Least significant digit first
bits: Vec<AllocatedBit>
}
impl NoteValue {
fn new<E, CS>(
mut cs: CS,
value: Option<u64>
) -> Result<NoteValue, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
let mut values;
match value {
Some(mut val) => {
values = vec![];
for _ in 0..64 {
values.push(Some(val & 1 == 1));
val >>= 1;
}
},
None => {
values = vec![None; 64];
}
}
let mut bits = vec![];
for (i, value) in values.into_iter().enumerate() {
bits.push(
AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
value
)?
);
}
Ok(NoteValue {
value: value,
bits: bits
})
}
/// Encodes the value as bits: bytes in little-endian order,
/// with the bits of each byte most significant first.
fn bits_le(&self) -> Vec<Boolean> {
self.bits.chunks(8)
.flat_map(|v| v.iter().rev())
.cloned()
.map(|e| Boolean::from(e))
.collect()
}
/// Computes this value as a linear combination of
/// its bits.
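/// The result equals sum_i 2^i * b_i, i.e. the u64 value viewed
/// as a field element.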
fn lc<E: Engine>(&self) -> LinearCombination<E> {
let mut tmp = LinearCombination::zero();
let mut coeff = E::Fr::one();
for b in &self.bits {
tmp = tmp + (coeff, b.get_variable());
coeff.double();
}
tmp
}
fn get_value(&self) -> Option<u64> {
self.value
}
}
/// Witnesses the bits of some bytes in the constraint system,
/// skipping the first `skip_bits` bits.
fn witness_bits<E, CS>(
mut cs: CS,
value: Option<&[u8]>,
num_bits: usize,
skip_bits: usize
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
let bit_values = if let Some(value) = value {
let mut tmp = vec![];
for b in value.iter()
.flat_map(|&m| (0..8).rev().map(move |i| m >> i & 1 == 1))
.skip(skip_bits)
{
tmp.push(Some(b));
}
tmp
} else {
vec![None; num_bits]
};
assert_eq!(bit_values.len(), num_bits);
let mut bits = vec![];
for (i, value) in bit_values.into_iter().enumerate() {
bits.push(Boolean::from(AllocatedBit::alloc(
cs.namespace(|| format!("bit {}", i)),
value
)?));
}
Ok(bits)
}
fn witness_u256<E, CS>(
cs: CS,
value: Option<&[u8]>,
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
witness_bits(cs, value, 256, 0)
}
fn witness_u252<E, CS>(
cs: CS,
value: Option<&[u8]>,
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
witness_bits(cs, value, 252, 4)
}
#[test]
fn test_sprout_constraints() {
use pairing::bls12_381::{Bls12};
use ::circuit::test::*;
use byteorder::{WriteBytesExt, ReadBytesExt, LittleEndian};
let test_vector = include_bytes!("test_vectors.dat");
let mut test_vector = &test_vector[..];
fn get_u256<R: ReadBytesExt>(mut reader: R) -> [u8; 32] {
let mut result = [0u8; 32];
for i in 0..32 {
result[i] = reader.read_u8().unwrap();
}
result
}
while test_vector.len() != 0 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let phi = Some(get_u256(&mut test_vector));
let rt = Some(get_u256(&mut test_vector));
let h_sig = Some(get_u256(&mut test_vector));
let mut inputs = vec![];
for _ in 0..2 {
test_vector.read_u8().unwrap();
let mut auth_path = [None; TREE_DEPTH];
for i in (0..TREE_DEPTH).rev() {
test_vector.read_u8().unwrap();
let sibling = get_u256(&mut test_vector);
auth_path[i] = Some((sibling, false));
}
let mut position = test_vector.read_u64::<LittleEndian>().unwrap();
for i in 0..TREE_DEPTH {
auth_path[i].as_mut().map(|p| {
p.1 = (position & 1) == 1
});
position >>= 1;
}
// a_pk
let _ = Some(SpendingKey(get_u256(&mut test_vector)));
let value = Some(test_vector.read_u64::<LittleEndian>().unwrap());
let rho = Some(UniqueRandomness(get_u256(&mut test_vector)));
let r = Some(CommitmentRandomness(get_u256(&mut test_vector)));
let a_sk = Some(SpendingKey(get_u256(&mut test_vector)));
inputs.push(
JSInput {
value: value,
a_sk: a_sk,
rho: rho,
r: r,
auth_path: auth_path
}
);
}
let mut outputs = vec![];
for _ in 0..2 {
let a_pk = Some(PayingKey(get_u256(&mut test_vector)));
let value = Some(test_vector.read_u64::<LittleEndian>().unwrap());
get_u256(&mut test_vector);
let r = Some(CommitmentRandomness(get_u256(&mut test_vector)));
outputs.push(
JSOutput {
value: value,
a_pk: a_pk,
r: r
}
);
}
let vpub_old = Some(test_vector.read_u64::<LittleEndian>().unwrap());
let vpub_new = Some(test_vector.read_u64::<LittleEndian>().unwrap());
let nf1 = get_u256(&mut test_vector);
let nf2 = get_u256(&mut test_vector);
let cm1 = get_u256(&mut test_vector);
let cm2 = get_u256(&mut test_vector);
let mac1 = get_u256(&mut test_vector);
let mac2 = get_u256(&mut test_vector);
let js = JoinSplit {
vpub_old: vpub_old,
vpub_new: vpub_new,
h_sig: h_sig,
phi: phi,
inputs: inputs,
outputs: outputs,
rt: rt
};
js.synthesize(&mut cs).unwrap();
if let Some(s) = cs.which_is_unsatisfied() {
panic!("{:?}", s);
}
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 1989085);
assert_eq!(cs.num_inputs(), 10);
assert_eq!(cs.hash(), "1a228d3c6377130d1778c7885811dc8b8864049cb5af8aff7e6cd46c5bc4b84c");
let mut expected_inputs = vec![];
expected_inputs.extend(rt.unwrap().to_vec());
expected_inputs.extend(h_sig.unwrap().to_vec());
expected_inputs.extend(nf1.to_vec());
expected_inputs.extend(mac1.to_vec());
expected_inputs.extend(nf2.to_vec());
expected_inputs.extend(mac2.to_vec());
expected_inputs.extend(cm1.to_vec());
expected_inputs.extend(cm2.to_vec());
expected_inputs.write_u64::<LittleEndian>(vpub_old.unwrap()).unwrap();
expected_inputs.write_u64::<LittleEndian>(vpub_new.unwrap()).unwrap();
use circuit::multipack;
let expected_inputs = multipack::bytes_to_bits(&expected_inputs);
let expected_inputs = multipack::compute_multipacking::<Bls12>(&expected_inputs);
assert!(cs.verify(&expected_inputs));
}
}


@ -0,0 +1,54 @@
use pairing::{Engine};
use bellman::{ConstraintSystem, SynthesisError};
use circuit::boolean::{Boolean};
use super::*;
use super::prfs::*;
use super::commitment::note_comm;
pub struct OutputNote {
pub cm: Vec<Boolean>
}
impl OutputNote {
pub fn compute<'a, E, CS>(
mut cs: CS,
a_pk: Option<PayingKey>,
value: &NoteValue,
r: Option<CommitmentRandomness>,
phi: &[Boolean],
h_sig: &[Boolean],
nonce: bool
) -> Result<Self, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>,
{
let rho = prf_rho(
cs.namespace(|| "rho"),
phi,
h_sig,
nonce
)?;
let a_pk = witness_u256(
cs.namespace(|| "a_pk"),
a_pk.as_ref().map(|a_pk| &a_pk.0[..])
)?;
let r = witness_u256(
cs.namespace(|| "r"),
r.as_ref().map(|r| &r.0[..])
)?;
let cm = note_comm(
cs.namespace(|| "cm computation"),
&a_pk,
&value.bits_le(),
&rho,
&r
)?;
Ok(OutputNote {
cm: cm
})
}
}


@ -0,0 +1,79 @@
use pairing::{Engine};
use bellman::{ConstraintSystem, SynthesisError};
use circuit::sha256::{
sha256_block_no_padding
};
use circuit::boolean::{
Boolean
};
fn prf<E, CS>(
cs: CS,
a: bool,
b: bool,
c: bool,
d: bool,
x: &[Boolean],
y: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
assert_eq!(x.len(), 252);
assert_eq!(y.len(), 256);
let mut image = vec![];
image.push(Boolean::constant(a));
image.push(Boolean::constant(b));
image.push(Boolean::constant(c));
image.push(Boolean::constant(d));
image.extend(x.iter().cloned());
image.extend(y.iter().cloned());
assert_eq!(image.len(), 512);
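// The four leading constant bits act as a domain-separation tag
// that distinguishes the PRF variants defined below.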
sha256_block_no_padding(
cs,
&image
)
}
pub fn prf_a_pk<E, CS>(
cs: CS,
a_sk: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
prf(cs, true, true, false, false, a_sk, &(0..256).map(|_| Boolean::constant(false)).collect::<Vec<_>>())
}
pub fn prf_nf<E, CS>(
cs: CS,
a_sk: &[Boolean],
rho: &[Boolean]
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
prf(cs, true, true, true, false, a_sk, rho)
}
pub fn prf_pk<E, CS>(
cs: CS,
a_sk: &[Boolean],
h_sig: &[Boolean],
nonce: bool
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
prf(cs, false, nonce, false, false, a_sk, h_sig)
}
pub fn prf_rho<E, CS>(
cs: CS,
phi: &[Boolean],
h_sig: &[Boolean],
nonce: bool
) -> Result<Vec<Boolean>, SynthesisError>
where E: Engine, CS: ConstraintSystem<E>
{
prf(cs, false, nonce, true, false, phi, h_sig)
}

Binary file not shown.


@ -0,0 +1,492 @@
use pairing::{
Engine,
Field,
PrimeField,
PrimeFieldRepr
};
use bellman::{
LinearCombination,
SynthesisError,
ConstraintSystem,
Variable,
Index
};
use std::collections::HashMap;
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use blake2_rfc::blake2s::Blake2s;
#[derive(Debug)]
enum NamedObject {
Constraint(usize),
Var(Variable),
Namespace
}
/// Constraint system for testing purposes.
pub struct TestConstraintSystem<E: Engine> {
named_objects: HashMap<String, NamedObject>,
current_namespace: Vec<String>,
constraints: Vec<(
LinearCombination<E>,
LinearCombination<E>,
LinearCombination<E>,
String
)>,
inputs: Vec<(E::Fr, String)>,
aux: Vec<(E::Fr, String)>
}
#[derive(Clone, Copy)]
struct OrderedVariable(Variable);
impl Eq for OrderedVariable {}
impl PartialEq for OrderedVariable {
fn eq(&self, other: &OrderedVariable) -> bool {
match (self.0.get_unchecked(), other.0.get_unchecked()) {
(Index::Input(ref a), Index::Input(ref b)) => a == b,
(Index::Aux(ref a), Index::Aux(ref b)) => a == b,
_ => false
}
}
}
impl PartialOrd for OrderedVariable {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for OrderedVariable {
fn cmp(&self, other: &Self) -> Ordering {
match (self.0.get_unchecked(), other.0.get_unchecked()) {
(Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),
(Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),
(Index::Input(_), Index::Aux(_)) => Ordering::Less,
(Index::Aux(_), Index::Input(_)) => Ordering::Greater
}
}
}
fn proc_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
) -> BTreeMap<OrderedVariable, E::Fr>
{
let mut map = BTreeMap::new();
for &(var, coeff) in terms {
map.entry(OrderedVariable(var))
.or_insert(E::Fr::zero())
.add_assign(&coeff);
}
// Remove terms that have a zero coefficient to normalize
let mut to_remove = vec![];
for (var, coeff) in map.iter() {
if coeff.is_zero() {
to_remove.push(var.clone())
}
}
for var in to_remove {
map.remove(&var);
}
map
}
fn hash_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
h: &mut Blake2s
)
{
let map = proc_lc::<E>(terms);
let mut buf = [0u8; 9 + 32];
BigEndian::write_u64(&mut buf[0..8], map.len() as u64);
h.update(&buf[0..8]);
for (var, coeff) in map {
match var.0.get_unchecked() {
Index::Input(i) => {
buf[0] = b'I';
BigEndian::write_u64(&mut buf[1..9], i as u64);
},
Index::Aux(i) => {
buf[0] = b'A';
BigEndian::write_u64(&mut buf[1..9], i as u64);
}
}
coeff.into_repr().write_be(&mut buf[9..]).unwrap();
h.update(&buf);
}
}
fn eval_lc<E: Engine>(
terms: &[(Variable, E::Fr)],
inputs: &[(E::Fr, String)],
aux: &[(E::Fr, String)]
) -> E::Fr
{
let mut acc = E::Fr::zero();
for &(var, ref coeff) in terms {
let mut tmp = match var.get_unchecked() {
Index::Input(index) => inputs[index].0,
Index::Aux(index) => aux[index].0
};
tmp.mul_assign(&coeff);
acc.add_assign(&tmp);
}
acc
}
impl<E: Engine> TestConstraintSystem<E> {
pub fn new() -> TestConstraintSystem<E> {
let mut map = HashMap::new();
map.insert("ONE".into(), NamedObject::Var(TestConstraintSystem::<E>::one()));
TestConstraintSystem {
named_objects: map,
current_namespace: vec![],
constraints: vec![],
inputs: vec![(E::Fr::one(), "ONE".into())],
aux: vec![]
}
}
pub fn pretty_print(&self) -> String {
let mut s = String::new();
let negone = {
let mut tmp = E::Fr::one();
tmp.negate();
tmp
};
let powers_of_two = (0..E::Fr::NUM_BITS).map(|i| {
E::Fr::from_str("2").unwrap().pow(&[i as u64])
}).collect::<Vec<_>>();
let pp = |s: &mut String, lc: &LinearCombination<E>| {
write!(s, "(").unwrap();
let mut is_first = true;
for (var, coeff) in proc_lc::<E>(lc.as_ref()) {
if coeff == negone {
write!(s, " - ").unwrap();
} else if !is_first {
write!(s, " + ").unwrap();
}
is_first = false;
if coeff != E::Fr::one() && coeff != negone {
for (i, x) in powers_of_two.iter().enumerate() {
if x == &coeff {
write!(s, "2^{} . ", i).unwrap();
break;
}
}
write!(s, "{} . ", coeff).unwrap();
}
match var.0.get_unchecked() {
Index::Input(i) => {
write!(s, "`{}`", &self.inputs[i].1).unwrap();
},
Index::Aux(i) => {
write!(s, "`{}`", &self.aux[i].1).unwrap();
}
}
}
if is_first {
// Nothing was visited, print 0.
write!(s, "0").unwrap();
}
write!(s, ")").unwrap();
};
for &(ref a, ref b, ref c, ref name) in &self.constraints {
write!(&mut s, "\n").unwrap();
write!(&mut s, "{}: ", name).unwrap();
pp(&mut s, a);
write!(&mut s, " * ").unwrap();
pp(&mut s, b);
write!(&mut s, " = ").unwrap();
pp(&mut s, c);
}
write!(&mut s, "\n").unwrap();
s
}
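/// Returns a BLAKE2s fingerprint of the constraint system's shape
/// (inputs, aux variables and constraints), used by the circuit
/// tests to detect unintended changes.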
pub fn hash(&self) -> String {
let mut h = Blake2s::new(32);
{
let mut buf = [0u8; 24];
BigEndian::write_u64(&mut buf[0..8], self.inputs.len() as u64);
BigEndian::write_u64(&mut buf[8..16], self.aux.len() as u64);
BigEndian::write_u64(&mut buf[16..24], self.constraints.len() as u64);
h.update(&buf);
}
for constraint in &self.constraints {
hash_lc::<E>(constraint.0.as_ref(), &mut h);
hash_lc::<E>(constraint.1.as_ref(), &mut h);
hash_lc::<E>(constraint.2.as_ref(), &mut h);
}
let mut s = String::new();
for b in h.finalize().as_ref() {
s += &format!("{:02x}", b);
}
s
}
pub fn which_is_unsatisfied(&self) -> Option<&str> {
for &(ref a, ref b, ref c, ref path) in &self.constraints {
let mut a = eval_lc::<E>(a.as_ref(), &self.inputs, &self.aux);
let b = eval_lc::<E>(b.as_ref(), &self.inputs, &self.aux);
let c = eval_lc::<E>(c.as_ref(), &self.inputs, &self.aux);
a.mul_assign(&b);
if a != c {
return Some(&*path)
}
}
None
}
pub fn is_satisfied(&self) -> bool
{
self.which_is_unsatisfied().is_none()
}
pub fn num_constraints(&self) -> usize
{
self.constraints.len()
}
pub fn set(&mut self, path: &str, to: E::Fr)
{
match self.named_objects.get(path) {
Some(&NamedObject::Var(ref v)) => {
match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0 = to,
Index::Aux(index) => self.aux[index].0 = to
}
}
Some(e) => panic!("tried to set path `{}` to value, but `{:?}` already exists there.", path, e),
_ => panic!("no variable exists at path: {}", path)
}
}
pub fn verify(&self, expected: &[E::Fr]) -> bool
{
assert_eq!(expected.len() + 1, self.inputs.len());
for (a, b) in self.inputs.iter().skip(1).zip(expected.iter())
{
if &a.0 != b {
return false
}
}
return true;
}
pub fn num_inputs(&self) -> usize {
self.inputs.len()
}
pub fn get_input(&mut self, index: usize, path: &str) -> E::Fr
{
let (assignment, name) = self.inputs[index].clone();
assert_eq!(path, name);
assignment
}
pub fn get(&mut self, path: &str) -> E::Fr
{
match self.named_objects.get(path) {
Some(&NamedObject::Var(ref v)) => {
match v.get_unchecked() {
Index::Input(index) => self.inputs[index].0,
Index::Aux(index) => self.aux[index].0
}
}
Some(e) => panic!("tried to get value of path `{}`, but `{:?}` exists there (not a variable)", path, e),
_ => panic!("no variable exists at path: {}", path)
}
}
fn set_named_obj(&mut self, path: String, to: NamedObject) {
if self.named_objects.contains_key(&path) {
panic!("tried to create object at existing path: {}", path);
}
self.named_objects.insert(path, to);
}
}
fn compute_path(ns: &[String], this: String) -> String {
if this.chars().any(|a| a == '/') {
panic!("'/' is not allowed in names");
}
let mut name = String::new();
let mut needs_separation = false;
for ns in ns.iter().chain(Some(&this).into_iter())
{
if needs_separation {
name += "/";
}
name += ns;
needs_separation = true;
}
name
}
impl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
let index = self.aux.len();
let path = compute_path(&self.current_namespace, annotation().into());
self.aux.push((f()?, path.clone()));
let var = Variable::new_unchecked(Index::Aux(index));
self.set_named_obj(path, NamedObject::Var(var));
Ok(var)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
let index = self.inputs.len();
let path = compute_path(&self.current_namespace, annotation().into());
self.inputs.push((f()?, path.clone()));
let var = Variable::new_unchecked(Index::Input(index));
self.set_named_obj(path, NamedObject::Var(var));
Ok(var)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
let path = compute_path(&self.current_namespace, annotation().into());
let index = self.constraints.len();
self.set_named_obj(path.clone(), NamedObject::Constraint(index));
let a = a(LinearCombination::zero());
let b = b(LinearCombination::zero());
let c = c(LinearCombination::zero());
self.constraints.push((a, b, c, path));
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
{
let name = name_fn().into();
let path = compute_path(&self.current_namespace, name.clone());
self.set_named_obj(path.clone(), NamedObject::Namespace);
self.current_namespace.push(name);
}
fn pop_namespace(&mut self)
{
assert!(self.current_namespace.pop().is_some());
}
fn get_root(&mut self) -> &mut Self::Root
{
self
}
}
#[test]
fn test_cs() {
use pairing::bls12_381::{Bls12, Fr};
use pairing::PrimeField;
let mut cs = TestConstraintSystem::<Bls12>::new();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 0);
let a = cs.namespace(|| "a").alloc(|| "var", || Ok(Fr::from_str("10").unwrap())).unwrap();
let b = cs.namespace(|| "b").alloc(|| "var", || Ok(Fr::from_str("4").unwrap())).unwrap();
let c = cs.alloc(|| "product", || Ok(Fr::from_str("40").unwrap())).unwrap();
cs.enforce(
|| "mult",
|lc| lc + a,
|lc| lc + b,
|lc| lc + c
);
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 1);
cs.set("a/var", Fr::from_str("4").unwrap());
let one = TestConstraintSystem::<Bls12>::one();
cs.enforce(
|| "eq",
|lc| lc + a,
|lc| lc + one,
|lc| lc + b
);
assert!(!cs.is_satisfied());
assert!(cs.which_is_unsatisfied() == Some("mult"));
assert!(cs.get("product") == Fr::from_str("40").unwrap());
cs.set("product", Fr::from_str("16").unwrap());
assert!(cs.is_satisfied());
{
let mut cs = cs.namespace(|| "test1");
let mut cs = cs.namespace(|| "test2");
cs.alloc(|| "hehe", || Ok(Fr::one())).unwrap();
}
assert!(cs.get("test1/test2/hehe") == Fr::one());
}


@ -0,0 +1,755 @@
use pairing::{
Engine,
Field,
PrimeField
};
use bellman::{
SynthesisError,
ConstraintSystem,
LinearCombination
};
use super::boolean::{
Boolean,
AllocatedBit
};
use super::multieq::MultiEq;
/// Represents an interpretation of 32 `Boolean` objects as an
/// unsigned integer.
#[derive(Clone)]
pub struct UInt32 {
// Least significant bit first
bits: Vec<Boolean>,
value: Option<u32>
}
impl UInt32 {
/// Construct a constant `UInt32` from a `u32`
pub fn constant(value: u32) -> Self
{
let mut bits = Vec::with_capacity(32);
let mut tmp = value;
for _ in 0..32 {
if tmp & 1 == 1 {
bits.push(Boolean::constant(true))
} else {
bits.push(Boolean::constant(false))
}
tmp >>= 1;
}
UInt32 {
bits: bits,
value: Some(value)
}
}
/// Allocate a `UInt32` in the constraint system
pub fn alloc<E, CS>(
mut cs: CS,
value: Option<u32>
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
{
let values = match value {
Some(mut val) => {
let mut v = Vec::with_capacity(32);
for _ in 0..32 {
v.push(Some(val & 1 == 1));
val >>= 1;
}
v
},
None => vec![None; 32]
};
let bits = values.into_iter()
.enumerate()
.map(|(i, v)| {
Ok(Boolean::from(AllocatedBit::alloc(
cs.namespace(|| format!("allocated bit {}", i)),
v
)?))
})
.collect::<Result<Vec<_>, SynthesisError>>()?;
Ok(UInt32 {
bits: bits,
value: value
})
}
pub fn into_bits_be(&self) -> Vec<Boolean> {
self.bits.iter().rev().cloned().collect()
}
pub fn from_bits_be(bits: &[Boolean]) -> Self {
assert_eq!(bits.len(), 32);
let mut value = Some(0u32);
for b in bits {
value.as_mut().map(|v| *v <<= 1);
match b.get_value() {
Some(true) => { value.as_mut().map(|v| *v |= 1); },
Some(false) => {},
None => { value = None; }
}
}
UInt32 {
value: value,
bits: bits.iter().rev().cloned().collect()
}
}
/// Turns this `UInt32` into its little-endian bit order representation.
pub fn into_bits(&self) -> Vec<Boolean> {
self.bits.clone()
}
/// Converts a little-endian bit order representation of bits into a
/// `UInt32`.
pub fn from_bits(bits: &[Boolean]) -> Self
{
assert_eq!(bits.len(), 32);
let new_bits = bits.to_vec();
let mut value = Some(0u32);
for b in new_bits.iter().rev() {
value.as_mut().map(|v| *v <<= 1);
match b {
&Boolean::Constant(b) => {
if b {
value.as_mut().map(|v| *v |= 1);
}
},
&Boolean::Is(ref b) => {
match b.get_value() {
Some(true) => { value.as_mut().map(|v| *v |= 1); },
Some(false) => {},
None => { value = None }
}
},
&Boolean::Not(ref b) => {
match b.get_value() {
Some(false) => { value.as_mut().map(|v| *v |= 1); },
Some(true) => {},
None => { value = None }
}
}
}
}
UInt32 {
value: value,
bits: new_bits
}
}
pub fn rotr(&self, by: usize) -> Self {
let by = by % 32;
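// Bits are stored least significant first, so dropping the first
// `by` bits and wrapping them around to the end rotates the value
// right by `by`.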
let new_bits = self.bits.iter()
.skip(by)
.chain(self.bits.iter())
.take(32)
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v.rotate_right(by as u32))
}
}
pub fn shr(&self, by: usize) -> Self {
let by = by % 32;
let fill = Boolean::constant(false);
let new_bits = self.bits
.iter() // The bits are least significant first
.skip(by) // Skip the bits that will be lost during the shift
.chain(Some(&fill).into_iter().cycle()) // Rest will be zeros
.take(32) // Only 32 bits needed!
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v >> by as u32)
}
}
fn triop<E, CS, F, U>(
mut cs: CS,
a: &Self,
b: &Self,
c: &Self,
tri_fn: F,
circuit_fn: U
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
F: Fn(u32, u32, u32) -> u32,
U: Fn(&mut CS, usize, &Boolean, &Boolean, &Boolean) -> Result<Boolean, SynthesisError>
{
let new_value = match (a.value, b.value, c.value) {
(Some(a), Some(b), Some(c)) => {
Some(tri_fn(a, b, c))
},
_ => None
};
let bits = a.bits.iter()
.zip(b.bits.iter())
.zip(c.bits.iter())
.enumerate()
.map(|(i, ((a, b), c))| circuit_fn(&mut cs, i, a, b, c))
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits: bits,
value: new_value
})
}
/// Compute the `maj` value (a and b) xor (a and c) xor (b and c)
/// during SHA256.
pub fn sha256_maj<E, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
{
Self::triop(cs, a, b, c, |a, b, c| (a & b) ^ (a & c) ^ (b & c),
|cs, i, a, b, c| {
Boolean::sha256_maj(
cs.namespace(|| format!("maj {}", i)),
a,
b,
c
)
}
)
}
/// Compute the `ch` value `(a and b) xor ((not a) and c)`
/// during SHA256.
pub fn sha256_ch<E, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
{
Self::triop(cs, a, b, c, |a, b, c| (a & b) ^ ((!a) & c),
|cs, i, a, b, c| {
Boolean::sha256_ch(
cs.namespace(|| format!("ch {}", i)),
a,
b,
c
)
}
)
}
/// XOR this `UInt32` with another `UInt32`
pub fn xor<E, CS>(
&self,
mut cs: CS,
other: &Self
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>
{
let new_value = match (self.value, other.value) {
(Some(a), Some(b)) => {
Some(a ^ b)
},
_ => None
};
let bits = self.bits.iter()
.zip(other.bits.iter())
.enumerate()
.map(|(i, (a, b))| {
Boolean::xor(
cs.namespace(|| format!("xor of bit {}", i)),
a,
b
)
})
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits: bits,
value: new_value
})
}
/// Perform modular addition of several `UInt32` objects.
pub fn addmany<E, CS, M>(
mut cs: M,
operands: &[Self]
) -> Result<Self, SynthesisError>
where E: Engine,
CS: ConstraintSystem<E>,
M: ConstraintSystem<E, Root=MultiEq<E, CS>>
{
// Make some arbitrary bounds for ourselves to avoid overflows
// in the scalar field
assert!(E::Fr::NUM_BITS >= 64);
assert!(operands.len() >= 2); // Weird trivial cases that should never happen
assert!(operands.len() <= 10);
// Compute the maximum value of the sum so we allocate enough bits for
// the result
let mut max_value = (operands.len() as u64) * (u32::max_value() as u64);
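// For example, with four operands the sum is at most
// 4 * (2^32 - 1) < 2^34, so at most 34 result bits are allocated;
// the carry bits above bit 31 are discarded below.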
// Keep track of the resulting value
let mut result_value = Some(0u64);
// This is a linear combination that we will enforce to equal the
// output
let mut lc = LinearCombination::zero();
let mut all_constants = true;
// Iterate over the operands
for op in operands {
// Accumulate the value
match op.value {
Some(val) => {
result_value.as_mut().map(|v| *v += val as u64);
},
None => {
// If any of our operands have unknown value, we won't
// know the value of the result
result_value = None;
}
}
// Iterate over each bit of the operand and add the operand to
// the linear combination
let mut coeff = E::Fr::one();
for bit in &op.bits {
lc = lc + &bit.lc(CS::one(), coeff);
all_constants &= bit.is_constant();
coeff.double();
}
}
// The value of the actual result is the full sum reduced modulo 2^32
let modular_value = result_value.map(|v| v as u32);
if all_constants && modular_value.is_some() {
// We can just return a constant, rather than
// unpacking the result into allocated bits.
return Ok(UInt32::constant(modular_value.unwrap()));
}
// Storage area for the resulting bits
let mut result_bits = vec![];
// Linear combination representing the output,
// for comparison with the sum of the operands
let mut result_lc = LinearCombination::zero();
// Allocate each bit of the result
let mut coeff = E::Fr::one();
let mut i = 0;
while max_value != 0 {
// Allocate the bit
let b = AllocatedBit::alloc(
cs.namespace(|| format!("result bit {}", i)),
result_value.map(|v| (v >> i) & 1 == 1)
)?;
// Add this bit to the result combination
result_lc = result_lc + (coeff, b.get_variable());
result_bits.push(b.into());
max_value >>= 1;
i += 1;
coeff.double();
}
// Enforce equality between the sum and result
cs.get_root().enforce_equal(i, &lc, &result_lc);
// Discard carry bits that we don't care about
result_bits.truncate(32);
Ok(UInt32 {
bits: result_bits,
value: modular_value
})
}
}
#[cfg(test)]
mod test {
use rand::{XorShiftRng, SeedableRng, Rng};
use ::circuit::boolean::{Boolean};
use super::{UInt32};
use pairing::bls12_381::{Bls12};
use pairing::{Field};
use ::circuit::test::*;
use bellman::{ConstraintSystem};
use circuit::multieq::MultiEq;
#[test]
fn test_uint32_from_bits_be() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
for _ in 0..1000 {
let mut v = (0..32).map(|_| Boolean::constant(rng.gen())).collect::<Vec<_>>();
let b = UInt32::from_bits_be(&v);
for (i, bit) in b.bits.iter().enumerate() {
match bit {
&Boolean::Constant(bit) => {
assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
},
_ => unreachable!()
}
}
let expected_to_be_same = b.into_bits_be();
for x in v.iter().zip(expected_to_be_same.iter())
{
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {},
(&Boolean::Constant(false), &Boolean::Constant(false)) => {},
_ => unreachable!()
}
}
}
}
#[test]
fn test_uint32_from_bits() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
for _ in 0..1000 {
let mut v = (0..32).map(|_| Boolean::constant(rng.gen())).collect::<Vec<_>>();
let b = UInt32::from_bits(&v);
for (i, bit) in b.bits.iter().enumerate() {
match bit {
&Boolean::Constant(bit) => {
assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
},
_ => unreachable!()
}
}
let expected_to_be_same = b.into_bits();
for x in v.iter().zip(expected_to_be_same.iter())
{
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {},
(&Boolean::Constant(false), &Boolean::Constant(false)) => {},
_ => unreachable!()
}
}
}
}
#[test]
fn test_uint32_xor() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let mut expected = a ^ b ^ c;
let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
let b_bit = UInt32::constant(b);
let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();
let r = a_bit.xor(cs.namespace(|| "first xor"), &b_bit).unwrap();
let r = r.xor(cs.namespace(|| "second xor"), &c_bit).unwrap();
assert!(cs.is_satisfied());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
}
expected >>= 1;
}
}
}
#[test]
fn test_uint32_addmany_constants() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let a_bit = UInt32::constant(a);
let b_bit = UInt32::constant(b);
let c_bit = UInt32::constant(c);
let mut expected = a.wrapping_add(b).wrapping_add(c);
let r = {
let mut cs = MultiEq::new(&mut cs);
let r = UInt32::addmany(cs.namespace(|| "addition"), &[a_bit, b_bit, c_bit]).unwrap();
r
};
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
&Boolean::Is(_) => panic!(),
&Boolean::Not(_) => panic!(),
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
}
expected >>= 1;
}
}
}
#[test]
fn test_uint32_addmany() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let d: u32 = rng.gen();
let mut expected = (a ^ b).wrapping_add(c).wrapping_add(d);
let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
let b_bit = UInt32::constant(b);
let c_bit = UInt32::constant(c);
let d_bit = UInt32::alloc(cs.namespace(|| "d_bit"), Some(d)).unwrap();
let r = a_bit.xor(cs.namespace(|| "xor"), &b_bit).unwrap();
let r = {
let mut cs = MultiEq::new(&mut cs);
let r = UInt32::addmany(cs.namespace(|| "addition"), &[r, c_bit, d_bit]).unwrap();
r
};
assert!(cs.is_satisfied());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Constant(_) => {
unreachable!()
}
}
expected >>= 1;
}
// Flip a bit and see if the addition constraint still works
if cs.get("addition/result bit 0/boolean").is_zero() {
cs.set("addition/result bit 0/boolean", Field::one());
} else {
cs.set("addition/result bit 0/boolean", Field::zero());
}
assert!(!cs.is_satisfied());
}
}
#[test]
fn test_uint32_rotr() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut num = rng.gen();
let a = UInt32::constant(num);
for i in 0..32 {
let b = a.rotr(i);
assert_eq!(a.bits.len(), b.bits.len());
assert!(b.value.unwrap() == num);
let mut tmp = num;
for b in &b.bits {
match b {
&Boolean::Constant(b) => {
assert_eq!(b, tmp & 1 == 1);
},
_ => unreachable!()
}
tmp >>= 1;
}
num = num.rotate_right(1);
}
}
#[test]
fn test_uint32_shr() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..50 {
for i in 0..60 {
let num = rng.gen();
let a = UInt32::constant(num).shr(i);
// Use wrapping_shr for the reference value so that it is defined for
// i >= 32 (a plain `num >> i` would panic with a shift overflow in
// debug builds).
let b = UInt32::constant(num.wrapping_shr(i as u32));
assert_eq!(a.value.unwrap(), num.wrapping_shr(i as u32));
assert_eq!(a.bits.len(), b.bits.len());
for (a, b) in a.bits.iter().zip(b.bits.iter()) {
assert_eq!(a.get_value().unwrap(), b.get_value().unwrap());
}
}
}
}
#[test]
fn test_uint32_sha256_maj() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let mut expected = (a & b) ^ (a & c) ^ (b & c);
let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
let b_bit = UInt32::constant(b);
let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();
let r = UInt32::sha256_maj(&mut cs, &a_bit, &b_bit, &c_bit).unwrap();
assert!(cs.is_satisfied());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
}
expected >>= 1;
}
}
}
#[test]
fn test_uint32_sha256_ch() {
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0653]);
for _ in 0..1000 {
let mut cs = TestConstraintSystem::<Bls12>::new();
let a: u32 = rng.gen();
let b: u32 = rng.gen();
let c: u32 = rng.gen();
let mut expected = (a & b) ^ ((!a) & c);
let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
let b_bit = UInt32::constant(b);
let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();
let r = UInt32::sha256_ch(&mut cs, &a_bit, &b_bit, &c_bit).unwrap();
assert!(cs.is_satisfied());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
&Boolean::Is(ref b) => {
assert!(b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Not(ref b) => {
assert!(!b.get_value().unwrap() == (expected & 1 == 1));
},
&Boolean::Constant(b) => {
assert!(b == (expected & 1 == 1));
}
}
expected >>= 1;
}
}
}
}

View File

@ -0,0 +1,40 @@
/// First 64 bytes of the BLAKE2s input during group hash.
/// This is chosen to be some random string that we couldn't have anticipated when we designed
/// the algorithm, for rigidity purposes.
/// We deliberately use the ASCII hex encoding of 32 bytes (64 characters) here.
pub const GH_FIRST_BLOCK: &'static [u8; 64]
= b"096b36a5804bfacef1691e173c366a47ff5ba84a44f26ddd7e8d9f79d5b42df0";
// BLAKE2s invocation personalizations
/// BLAKE2s Personalization for CRH^ivk = BLAKE2s(ak | nk)
pub const CRH_IVK_PERSONALIZATION: &'static [u8; 8]
= b"Zcashivk";
/// BLAKE2s Personalization for PRF^nf = BLAKE2s(nk | rho)
pub const PRF_NF_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_nf";
// Group hash personalizations
/// BLAKE2s Personalization for Pedersen hash generators.
pub const PEDERSEN_HASH_GENERATORS_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_PH";
/// BLAKE2s Personalization for the group hash for key diversification
pub const KEY_DIVERSIFICATION_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_gd";
/// BLAKE2s Personalization for the spending key base point
pub const SPENDING_KEY_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_G_";
/// BLAKE2s Personalization for the proof generation key base point
pub const PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_H_";
/// BLAKE2s Personalization for the value commitment generator for the value
pub const VALUE_COMMITMENT_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_cv";
/// BLAKE2s Personalization for the nullifier position generator (for computing rho)
pub const NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION: &'static [u8; 8]
= b"Zcash_J_";

View File

@ -0,0 +1,46 @@
use jubjub::{
JubjubEngine,
PrimeOrder,
edwards
};
use pairing::{
PrimeField
};
use blake2_rfc::blake2s::Blake2s;
use constants;
/// Produces a pseudo-random point on the Jubjub curve from the given tag
/// and personalization; any point returned is guaranteed to be of prime
/// order and not the identity. Returns `None` if the hash does not yield
/// such a point.
pub fn group_hash<E: JubjubEngine>(
tag: &[u8],
personalization: &[u8],
params: &E::Params
) -> Option<edwards::Point<E, PrimeOrder>>
{
assert_eq!(personalization.len(), 8);
// Check to see that scalar field is 255 bits
assert!(E::Fr::NUM_BITS == 255);
let mut h = Blake2s::with_params(32, &[], &[], personalization);
h.update(constants::GH_FIRST_BLOCK);
h.update(tag);
let h = h.finalize().as_ref().to_vec();
assert!(h.len() == 32);
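// Interpret the 32-byte digest as a compressed Edwards point (little-endian
// y-coordinate with the sign of x in the most significant bit); if it
// decodes, clear the cofactor and reject the identity.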
match edwards::Point::<E, _>::read(&h[..], params) {
Ok(p) => {
let p = p.mul_by_cofactor(params);
if p != edwards::Point::zero() {
Some(p)
} else {
None
}
},
Err(_) => None
}
}

View File

@ -0,0 +1,523 @@
use pairing::{
Field,
SqrtField,
PrimeField,
PrimeFieldRepr,
BitIterator
};
use super::{
JubjubEngine,
JubjubParams,
Unknown,
PrimeOrder,
montgomery
};
use rand::{
Rng
};
use std::marker::PhantomData;
use std::io::{
self,
Write,
Read
};
// Represents the affine point (X/Z, Y/Z) via the extended
// twisted Edwards coordinates.
//
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
pub struct Point<E: JubjubEngine, Subgroup> {
x: E::Fr,
y: E::Fr,
t: E::Fr,
z: E::Fr,
_marker: PhantomData<Subgroup>
}
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2>
{
Point {
x: from.x,
y: from.y,
t: from.t,
z: from.z,
_marker: PhantomData
}
}
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown>
{
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown>
{
convert_subgroup(&p)
}
}
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup>
{
fn clone(&self) -> Self {
convert_subgroup(self)
}
}
impl<E: JubjubEngine, Subgroup> PartialEq for Point<E, Subgroup> {
fn eq(&self, other: &Point<E, Subgroup>) -> bool {
// p1 = (x1/z1, y1/z1)
// p2 = (x2/z2, y2/z2)
// Deciding that these two points are equal is a matter of
// determining that x1/z1 = x2/z2, or equivalently that
// x1*z2 = x2*z1, and similarly for y.
let mut x1 = self.x;
x1.mul_assign(&other.z);
let mut y1 = self.y;
y1.mul_assign(&other.z);
let mut x2 = other.x;
x2.mul_assign(&self.z);
let mut y2 = other.y;
y2.mul_assign(&self.z);
x1 == x2 && y1 == y2
}
}
impl<E: JubjubEngine> Point<E, Unknown> {
pub fn read<R: Read>(
reader: R,
params: &E::Params
) -> io::Result<Self>
{
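// The encoding is the 32-byte little-endian representation of the
// y-coordinate, with the most significant bit of the last limb storing
// the sign (oddness) of the x-coordinate.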
let mut y_repr = <E::Fr as PrimeField>::Repr::default();
y_repr.read_le(reader)?;
let x_sign = (y_repr.as_ref()[3] >> 63) == 1;
y_repr.as_mut()[3] &= 0x7fffffffffffffff;
match E::Fr::from_repr(y_repr) {
Ok(y) => {
match Self::get_for_y(y, x_sign, params) {
Some(p) => Ok(p),
None => {
Err(io::Error::new(io::ErrorKind::InvalidInput, "not on curve"))
}
}
},
Err(_) => {
Err(io::Error::new(io::ErrorKind::InvalidInput, "y is not in field"))
}
}
}
pub fn get_for_y(y: E::Fr, sign: bool, params: &E::Params) -> Option<Self>
{
// Given a y on the curve, x^2 = (y^2 - 1) / (dy^2 + 1)
// This is defined for all valid y-coordinates,
// as dy^2 + 1 = 0 has no solution in Fr.
// tmp1 = y^2
let mut tmp1 = y;
tmp1.square();
// tmp2 = (y^2 * d) + 1
let mut tmp2 = tmp1;
tmp2.mul_assign(params.edwards_d());
tmp2.add_assign(&E::Fr::one());
// tmp1 = y^2 - 1
tmp1.sub_assign(&E::Fr::one());
match tmp2.inverse() {
Some(tmp2) => {
// tmp1 = (y^2 - 1) / (dy^2 + 1)
tmp1.mul_assign(&tmp2);
match tmp1.sqrt() {
Some(mut x) => {
if x.into_repr().is_odd() != sign {
x.negate();
}
let mut t = x;
t.mul_assign(&y);
Some(Point {
x: x,
y: y,
t: t,
z: E::Fr::one(),
_marker: PhantomData
})
},
None => None
}
},
None => None
}
}
/// This guarantees the point is in the prime order subgroup
#[must_use]
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder>
{
let tmp = self.double(params)
.double(params)
.double(params);
convert_subgroup(&tmp)
}
pub fn rand<R: Rng>(rng: &mut R, params: &E::Params) -> Self
{
loop {
let y: E::Fr = rng.gen();
if let Some(p) = Self::get_for_y(y, rng.gen(), params) {
return p;
}
}
}
}
impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
pub fn write<W: Write>(
&self,
writer: W
) -> io::Result<()>
{
let (x, y) = self.into_xy();
assert_eq!(E::Fr::NUM_BITS, 255);
let x_repr = x.into_repr();
let mut y_repr = y.into_repr();
if x_repr.is_odd() {
y_repr.as_mut()[3] |= 0x8000000000000000u64;
}
y_repr.write_le(writer)
}
/// Convert from a Montgomery point
pub fn from_montgomery(
m: &montgomery::Point<E, Subgroup>,
params: &E::Params
) -> Self
{
match m.into_xy() {
None => {
// Map the point at infinity to the neutral element.
Point::zero()
},
Some((x, y)) => {
// The map from a Montgomery curve is defined as:
// (x, y) -> (u, v) where
// u = x / y
// v = (x - 1) / (x + 1)
//
// This map is not defined for y = 0 and x = -1.
//
// y = 0 is a valid point only for x = 0:
// y^2 = x^3 + A.x^2 + x
// 0 = x^3 + A.x^2 + x
// 0 = x(x^2 + A.x + 1)
// We have: x = 0 OR x^2 + A.x + 1 = 0
// x^2 + A.x + 1 = 0
// (2.x + A)^2 = A^2 - 4 (Complete the square.)
// The left hand side is a square, and so if A^2 - 4
// is nonsquare, there is no solution. Indeed, A^2 - 4
// is nonsquare.
//
// (0, 0) is a point of order 2, and so we map it to
// (0, -1) in the twisted Edwards curve, which is the
// only point of order 2 that is not the neutral element.
if y.is_zero() {
// This must be the point (0, 0) as above.
let mut neg1 = E::Fr::one();
neg1.negate();
Point {
x: E::Fr::zero(),
y: neg1,
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData
}
} else {
// Otherwise, as stated above, the mapping is still
// not defined at x = -1. However, x = -1 is not
// on the curve when A - 2 is nonsquare:
// y^2 = x^3 + A.x^2 + x
// y^2 = (-1) + A + (-1)
// y^2 = A - 2
// Indeed, A - 2 is nonsquare.
//
// We need to map into (projective) extended twisted
// Edwards coordinates (X, Y, T, Z) which represents
// the point (X/Z, Y/Z) with Z nonzero and T = XY/Z.
//
// Thus, we compute...
//
// u = x(x + 1)
// v = y(x - 1)
// t = x(x - 1)
// z = y(x + 1) (Cannot be zero, as above.)
//
// ... which represents the point ( x / y , (x - 1) / (x + 1) )
// as required by the mapping and preserves the property of
// the auxiliary coordinate t.
//
// We need to scale the coordinate, so u and t will have
// an extra factor s.
// u = xs
let mut u = x;
u.mul_assign(params.scale());
// v = x - 1
let mut v = x;
v.sub_assign(&E::Fr::one());
// t = xs(x - 1)
let mut t = u;
t.mul_assign(&v);
// z = (x + 1)
let mut z = x;
z.add_assign(&E::Fr::one());
// u = xs(x + 1)
u.mul_assign(&z);
// z = y(x + 1)
z.mul_assign(&y);
// v = y(x - 1)
v.mul_assign(&y);
Point {
x: u,
y: v,
t: t,
z: z,
_marker: PhantomData
}
}
}
}
}
/// Attempts to cast this as a prime order element, failing if it's
/// not in the prime order subgroup.
pub fn as_prime_order(&self, params: &E::Params) -> Option<Point<E, PrimeOrder>> {
if self.mul(E::Fs::char(), params) == Point::zero() {
Some(convert_subgroup(self))
} else {
None
}
}
pub fn zero() -> Self {
Point {
x: E::Fr::zero(),
y: E::Fr::one(),
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData
}
}
pub fn into_xy(&self) -> (E::Fr, E::Fr)
{
let zinv = self.z.inverse().unwrap();
let mut x = self.x;
x.mul_assign(&zinv);
let mut y = self.y;
y.mul_assign(&zinv);
(x, y)
}
#[must_use]
pub fn negate(&self) -> Self {
let mut p = self.clone();
p.x.negate();
p.t.negate();
p
}
#[must_use]
pub fn double(&self, _: &E::Params) -> Self {
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
// Section 3.3
// http://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html#doubling-dbl-2008-hwcd
// A = X1^2
let mut a = self.x;
a.square();
// B = Y1^2
let mut b = self.y;
b.square();
// C = 2*Z1^2
let mut c = self.z;
c.square();
c.double();
// D = a*A
// = -A
let mut d = a;
d.negate();
// E = (X1+Y1)^2 - A - B
let mut e = self.x;
e.add_assign(&self.y);
e.square();
e.add_assign(&d); // -A = D
e.sub_assign(&b);
// G = D+B
let mut g = d;
g.add_assign(&b);
// F = G-C
let mut f = g;
f.sub_assign(&c);
// H = D-B
let mut h = d;
h.sub_assign(&b);
// X3 = E*F
let mut x3 = e;
x3.mul_assign(&f);
// Y3 = G*H
let mut y3 = g;
y3.mul_assign(&h);
// T3 = E*H
let mut t3 = e;
t3.mul_assign(&h);
// Z3 = F*G
let mut z3 = f;
z3.mul_assign(&g);
Point {
x: x3,
y: y3,
t: t3,
z: z3,
_marker: PhantomData
}
}
#[must_use]
pub fn add(&self, other: &Self, params: &E::Params) -> Self
{
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
// 3.1 Unified Addition in E^e
// A = x1 * x2
let mut a = self.x;
a.mul_assign(&other.x);
// B = y1 * y2
let mut b = self.y;
b.mul_assign(&other.y);
// C = d * t1 * t2
let mut c = params.edwards_d().clone();
c.mul_assign(&self.t);
c.mul_assign(&other.t);
// D = z1 * z2
let mut d = self.z;
d.mul_assign(&other.z);
// H = B - aA
// = B + A
let mut h = b;
h.add_assign(&a);
// E = (x1 + y1) * (x2 + y2) - A - B
// = (x1 + y1) * (x2 + y2) - H
let mut e = self.x;
e.add_assign(&self.y);
{
let mut tmp = other.x;
tmp.add_assign(&other.y);
e.mul_assign(&tmp);
}
e.sub_assign(&h);
// F = D - C
let mut f = d;
f.sub_assign(&c);
// G = D + C
let mut g = d;
g.add_assign(&c);
// x3 = E * F
let mut x3 = e;
x3.mul_assign(&f);
// y3 = G * H
let mut y3 = g;
y3.mul_assign(&h);
// t3 = E * H
let mut t3 = e;
t3.mul_assign(&h);
// z3 = F * G
let mut z3 = f;
z3.mul_assign(&g);
Point {
x: x3,
y: y3,
t: t3,
z: z3,
_marker: PhantomData
}
}
#[must_use]
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(
&self,
scalar: S,
params: &E::Params
) -> Self
{
// Standard double-and-add scalar multiplication
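// (`BitIterator` walks the scalar from the most significant bit down,
// hence double first, then conditionally add.)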
let mut res = Self::zero();
for b in BitIterator::new(scalar.into()) {
res = res.double(params);
if b {
res = res.add(self, params);
}
}
res
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,435 @@
//! Jubjub is a twisted Edwards curve defined over the BLS12-381 scalar
//! field, Fr. It takes the form `-x^2 + y^2 = 1 + dx^2y^2` with
//! `d = -(10240/10241)`. It is birationally equivalent to a Montgomery
//! curve of the form `y^2 = x^3 + Ax^2 + x` with `A = 40962`. This
//! value `A` is the smallest integer choice such that:
//!
//! * `(A - 2) / 4` is a small integer (`10240`).
//! * `A^2 - 4` is a quadratic nonresidue.
//! * The group orders of the curve and its quadratic twist each have a
//! large prime factor.
//!
//! Jubjub has `s = 0x0e7db4ea6533afa906673b0101343b00a6682093ccc81082d0970e5ed6f72cb7`
//! as the prime subgroup order, with cofactor 8. (The twist has
//! cofactor 4.)
//!
//! It is a complete twisted Edwards curve, so the equivalence with
//! the Montgomery curve forms a group isomorphism, allowing points
//! to be freely converted between the two forms.
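//!
//! A minimal usage sketch (not part of the original module docs), assuming
//! the `JubjubBls12` parameters and the `edwards`/`montgomery` point types
//! defined below:
//!
//! ```rust,ignore
//! use pairing::bls12_381::Bls12;
//!
//! let params = JubjubBls12::new();
//! let rng = &mut rand::thread_rng();
//!
//! // Sample an Edwards point, map it to the birationally equivalent
//! // Montgomery form, and map it back.
//! let e = edwards::Point::<Bls12, _>::rand(rng, &params);
//! let m = montgomery::Point::from_edwards(&e, &params);
//! assert!(edwards::Point::from_montgomery(&m, &params) == e);
//! ```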
use pairing::{
Engine,
Field,
PrimeField,
SqrtField
};
use group_hash::group_hash;
use constants;
use pairing::bls12_381::{
Bls12,
Fr
};
/// This is an implementation of the twisted Edwards Jubjub curve.
pub mod edwards;
/// This is an implementation of the birationally equivalent
/// Montgomery curve.
pub mod montgomery;
/// This is an implementation of the scalar field for Jubjub.
pub mod fs;
#[cfg(test)]
pub mod tests;
/// Point of unknown order.
pub enum Unknown { }
/// Point of prime order.
pub enum PrimeOrder { }
/// Fixed generators of the Jubjub curve of unknown
/// exponent.
#[derive(Copy, Clone)]
pub enum FixedGenerators {
/// The prover will demonstrate knowledge of discrete log
/// with respect to this base when they are constructing
/// a proof, in order to authorize proof construction.
ProofGenerationKey = 0,
/// The note commitment is randomized over this generator.
NoteCommitmentRandomness = 1,
/// The note commitment is randomized again by the position
/// in order to supply the nullifier computation with a
/// unique input w.r.t. the note being spent, to prevent
/// Faerie gold attacks.
NullifierPosition = 2,
/// The value commitment is used to check balance between
/// inputs and outputs. The value is placed over this
/// generator.
ValueCommitmentValue = 3,
/// The value commitment is randomized over this generator,
/// for privacy.
ValueCommitmentRandomness = 4,
/// The spender proves discrete log with respect to this
/// base at spend time.
SpendingKeyGenerator = 5,
Max = 6
}
pub trait ToUniform {
fn to_uniform(digest: &[u8]) -> Self;
}
/// This is an extension to the pairing Engine trait which
/// offers a scalar field for the embedded curve (Jubjub)
/// and some pre-computed parameters.
pub trait JubjubEngine: Engine {
/// The scalar field of the Jubjub curve
type Fs: PrimeField + SqrtField + ToUniform;
/// The parameters of Jubjub and the Sapling protocol
type Params: JubjubParams<Self>;
}
/// The pre-computed parameters for Jubjub, including curve
/// constants and various limits and window tables.
pub trait JubjubParams<E: JubjubEngine>: Sized {
/// The `d` constant of the twisted Edwards curve.
fn edwards_d(&self) -> &E::Fr;
/// The `A` constant of the birationally equivalent Montgomery curve.
fn montgomery_a(&self) -> &E::Fr;
/// The `A` constant, doubled.
fn montgomery_2a(&self) -> &E::Fr;
/// The scaling factor used for conversion from the Montgomery form.
fn scale(&self) -> &E::Fr;
/// Returns the generators (for each segment) used in all Pedersen commitments.
fn pedersen_hash_generators(&self) -> &[edwards::Point<E, PrimeOrder>];
/// Returns the exp table for Pedersen hashes.
fn pedersen_hash_exp_table(&self) -> &[Vec<Vec<edwards::Point<E, PrimeOrder>>>];
/// Returns the maximum number of chunks per segment of the Pedersen hash.
fn pedersen_hash_chunks_per_generator(&self) -> usize;
/// Returns the pre-computed window tables [-4, -3, -2, -1, 1, 2, 3, 4] of different
/// magnitudes of the Pedersen hash segment generators.
fn pedersen_circuit_generators(&self) -> &[Vec<Vec<(E::Fr, E::Fr)>>];
/// Returns the number of chunks needed to represent a full scalar during fixed-base
/// exponentiation.
fn fixed_base_chunks_per_generator(&self) -> usize;
/// Returns a fixed generator.
fn generator(&self, base: FixedGenerators) -> &edwards::Point<E, PrimeOrder>;
/// Returns a window table [0, 1, ..., 7] for different magnitudes of some
/// fixed generator.
fn circuit_generators(&self, FixedGenerators) -> &[Vec<(E::Fr, E::Fr)>];
/// Returns the window size for exponentiation of Pedersen hash generators
/// outside the circuit
fn pedersen_hash_exp_window_size() -> u32;
}
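// A minimal sketch (not part of the original file) of how these accessors are
// typically combined, assuming the `JubjubBls12` parameters constructed below
// and the scalar field `fs::Fs`:
//
//     let params = JubjubBls12::new();
//     let g = params.generator(FixedGenerators::SpendingKeyGenerator);
//     let sk = fs::Fs::from_str("42").unwrap();
//     let pk = g.mul(sk, &params); // fixed-base scalar multiplication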
impl JubjubEngine for Bls12 {
type Fs = self::fs::Fs;
type Params = JubjubBls12;
}
pub struct JubjubBls12 {
edwards_d: Fr,
montgomery_a: Fr,
montgomery_2a: Fr,
scale: Fr,
pedersen_hash_generators: Vec<edwards::Point<Bls12, PrimeOrder>>,
pedersen_hash_exp: Vec<Vec<Vec<edwards::Point<Bls12, PrimeOrder>>>>,
pedersen_circuit_generators: Vec<Vec<Vec<(Fr, Fr)>>>,
fixed_base_generators: Vec<edwards::Point<Bls12, PrimeOrder>>,
fixed_base_circuit_generators: Vec<Vec<Vec<(Fr, Fr)>>>,
}
impl JubjubParams<Bls12> for JubjubBls12 {
fn edwards_d(&self) -> &Fr { &self.edwards_d }
fn montgomery_a(&self) -> &Fr { &self.montgomery_a }
fn montgomery_2a(&self) -> &Fr { &self.montgomery_2a }
fn scale(&self) -> &Fr { &self.scale }
fn pedersen_hash_generators(&self) -> &[edwards::Point<Bls12, PrimeOrder>] {
&self.pedersen_hash_generators
}
fn pedersen_hash_exp_table(&self) -> &[Vec<Vec<edwards::Point<Bls12, PrimeOrder>>>] {
&self.pedersen_hash_exp
}
fn pedersen_hash_chunks_per_generator(&self) -> usize {
63
}
fn fixed_base_chunks_per_generator(&self) -> usize {
84
}
fn pedersen_circuit_generators(&self) -> &[Vec<Vec<(Fr, Fr)>>] {
&self.pedersen_circuit_generators
}
fn generator(&self, base: FixedGenerators) -> &edwards::Point<Bls12, PrimeOrder>
{
&self.fixed_base_generators[base as usize]
}
fn circuit_generators(&self, base: FixedGenerators) -> &[Vec<(Fr, Fr)>]
{
&self.fixed_base_circuit_generators[base as usize][..]
}
fn pedersen_hash_exp_window_size() -> u32 {
8
}
}
impl JubjubBls12 {
pub fn new() -> Self {
let montgomery_a = Fr::from_str("40962").unwrap();
let mut montgomery_2a = montgomery_a;
montgomery_2a.double();
let mut tmp_params = JubjubBls12 {
// d = -(10240/10241)
edwards_d: Fr::from_str("19257038036680949359750312669786877991949435402254120286184196891950884077233").unwrap(),
// A = 40962
montgomery_a: montgomery_a,
// 2A = 2.A
montgomery_2a: montgomery_2a,
// scaling factor = sqrt(4 / (a - d))
scale: Fr::from_str("17814886934372412843466061268024708274627479829237077604635722030778476050649").unwrap(),
// We'll initialize these below
pedersen_hash_generators: vec![],
pedersen_hash_exp: vec![],
pedersen_circuit_generators: vec![],
fixed_base_generators: vec![],
fixed_base_circuit_generators: vec![],
};
fn find_group_hash<E: JubjubEngine>(
m: &[u8],
personalization: &[u8; 8],
params: &E::Params
) -> edwards::Point<E, PrimeOrder>
{
let mut tag = m.to_vec();
let i = tag.len();
tag.push(0u8);
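// Note (not in the original): the appended byte is a counter that is
// bumped below until the hash lands on a valid, non-identity point.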
loop {
let gh = group_hash(
&tag,
personalization,
params
);
// We don't want to overflow and start reusing generators
assert!(tag[i] != u8::max_value());
tag[i] += 1;
if let Some(gh) = gh {
break gh;
}
}
}
// Create the bases for the Pedersen hashes
{
let mut pedersen_hash_generators = vec![];
for m in 0..5 {
use byteorder::{WriteBytesExt, LittleEndian};
let mut segment_number = [0u8; 4];
(&mut segment_number[0..4]).write_u32::<LittleEndian>(m).unwrap();
pedersen_hash_generators.push(
find_group_hash(
&segment_number,
constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION,
&tmp_params
)
);
}
// Check for duplicates, far worse than spec inconsistencies!
for (i, p1) in pedersen_hash_generators.iter().enumerate() {
if p1 == &edwards::Point::zero() {
panic!("Neutral element!");
}
for p2 in pedersen_hash_generators.iter().skip(i+1) {
if p1 == p2 {
panic!("Duplicate generator!");
}
}
}
tmp_params.pedersen_hash_generators = pedersen_hash_generators;
}
// Create the exp table for the Pedersen hash generators
{
let mut pedersen_hash_exp = vec![];
for g in &tmp_params.pedersen_hash_generators {
let mut g = g.clone();
let window = JubjubBls12::pedersen_hash_exp_window_size();
let mut tables = vec![];
let mut num_bits = 0;
while num_bits <= fs::Fs::NUM_BITS {
let mut table = Vec::with_capacity(1 << window);
let mut base = edwards::Point::zero();
for _ in 0..(1 << window) {
table.push(base.clone());
base = base.add(&g, &tmp_params);
}
tables.push(table);
num_bits += window;
for _ in 0..window {
g = g.double(&tmp_params);
}
}
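// Note (not in the original): at this point `tables[i][j]` holds
// j * 2^(window * i) * g for the original generator g, i.e. one lookup
// table per `window`-bit chunk of a scalar.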
pedersen_hash_exp.push(tables);
}
tmp_params.pedersen_hash_exp = pedersen_hash_exp;
}
// Create the bases for other parts of the protocol
{
let mut fixed_base_generators = vec![edwards::Point::zero(); FixedGenerators::Max as usize];
fixed_base_generators[FixedGenerators::ProofGenerationKey as usize] =
find_group_hash(&[], constants::PROOF_GENERATION_KEY_BASE_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::NoteCommitmentRandomness as usize] =
find_group_hash(b"r", constants::PEDERSEN_HASH_GENERATORS_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::NullifierPosition as usize] =
find_group_hash(&[], constants::NULLIFIER_POSITION_IN_TREE_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::ValueCommitmentValue as usize] =
find_group_hash(b"v", constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::ValueCommitmentRandomness as usize] =
find_group_hash(b"r", constants::VALUE_COMMITMENT_GENERATOR_PERSONALIZATION, &tmp_params);
fixed_base_generators[FixedGenerators::SpendingKeyGenerator as usize] =
find_group_hash(&[], constants::SPENDING_KEY_GENERATOR_PERSONALIZATION, &tmp_params);
// Check for duplicates, far worse than spec inconsistencies!
for (i, p1) in fixed_base_generators.iter().enumerate() {
if p1 == &edwards::Point::zero() {
panic!("Neutral element!");
}
for p2 in fixed_base_generators.iter().skip(i+1) {
if p1 == p2 {
panic!("Duplicate generator!");
}
}
}
tmp_params.fixed_base_generators = fixed_base_generators;
}
// Create the 2-bit window table lookups for each 4-bit
// "chunk" in each segment of the Pedersen hash
{
let mut pedersen_circuit_generators = vec![];
// Process each segment
for gen in tmp_params.pedersen_hash_generators.iter().cloned() {
let mut gen = montgomery::Point::from_edwards(&gen, &tmp_params);
let mut windows = vec![];
for _ in 0..tmp_params.pedersen_hash_chunks_per_generator() {
// Create (x, y) coeffs for this chunk
let mut coeffs = vec![];
let mut g = gen.clone();
// coeffs = g, g*2, g*3, g*4
for _ in 0..4 {
coeffs.push(g.into_xy().expect("cannot produce O"));
g = g.add(&gen, &tmp_params);
}
windows.push(coeffs);
// Our chunks are separated by 2 bits to prevent overlap.
for _ in 0..4 {
gen = gen.double(&tmp_params);
}
}
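// Note (not in the original): `windows[chunk]` is
// [g_c, 2*g_c, 3*g_c, 4*g_c] in affine Montgomery coordinates, where
// g_c is the segment generator doubled 4 * chunk times.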
pedersen_circuit_generators.push(windows);
}
tmp_params.pedersen_circuit_generators = pedersen_circuit_generators;
}
// Create the 3-bit window table lookups for fixed-base
// exp of each base in the protocol.
{
let mut fixed_base_circuit_generators = vec![];
for mut gen in tmp_params.fixed_base_generators.iter().cloned() {
let mut windows = vec![];
for _ in 0..tmp_params.fixed_base_chunks_per_generator() {
let mut coeffs = vec![(Fr::zero(), Fr::one())];
let mut g = gen.clone();
for _ in 0..7 {
coeffs.push(g.into_xy());
g = g.add(&gen, &tmp_params);
}
windows.push(coeffs);
// gen = gen * 8
gen = g;
}
fixed_base_circuit_generators.push(windows);
}
tmp_params.fixed_base_circuit_generators = fixed_base_circuit_generators;
}
tmp_params
}
}
#[test]
fn test_jubjub_bls12() {
let params = JubjubBls12::new();
tests::test_suite::<Bls12>(&params);
let test_repr = hex!("9d12b88b08dcbef8a11ee0712d94cb236ee2f4ca17317075bfafc82ce3139d31");
let p = edwards::Point::<Bls12, _>::read(&test_repr[..], &params).unwrap();
let q = edwards::Point::<Bls12, _>::get_for_y(
Fr::from_str("22440861827555040311190986994816762244378363690614952020532787748720529117853").unwrap(),
false,
&params
).unwrap();
assert!(p == q);
// Same thing, but sign bit set
let test_repr = hex!("9d12b88b08dcbef8a11ee0712d94cb236ee2f4ca17317075bfafc82ce3139db1");
let p = edwards::Point::<Bls12, _>::read(&test_repr[..], &params).unwrap();
let q = edwards::Point::<Bls12, _>::get_for_y(
Fr::from_str("22440861827555040311190986994816762244378363690614952020532787748720529117853").unwrap(),
true,
&params
).unwrap();
assert!(p == q);
}

View File

@ -0,0 +1,358 @@
use pairing::{
Field,
SqrtField,
PrimeField,
PrimeFieldRepr,
BitIterator
};
use super::{
JubjubEngine,
JubjubParams,
Unknown,
PrimeOrder,
edwards
};
use rand::{
Rng
};
use std::marker::PhantomData;
// Represents the affine point (X, Y)
pub struct Point<E: JubjubEngine, Subgroup> {
x: E::Fr,
y: E::Fr,
infinity: bool,
_marker: PhantomData<Subgroup>
}
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2>
{
Point {
x: from.x,
y: from.y,
infinity: from.infinity,
_marker: PhantomData
}
}
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown>
{
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown>
{
convert_subgroup(&p)
}
}
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup>
{
fn clone(&self) -> Self {
convert_subgroup(self)
}
}
impl<E: JubjubEngine, Subgroup> PartialEq for Point<E, Subgroup> {
fn eq(&self, other: &Point<E, Subgroup>) -> bool {
match (self.infinity, other.infinity) {
(true, true) => true,
(true, false) | (false, true) => false,
(false, false) => {
self.x == other.x && self.y == other.y
}
}
}
}
impl<E: JubjubEngine> Point<E, Unknown> {
pub fn get_for_x(x: E::Fr, sign: bool, params: &E::Params) -> Option<Self>
{
// Given an x on the curve, y = sqrt(x^3 + A*x^2 + x)
let mut x2 = x;
x2.square();
let mut rhs = x2;
rhs.mul_assign(params.montgomery_a());
rhs.add_assign(&x);
x2.mul_assign(&x);
rhs.add_assign(&x2);
match rhs.sqrt() {
Some(mut y) => {
if y.into_repr().is_odd() != sign {
y.negate();
}
return Some(Point {
x: x,
y: y,
infinity: false,
_marker: PhantomData
})
},
None => None
}
}
/// This guarantees the point is in the prime order subgroup
#[must_use]
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder>
{
let tmp = self.double(params)
.double(params)
.double(params);
convert_subgroup(&tmp)
}
pub fn rand<R: Rng>(rng: &mut R, params: &E::Params) -> Self
{
loop {
let x: E::Fr = rng.gen();
match Self::get_for_x(x, rng.gen(), params) {
Some(p) => {
return p
},
None => {}
}
}
}
}
impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
/// Convert from an Edwards point
pub fn from_edwards(
e: &edwards::Point<E, Subgroup>,
params: &E::Params
) -> Self
{
let (x, y) = e.into_xy();
if y == E::Fr::one() {
// The only solution for y = 1 is x = 0. (0, 1) is
// the neutral element, so we map this to the point
// at infinity.
Point::zero()
} else {
// The map from a twisted Edwards curve is defined as
// (x, y) -> (u, v) where
// u = (1 + y) / (1 - y)
// v = u / x
//
// This mapping is not defined for y = 1 and for x = 0.
//
// We have that y != 1 above. If x = 0, the only
// solutions for y are 1 (contradiction) or -1.
if x.is_zero() {
// (0, -1) is the point of order two which is not
// the neutral element, so we map it to (0, 0) which is
// the only affine point of order 2.
Point {
x: E::Fr::zero(),
y: E::Fr::zero(),
infinity: false,
_marker: PhantomData
}
} else {
// The mapping is defined as above.
//
// (x, y) -> (u, v) where
// u = (1 + y) / (1 - y)
// v = u / x
let mut u = E::Fr::one();
u.add_assign(&y);
{
let mut tmp = E::Fr::one();
tmp.sub_assign(&y);
u.mul_assign(&tmp.inverse().unwrap())
}
let mut v = u;
v.mul_assign(&x.inverse().unwrap());
// Scale it into the correct curve constants
v.mul_assign(params.scale());
Point {
x: u,
y: v,
infinity: false,
_marker: PhantomData
}
}
}
}
/// Attempts to cast this as a prime order element, failing if it's
/// not in the prime order subgroup.
pub fn as_prime_order(&self, params: &E::Params) -> Option<Point<E, PrimeOrder>> {
if self.mul(E::Fs::char(), params) == Point::zero() {
Some(convert_subgroup(self))
} else {
None
}
}
pub fn zero() -> Self {
Point {
x: E::Fr::zero(),
y: E::Fr::zero(),
infinity: true,
_marker: PhantomData
}
}
pub fn into_xy(&self) -> Option<(E::Fr, E::Fr)>
{
if self.infinity {
None
} else {
Some((self.x, self.y))
}
}
#[must_use]
pub fn negate(&self) -> Self {
let mut p = self.clone();
p.y.negate();
p
}
#[must_use]
pub fn double(&self, params: &E::Params) -> Self {
if self.infinity {
return Point::zero();
}
// (0, 0) is the point of order 2. Doubling
// produces the point at infinity.
if self.y == E::Fr::zero() {
return Point::zero();
}
// This is a standard affine point doubling formula
// See 4.3.2 The group law for Weierstrass curves
// Montgomery curves and the Montgomery Ladder
// Daniel J. Bernstein and Tanja Lange
let mut delta = E::Fr::one();
{
let mut tmp = params.montgomery_a().clone();
tmp.mul_assign(&self.x);
tmp.double();
delta.add_assign(&tmp);
}
{
let mut tmp = self.x;
tmp.square();
delta.add_assign(&tmp);
tmp.double();
delta.add_assign(&tmp);
}
{
let mut tmp = self.y;
tmp.double();
delta.mul_assign(&tmp.inverse().expect("y is nonzero so this must be nonzero"));
}
let mut x3 = delta;
x3.square();
x3.sub_assign(params.montgomery_a());
x3.sub_assign(&self.x);
x3.sub_assign(&self.x);
let mut y3 = x3;
y3.sub_assign(&self.x);
y3.mul_assign(&delta);
y3.add_assign(&self.y);
y3.negate();
Point {
x: x3,
y: y3,
infinity: false,
_marker: PhantomData
}
}
#[must_use]
pub fn add(&self, other: &Self, params: &E::Params) -> Self
{
// This is a standard affine point addition formula
// See 4.3.2 The group law for Weierstrass curves
// Montgomery curves and the Montgomery Ladder
// Daniel J. Bernstein and Tanja Lange
match (self.infinity, other.infinity) {
(true, true) => Point::zero(),
(true, false) => other.clone(),
(false, true) => self.clone(),
(false, false) => {
if self.x == other.x {
if self.y == other.y {
self.double(params)
} else {
Point::zero()
}
} else {
let mut delta = other.y;
delta.sub_assign(&self.y);
{
let mut tmp = other.x;
tmp.sub_assign(&self.x);
delta.mul_assign(&tmp.inverse().expect("self.x != other.x, so this must be nonzero"));
}
let mut x3 = delta;
x3.square();
x3.sub_assign(params.montgomery_a());
x3.sub_assign(&self.x);
x3.sub_assign(&other.x);
let mut y3 = x3;
y3.sub_assign(&self.x);
y3.mul_assign(&delta);
y3.add_assign(&self.y);
y3.negate();
Point {
x: x3,
y: y3,
infinity: false,
_marker: PhantomData
}
}
}
}
}
#[must_use]
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(
&self,
scalar: S,
params: &E::Params
) -> Self
{
// Standard double-and-add scalar multiplication
let mut res = Self::zero();
for b in BitIterator::new(scalar.into()) {
res = res.double(params);
if b {
res = res.add(self, params);
}
}
res
}
}

View File

@ -0,0 +1,416 @@
use super::{
JubjubEngine,
JubjubParams,
PrimeOrder,
montgomery,
edwards
};
use pairing::{
Field,
PrimeField,
PrimeFieldRepr,
SqrtField,
LegendreSymbol
};
use rand::{XorShiftRng, SeedableRng, Rand};
pub fn test_suite<E: JubjubEngine>(params: &E::Params) {
test_back_and_forth::<E>(params);
test_jubjub_params::<E>(params);
test_rand::<E>(params);
test_get_for::<E>(params);
test_identities::<E>(params);
test_addition_associativity::<E>(params);
test_order::<E>(params);
test_mul_associativity::<E>(params);
test_loworder::<E>(params);
test_read_write::<E>(params);
}
fn is_on_mont_curve<E: JubjubEngine, P: JubjubParams<E>>(
x: E::Fr,
y: E::Fr,
params: &P
) -> bool
{
let mut lhs = y;
lhs.square();
let mut x2 = x;
x2.square();
let mut x3 = x2;
x3.mul_assign(&x);
let mut rhs = x2;
rhs.mul_assign(params.montgomery_a());
rhs.add_assign(&x);
rhs.add_assign(&x3);
lhs == rhs
}
fn is_on_twisted_edwards_curve<E: JubjubEngine, P: JubjubParams<E>>(
x: E::Fr,
y: E::Fr,
params: &P
) -> bool
{
let mut x2 = x;
x2.square();
let mut y2 = y;
y2.square();
// -x^2 + y^2
let mut lhs = y2;
lhs.sub_assign(&x2);
// 1 + d x^2 y^2
let mut rhs = y2;
rhs.mul_assign(&x2);
rhs.mul_assign(params.edwards_d());
rhs.add_assign(&E::Fr::one());
lhs == rhs
}
fn test_loworder<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let inf = montgomery::Point::zero();
// try to find a point of order 8
let p = loop {
let r = montgomery::Point::<E, _>::rand(rng, params).mul(E::Fs::char(), params);
let r2 = r.double(params);
let r4 = r2.double(params);
let r8 = r4.double(params);
if r2 != inf && r4 != inf && r8 == inf {
break r;
}
};
let mut loworder_points = vec![];
{
let mut tmp = p.clone();
for _ in 0..8 {
assert!(!loworder_points.contains(&tmp));
loworder_points.push(tmp.clone());
tmp = tmp.add(&p, params);
}
}
assert!(loworder_points[7] == inf);
}
fn test_mul_associativity<E: JubjubEngine>(params: &E::Params) {
use self::edwards::Point;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..100 {
// Pick a random point and multiply it by the cofactor
let base = Point::<E, _>::rand(rng, params).mul_by_cofactor(params);
let mut a = E::Fs::rand(rng);
let b = E::Fs::rand(rng);
let c = E::Fs::rand(rng);
let res1 = base.mul(a, params).mul(b, params).mul(c, params);
let res2 = base.mul(b, params).mul(c, params).mul(a, params);
let res3 = base.mul(c, params).mul(a, params).mul(b, params);
a.mul_assign(&b);
a.mul_assign(&c);
let res4 = base.mul(a, params);
assert!(res1 == res2);
assert!(res2 == res3);
assert!(res3 == res4);
let (x, y) = res1.into_xy();
assert!(is_on_twisted_edwards_curve(x, y, params));
let (x, y) = res2.into_xy();
assert!(is_on_twisted_edwards_curve(x, y, params));
let (x, y) = res3.into_xy();
assert!(is_on_twisted_edwards_curve(x, y, params));
}
}
fn test_order<E: JubjubEngine>(params: &E::Params) {
use self::edwards::Point;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// The neutral element is in the prime order subgroup.
assert!(Point::<E, PrimeOrder>::zero().as_prime_order(params).is_some());
for _ in 0..50 {
// Pick a random point and multiply it by the cofactor
let base = Point::<E, _>::rand(rng, params).mul_by_cofactor(params);
// Any point multiplied by the cofactor will be in the prime
// order subgroup
assert!(base.as_prime_order(params).is_some());
}
// It's very likely that at least one out of 50 random points on the curve
// is not in the prime order subgroup.
let mut at_least_one_not_in_prime_order_subgroup = false;
for _ in 0..50 {
// Pick a random point.
let base = Point::<E, _>::rand(rng, params);
at_least_one_not_in_prime_order_subgroup |= base.as_prime_order(params).is_none();
}
assert!(at_least_one_not_in_prime_order_subgroup);
}
fn test_addition_associativity<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
use self::montgomery::Point;
let a = Point::<E, _>::rand(rng, params);
let b = Point::<E, _>::rand(rng, params);
let c = Point::<E, _>::rand(rng, params);
assert!(a.add(&b, &params).add(&c, &params) == c.add(&a, &params).add(&b, &params));
}
for _ in 0..1000 {
use self::edwards::Point;
let a = Point::<E, _>::rand(rng, params);
let b = Point::<E, _>::rand(rng, params);
let c = Point::<E, _>::rand(rng, params);
assert!(a.add(&b, &params).add(&c, &params) == c.add(&a, &params).add(&b, &params));
}
}
fn test_identities<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
{
use self::edwards::Point;
let z = Point::<E, PrimeOrder>::zero();
assert!(z.double(&params) == z);
assert!(z.negate() == z);
for _ in 0..100 {
let r = Point::<E, _>::rand(rng, params);
assert!(r.add(&Point::zero(), &params) == r);
assert!(r.add(&r.negate(), &params) == Point::zero());
}
}
{
use self::montgomery::Point;
let z = Point::<E, PrimeOrder>::zero();
assert!(z.double(&params) == z);
assert!(z.negate() == z);
for _ in 0..100 {
let r = Point::<E, _>::rand(rng, params);
assert!(r.add(&Point::zero(), &params) == r);
assert!(r.add(&r.negate(), &params) == Point::zero());
}
}
}
fn test_get_for<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let y = E::Fr::rand(rng);
let sign = bool::rand(rng);
if let Some(mut p) = edwards::Point::<E, _>::get_for_y(y, sign, params) {
assert!(p.into_xy().0.into_repr().is_odd() == sign);
p = p.negate();
assert!(
edwards::Point::<E, _>::get_for_y(y, !sign, params).unwrap()
==
p
);
}
}
}
fn test_read_write<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let e = edwards::Point::<E, _>::rand(rng, params);
let mut v = vec![];
e.write(&mut v).unwrap();
let e2 = edwards::Point::read(&v[..], params).unwrap();
assert!(e == e2);
}
}
fn test_rand<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let p = montgomery::Point::<E, _>::rand(rng, params);
let e = edwards::Point::<E, _>::rand(rng, params);
{
let (x, y) = p.into_xy().unwrap();
assert!(is_on_mont_curve(x, y, params));
}
{
let (x, y) = e.into_xy();
assert!(is_on_twisted_edwards_curve(x, y, params));
}
}
}
fn test_back_and_forth<E: JubjubEngine>(params: &E::Params) {
let rng = &mut XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
for _ in 0..1000 {
let s = E::Fs::rand(rng);
let edwards_p1 = edwards::Point::<E, _>::rand(rng, params);
let mont_p1 = montgomery::Point::from_edwards(&edwards_p1, params);
let mont_p2 = montgomery::Point::<E, _>::rand(rng, params);
let edwards_p2 = edwards::Point::from_montgomery(&mont_p2, params);
let mont = mont_p1.add(&mont_p2, params).mul(s, params);
let edwards = edwards_p1.add(&edwards_p2, params).mul(s, params);
assert!(
montgomery::Point::from_edwards(&edwards, params) == mont
);
assert!(
edwards::Point::from_montgomery(&mont, params) == edwards
);
}
}
fn test_jubjub_params<E: JubjubEngine>(params: &E::Params) {
// a = -1
let mut a = E::Fr::one();
a.negate();
{
// Check that 2A is consistent with A
let mut tmp = *params.montgomery_a();
tmp.double();
assert_eq!(&tmp, params.montgomery_2a());
}
{
// The twisted Edwards addition law is complete when d is nonsquare
// and a is square.
assert!(params.edwards_d().legendre() == LegendreSymbol::QuadraticNonResidue);
assert!(a.legendre() == LegendreSymbol::QuadraticResidue);
}
{
// Other convenient sanity checks regarding d
// tmp = d
let mut tmp = *params.edwards_d();
// 1 / d is nonsquare
assert!(tmp.inverse().unwrap().legendre() == LegendreSymbol::QuadraticNonResidue);
// tmp = -d
tmp.negate();
// -d is nonsquare
assert!(tmp.legendre() == LegendreSymbol::QuadraticNonResidue);
// 1 / -d is nonsquare
assert!(tmp.inverse().unwrap().legendre() == LegendreSymbol::QuadraticNonResidue);
}
{
// Check that A^2 - 4 is nonsquare:
let mut tmp = params.montgomery_a().clone();
tmp.square();
tmp.sub_assign(&E::Fr::from_str("4").unwrap());
assert!(tmp.legendre() == LegendreSymbol::QuadraticNonResidue);
}
{
// Check that A - 2 is nonsquare:
let mut tmp = params.montgomery_a().clone();
tmp.sub_assign(&E::Fr::from_str("2").unwrap());
assert!(tmp.legendre() == LegendreSymbol::QuadraticNonResidue);
}
{
// Check the validity of the scaling factor
let mut tmp = a;
tmp.sub_assign(&params.edwards_d());
tmp = tmp.inverse().unwrap();
tmp.mul_assign(&E::Fr::from_str("4").unwrap());
tmp = tmp.sqrt().unwrap();
assert_eq!(&tmp, params.scale());
}
{
// Check that the number of windows per generator
// in the Pedersen hash does not allow for collisions
let mut cur = E::Fs::one().into_repr();
let mut max = E::Fs::char();
{
max.sub_noborrow(&E::Fs::one().into_repr());
max.div2();
}
let mut pacc = E::Fs::zero().into_repr();
let mut nacc = E::Fs::char();
for _ in 0..params.pedersen_hash_chunks_per_generator()
{
// tmp = cur * 4
let mut tmp = cur;
tmp.mul2();
tmp.mul2();
pacc.add_nocarry(&tmp);
nacc.sub_noborrow(&tmp);
assert!(pacc < max);
assert!(pacc < nacc);
// cur = cur * 16
for _ in 0..4 {
cur.mul2();
}
}
}
{
// Check that the number of windows for fixed-base
// scalar multiplication is sufficient for all scalars.
assert!(params.fixed_base_chunks_per_generator() * 3 >= E::Fs::NUM_BITS as usize);
// ... and that it's *just* efficient enough.
assert!((params.fixed_base_chunks_per_generator() - 1) * 3 < E::Fs::NUM_BITS as usize);
}
}

22
sapling-crypto/src/lib.rs Normal file
View File

@ -0,0 +1,22 @@
extern crate pairing;
extern crate bellman;
extern crate blake2_rfc;
extern crate digest;
extern crate rand;
extern crate byteorder;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate crypto;
pub mod jubjub;
pub mod group_hash;
pub mod circuit;
pub mod pedersen_hash;
pub mod primitives;
pub mod constants;
pub mod redjubjub;
pub mod util;

Some files were not shown because too many files have changed in this diff