Skip to content

Commit

Permalink
Merge pull request #16516 from MinaProtocol/merge-back-to-develop-2025-01-23
Browse files Browse the repository at this point in the history

Merge back to develop
  • Loading branch information
georgeee authored Jan 24, 2025
2 parents 9bfe8ca + c45945c commit 9739c56
Show file tree
Hide file tree
Showing 41 changed files with 1,233 additions and 377 deletions.
2 changes: 1 addition & 1 deletion buildkite/src/Monorepo.dhall
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ in \(args : { filter : PipelineFilter.Type, mode : PipelineMode.Type })
, label =
"Monorepo triage ${PipelineFilter.show args.filter}"
, key = "cmds-${PipelineFilter.show args.filter}"
, target = Size.Small
, target = Size.Multi
, docker = Some Docker::{
, image =
(./Constants/ContainerImages.dhall).toolchainBase
Expand Down
2 changes: 1 addition & 1 deletion buildkite/src/Prepare.dhall
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ let config
]
, label = "Prepare monorepo triage"
, key = "monorepo-${mode}-${filter}"
, target = Size.Small
, target = Size.Multi
, docker = Some Docker::{
, image = (./Constants/ContainerImages.dhall).toolchainBase
, environment = [ "BUILDKITE_AGENT_ACCESS_TOKEN" ]
Expand Down
1 change: 1 addition & 0 deletions src/app/dump_blocks/.ocamlformat
42 changes: 38 additions & 4 deletions src/app/test_executive/zkapps.ml
Original file line number Diff line number Diff line change
Expand Up @@ -767,6 +767,40 @@ module Make (Inputs : Intf.Test.Inputs_intf) = struct
zkapp_command_insufficient_fee "Insufficient fee" )
in
let%bind () = wait_for t (Wait_condition.blocks_to_be_produced 1) in
let%bind.Deferred () =
(* Wait for the start of the next slot, attempting to submit all commands
within the same slot.
In particular, this has the goal of reducing flakiness around the
'insufficient replace fee' test, which becomes an 'invalid nonce'
failure if the first transaction has already been included.
Note that this *isn't* redundant with the block waiting above, because
the block will be produced part-way through a slot, and will further
take us some time to receive the message about that block production
due to polling.
*)
let next_slot_time =
let genesis_timestamp =
constants.genesis_constants.protocol.genesis_state_timestamp
|> Int64.to_float |> Time.Span.of_ms |> Time.of_span_since_epoch
in
let block_duration_ms =
constants.constraint_constants.block_window_duration_ms
|> Int.to_float
in
let current_slot_span_ms =
Time.(diff (now ()) genesis_timestamp) |> Time.Span.to_ms
in
let target_slot =
block_duration_ms /. current_slot_span_ms |> Float.round_up
in
let target_slot_span_ms =
target_slot *. current_slot_span_ms |> Time.Span.of_ms
in
Time.add genesis_timestamp target_slot_span_ms
in
after Time.(diff (now ()) next_slot_time)
in
(* Won't be accepted until the previous transactions are applied *)
let%bind () =
section_hard "Send a zkApp transaction to update all fields"
Expand All @@ -775,16 +809,16 @@ module Make (Inputs : Intf.Test.Inputs_intf) = struct
zkapp_command_update_all )
in
let%bind () =
section_hard "Send a zkapp with an invalid proof"
section_hard "Send a zkapp with an insufficient replace fee"
(send_invalid_zkapp ~logger
(Network.Node.get_ingress_uri node)
zkapp_command_invalid_proof "Invalid_proof" )
zkapp_command_insufficient_replace_fee "Insufficient_replace_fee" )
in
let%bind () =
section_hard "Send a zkapp with an insufficient replace fee"
section_hard "Send a zkapp with an invalid proof"
(send_invalid_zkapp ~logger
(Network.Node.get_ingress_uri node)
zkapp_command_insufficient_replace_fee "Insufficient_replace_fee" )
zkapp_command_invalid_proof "Invalid_proof" )
in
let%bind () =
section_hard "Send a zkApp transaction with an invalid nonce"
Expand Down
20 changes: 10 additions & 10 deletions src/lib/crypto/kimchi_bindings/stubs/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,31 +13,31 @@ crate-type = ["lib", "staticlib"]

[dependencies]
array-init = "2.0.0"
rmp-serde = "1.1.2"
libc = "0.2.62"
num-bigint = { version = "0.4.4", features = [ "rand", "serde" ] }
# ocaml-specific
ocaml = { version = "0.22.2", features = ["no-caml-startup"] }
ocaml-gen = "0.1.5"
once_cell = "1.10.0"
paste = "1.0.5"
rand = "0.8.5"
rayon = "1.5.0"
rmp-serde = "1.1.2"
serde = "1.0.130"
serde_json = "1.0.103"
sprs = { version = "0.11.0", features = ["multi_thread"] }
once_cell = "1.10.0"

# arkworks
ark-ff = { version = "0.4.2", features = ["parallel", "asm"] }
ark-serialize = "0.4.2"
ark-ec = { version = "0.4.2", features = ["parallel"] }
ark-ff = { version = "0.4.2", features = ["parallel", "asm"] }
ark-poly = { version = "0.4.2", features = ["parallel"] }
ark-serialize = "0.4.2"

# proof-systems
poly-commitment = { path = "../../proof-systems/poly-commitment", features = ["ocaml_types"] }
groupmap = { path = "../../proof-systems/groupmap" }
kimchi = { path = "../../proof-systems/kimchi", features = ["ocaml_types"] }
mina-curves = { path = "../../proof-systems/curves" }
o1-utils = { path = "../../proof-systems/utils" }
mina-poseidon = { path = "../../proof-systems/poseidon" }
kimchi = { path = "../../proof-systems/kimchi", features = ["ocaml_types"] }
o1-utils = { path = "../../proof-systems/utils" }
poly-commitment = { path = "../../proof-systems/poly-commitment", features = ["ocaml_types"] }

# ocaml-specific
ocaml = { version = "0.22.2", features = ["no-caml-startup"] }
ocaml-gen = "0.1.5"
18 changes: 10 additions & 8 deletions src/lib/crypto/kimchi_bindings/stubs/src/caml/caml_pointer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ macro_rules! impl_caml_pointer {
}

extern "C" fn caml_pointer_compare(_: ocaml::Raw, _: ocaml::Raw) -> i32 {
// Always return equal. We can use this for sanity checks, and anything else using this
// would be broken anyway.
// Always return equal. We can use this for sanity checks, and
// anything else using this would be broken anyway.
0
}
}
Expand Down Expand Up @@ -49,12 +49,14 @@ macro_rules! impl_caml_pointer {
unsafe {
// Wholely unsafe, Batman!
// We would use [`get_mut_unchecked`] here, but it is nightly-only.
// Instead, we get coerce our constant pointer to a mutable pointer, in the knowledge
// that
// * all of our mutations called from OCaml are blocking, so we won't have multiple
// live mutable references live simultaneously, and
// * the underlying pointer is in the correct state to be mutable, since we can call
// [`get_mut_unchecked`] in nightly, or can call [`get_mut`] and unwrap if this is
// Instead, we get coerce our constant pointer to a mutable
// pointer, in the knowledge that
// * all of our mutations called from OCaml are blocking, so
// we won't have multiple live mutable references live
// simultaneously, and
// * the underlying pointer is in the correct state to be
// mutable, since we can call [`get_mut_unchecked`] in
// nightly, or can call [`get_mut`] and unwrap if this is
// the only live reference.
&mut *(((&*self.0) as *const Self::Target) as *mut Self::Target)
}
Expand Down
55 changes: 24 additions & 31 deletions src/lib/crypto/kimchi_bindings/stubs/src/lagrange_basis.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ fn add_lagrange_basis_with_cache<G: CommitmentCurve, C: LagrangeCache<G>>(
}
if let Some(basis) = cache.load_lagrange_basis_from_cache(srs.g.len(), &domain) {
srs.lagrange_bases.get_or_generate(n, || basis);
return;
} else {
let basis = srs.get_lagrange_basis(domain);
cache.cache_lagrange_basis(srs.g.len(), &domain, basis);
Expand Down Expand Up @@ -81,7 +80,7 @@ mod cache {
&self,
srs_length: usize,
domain: &D<G::ScalarField>,
basis: &Vec<PolyComm<G>>,
basis: &[PolyComm<G>],
);
}

Expand All @@ -100,10 +99,9 @@ mod cache {
}
}

/*
The FileCache implementation uses a directory as a cache for the Lagrange basis hash map --
i.e every file corresponds to a Lagrange basis for a given G-basis and domain size.
*/
// The FileCache implementation uses a directory as a cache for the Lagrange
// basis hash map -- i.e every file corresponds to a Lagrange basis for a
// given G-basis and domain size.
impl<G: AffineRepr> LagrangeCache<G> for FileCache<G> {
type CacheKey = PathBuf;

Expand All @@ -115,7 +113,7 @@ mod cache {
self.cache_dir.clone().join(format!(
"lagrange_basis_{:}-{:}",
srs_length,
domain.size().to_string()
domain.size()
))
}

Expand All @@ -126,14 +124,13 @@ mod cache {
) -> Option<Vec<PolyComm<G>>> {
let cache_key = self.lagrange_basis_cache_key(srs_length, domain);
if Path::exists(&cache_key) {
let f = File::open(cache_key.clone()).expect(&format!(
"Missing lagrange basis cache file {:?}",
cache_key
));
let basis: Vec<PolyComm<G>> = rmp_serde::decode::from_read(f).expect(&format!(
"Error decoding lagrange cache file {:?}",
cache_key
));
let f = File::open(cache_key.clone()).unwrap_or_else(|_| {
panic!("Missing lagrange basis cache file {:?}", cache_key)
});
let basis: Vec<PolyComm<G>> =
rmp_serde::decode::from_read(f).unwrap_or_else(|_| {
panic!("Error decoding lagrange cache file {:?}", cache_key)
});
Some(basis)
} else {
None
Expand All @@ -144,26 +141,22 @@ mod cache {
&self,
srs_length: usize,
domain: &D<G::ScalarField>,
basis: &Vec<PolyComm<G>>,
basis: &[PolyComm<G>],
) {
let cache_key = self.lagrange_basis_cache_key(srs_length, domain);
if Path::exists(&cache_key) {
return;
} else {
let mut f = File::create(cache_key.clone()).expect(&format!(
"Error creating lagrabnge basis cache file {:?}",
cache_key
));
rmp_serde::encode::write(&mut f, basis).expect(&format!(
"Error encoding lagrange basis to file {:?}",
cache_key
));
if !Path::exists(&cache_key) {
let mut f = File::create(cache_key.clone()).unwrap_or_else(|_| {
panic!("Error creating lagrabnge basis cache file {:?}", cache_key)
});
rmp_serde::encode::write(&mut f, basis).unwrap_or_else(|_| {
panic!("Error encoding lagrange basis to file {:?}", cache_key)
});
}
}
}

// The following two caches are all that we need for mina tests. These will not be initialized unless they are
// explicitly called.
// The following two caches are all that we need for mina tests. These will
// not be initialized unless they are explicitly called.
static VESTA_FILE_CACHE: Lazy<FileCache<Vesta>> = Lazy::new(|| {
let cache_base_dir: String =
env::var("LAGRANGE_CACHE_DIR").expect("LAGRANGE_CACHE_DIR missing in env");
Expand All @@ -175,7 +168,7 @@ mod cache {
});

pub fn get_vesta_file_cache() -> &'static FileCache<Vesta> {
&*VESTA_FILE_CACHE
&VESTA_FILE_CACHE
}

static PALLAS_FILE_CACHE: Lazy<FileCache<Pallas>> = Lazy::new(|| {
Expand All @@ -189,6 +182,6 @@ mod cache {
});

pub fn get_pallas_file_cache() -> &'static FileCache<Pallas> {
&*PALLAS_FILE_CACHE
&PALLAS_FILE_CACHE
}
}
4 changes: 2 additions & 2 deletions src/lib/crypto/kimchi_bindings/stubs/src/linearization.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ pub fn linearization_strings<F: ark_ff::PrimeField>(
mut index_terms,
} = linearization.linearize(evaluated_cols).unwrap();

// HashMap deliberately uses an unstable order; here we sort to ensure that the output is
// consistent when printing.
// HashMap deliberately uses an unstable order; here we sort to ensure that
// the output is consistent when printing.
index_terms.sort_by(|(x, _), (y, _)| x.cmp(y));

let constant = constant_term.ocaml_str();
Expand Down
15 changes: 10 additions & 5 deletions src/lib/crypto/kimchi_bindings/stubs/src/pasta_fp_plonk_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,8 @@ pub fn caml_pasta_fp_plonk_proof_example_with_ffadd(
for _ in 0..4 {
CircuitGate::extend_multi_range_check(&mut gates, &mut curr_row);
}
// Connect the witnesses of the addition to the corresponding range checks
// Connect the witnesses of the addition to the corresponding range
// checks
gates.connect_ffadd_range_checks(1, Some(4), Some(8), 12);
// Connect the bound check range checks
gates.connect_ffadd_range_checks(2, None, None, 16);
Expand Down Expand Up @@ -701,7 +702,8 @@ pub fn caml_pasta_fp_plonk_proof_example_with_ffadd(
witness
};

// not sure if theres a smarter way instead of the double unwrap, but should be fine in the test
// not sure if theres a smarter way instead of the double unwrap, but should
// be fine in the test
let cs = ConstraintSystem::<Fp>::create(gates)
.public(num_public_inputs)
.build()
Expand Down Expand Up @@ -762,7 +764,8 @@ pub fn caml_pasta_fp_plonk_proof_example_with_xor(
None,
));
}
// 1 XOR of 128 bits. This will create 8 Xor16 gates and a Generic final gate with all zeros.
// 1 XOR of 128 bits. This will create 8 Xor16 gates and a Generic final
// gate with all zeros.
CircuitGate::<Fp>::extend_xor_gadget(&mut gates, 128);
// connect public inputs to the inputs of the XOR
gates.connect_cell_pair((0, 0), (2, 0));
Expand All @@ -789,7 +792,8 @@ pub fn caml_pasta_fp_plonk_proof_example_with_xor(
cols
};

// not sure if theres a smarter way instead of the double unwrap, but should be fine in the test
// not sure if theres a smarter way instead of the double unwrap, but should
// be fine in the test
let cs = ConstraintSystem::<Fp>::create(gates)
.public(num_public_inputs)
.build()
Expand Down Expand Up @@ -882,7 +886,8 @@ pub fn caml_pasta_fp_plonk_proof_example_with_rot(
cols
};

// not sure if theres a smarter way instead of the double unwrap, but should be fine in the test
// not sure if theres a smarter way instead of the double unwrap, but should
// be fine in the test
let cs = ConstraintSystem::<Fp>::create(gates)
.public(num_public_inputs)
.build()
Expand Down
13 changes: 9 additions & 4 deletions src/lib/crypto/kimchi_bindings/stubs/src/pasta_fq_plonk_proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,12 +78,17 @@ pub fn caml_pasta_fq_plonk_proof_create(
// public input
let public_input = witness[0][0..index.cs.public].to_vec();

// NB: This method is designed only to be used by tests. However, since creating a new reference will cause `drop` to be called on it once we are done with it. Since `drop` calls `caml_shutdown` internally, we *really, really* do not want to do this, but we have no other way to get at the active runtime.
// TODO: There's actually a way to get a handle to the runtime as a function argument. Switch
// to doing this instead.
// NB: This method is designed only to be used by tests. However, since
// creating a new reference will cause `drop` to be called on it once we are
// done with it. Since `drop` calls `caml_shutdown` internally, we *really,
// really* do not want to do this, but we have no other way to get at the
// active runtime.
// TODO: There's actually a way to get a handle to the runtime as a function
// argument. Switch to doing this instead.
let runtime = unsafe { ocaml::Runtime::recover_handle() };

// Release the runtime lock so that other threads can run using it while we generate the proof.
// Release the runtime lock so that other threads can run using it while we
// generate the proof.
runtime.releasing_runtime(|| {
let group_map = GroupMap::<Fp>::setup();
let proof = ProverProof::create_recursive::<
Expand Down
3 changes: 2 additions & 1 deletion src/lib/crypto/kimchi_bindings/stubs/src/projective.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,8 @@ macro_rules! impl_projective {
#[ocaml_gen::func]
#[ocaml::func]
pub fn [<caml_ $name:snake _rng>](i: ocaml::Int) -> $GroupProjective {
// We only care about entropy here, so we force a conversion i32 -> u32.
// We only care about entropy here, so we force a conversion i32
// -> u32.
let i: u64 = (i as u32).into();
let mut rng: StdRng = rand::SeedableRng::seed_from_u64(i);
let proj: $Projective = UniformRand::rand(&mut rng);
Expand Down
Loading

0 comments on commit 9739c56

Please sign in to comment.