From c105e966e4d907e15b0da0e2e5940ff3d2ddae84 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 15 Jun 2025 15:05:18 +0000 Subject: [PATCH 1/8] tr: add `into_control_block` accessor to TrSpendInfoIterItem --- src/descriptor/tr/spend_info.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/descriptor/tr/spend_info.rs b/src/descriptor/tr/spend_info.rs index b9f1d09c9..3bace2d18 100644 --- a/src/descriptor/tr/spend_info.rs +++ b/src/descriptor/tr/spend_info.rs @@ -339,11 +339,19 @@ impl<'sp, Pk: MiniscriptKey> TrSpendInfoIterItem<'sp, Pk> { /// The control block of this leaf. /// /// Unlike the other data obtainable from [`TrSpendInfoIterItem`], this one is computed - /// dynamically during iteration and therefore will not outlive the iterator item. If - /// you need access to multiple control blocks at once, will likely need to clone and - /// store them separately. + /// dynamically during iteration and therefore will not outlive the iterator item. See + /// [`Self::into_control_block`], which consumes the iterator item but will give you an + /// owned copy of the control block. + /// + /// If you need access to multiple control blocks at once, you may need to `clone` the + /// return value of this method, or call [`Self::into_control_block`], and store the + /// result in a separate container. #[inline] pub fn control_block(&self) -> &ControlBlock { &self.control_block } + + /// Extract the control block of this leaf, consuming `self`. + #[inline] + pub fn into_control_block(self) -> ControlBlock { self.control_block } } #[cfg(test)] From 18402ee8954be91f7866c296cb7ecba24443cc65 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 15 Jun 2025 15:08:49 +0000 Subject: [PATCH 2/8] tr: cache TrSpendInfo when computing it This was an oversight in #815. 
--- src/descriptor/tr/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/descriptor/tr/mod.rs b/src/descriptor/tr/mod.rs index c7e3d7f7f..0e6839a2a 100644 --- a/src/descriptor/tr/mod.rs +++ b/src/descriptor/tr/mod.rs @@ -125,11 +125,19 @@ impl Tr { /// This data is needed to compute the Taproot output, so this method is implicitly /// called through [`Self::script_pubkey`], [`Self::address`], etc. It is also needed /// to compute the hash needed to sign the output. - pub fn spend_info(&self) -> TrSpendInfo + pub fn spend_info(&self) -> Arc> where Pk: ToPublicKey, { - TrSpendInfo::from_tr(self) + let mut lock = self.spend_info.lock().unwrap(); + match *lock { + Some(ref res) => Arc::clone(res), + None => { + let arc = Arc::new(TrSpendInfo::from_tr(self)); + *lock = Some(Arc::clone(&arc)); + arc + } + } } /// Checks whether the descriptor is safe. From 1bb43e92c2821dd359826acae3fe3996243cc8e0 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 15 Jun 2025 15:15:46 +0000 Subject: [PATCH 3/8] fuzz/generate-files: support jujutsu Generally we try not to add special-case logic for weird tools, but this is a really simple change and I think that jujutsu is going to become pretty popular among rust-bitcoin developers. This is some repeated code which looks like it should be factored out into a utility function, but we can't really do that because we need to figure out the workspace root before we can even locate utility functions. 
--- fuzz/cycle.sh | 2 +- fuzz/fuzz-util.sh | 2 +- fuzz/fuzz.sh | 2 +- fuzz/generate-files.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fuzz/cycle.sh b/fuzz/cycle.sh index a1d7f48e6..ab27d1b00 100755 --- a/fuzz/cycle.sh +++ b/fuzz/cycle.sh @@ -6,7 +6,7 @@ # For hfuzz options see https://github.com/google/honggfuzz/blob/master/docs/USAGE.md set -e -REPO_DIR=$(git rev-parse --show-toplevel) +REPO_DIR=$(git rev-parse --show-toplevel || jj workspace root) # can't find the file because of the ENV var # shellcheck source=/dev/null source "$REPO_DIR/fuzz/fuzz-util.sh" diff --git a/fuzz/fuzz-util.sh b/fuzz/fuzz-util.sh index dcce45256..9627c980b 100755 --- a/fuzz/fuzz-util.sh +++ b/fuzz/fuzz-util.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -REPO_DIR=$(git rev-parse --show-toplevel) +REPO_DIR=$(git rev-parse --show-toplevel || jj workspace root) # Sort order is effected by locale. See `man sort`. # > Set LC_ALL=C to get the traditional sort order that uses native byte values. 
diff --git a/fuzz/fuzz.sh b/fuzz/fuzz.sh index 1932f0413..75a9789a5 100755 --- a/fuzz/fuzz.sh +++ b/fuzz/fuzz.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -ex -REPO_DIR=$(git rev-parse --show-toplevel) +REPO_DIR=$(git rev-parse --show-toplevel || jj workspace root) # can't find the file because of the ENV var # shellcheck source=/dev/null diff --git a/fuzz/generate-files.sh b/fuzz/generate-files.sh index 973568e99..fc3c18a91 100755 --- a/fuzz/generate-files.sh +++ b/fuzz/generate-files.sh @@ -2,7 +2,7 @@ set -e -REPO_DIR=$(git rev-parse --show-toplevel) +REPO_DIR=$(git rev-parse --show-toplevel || jj workspace root) # can't find the file because of the ENV var # shellcheck source=/dev/null From e5a195181add19864fa0b19c4f6e7ffc5b3d5127 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 11 May 2025 15:08:25 +0000 Subject: [PATCH 4/8] compiler: forbid fragment probabilities of 0 We can cause panics in the compiler comparing OrdF64s which are NaN, which we can produce by parsing ors where every child has weight 0. Simply make it a parse error to have any zero-probability branches. In theory somebody might want an or where some fragments have positive probability and others are 0, indicating "maximally pessimize these". But it has never occurred to me to do this, and it's easy to simulate this by using 100s and 1s, say. 
--- .github/workflows/cron-daily-fuzz.yml | 1 + fuzz/Cargo.toml | 6 ++- fuzz/fuzz_targets/regression_compiler.rs | 66 ++++++++++++++++++++++++ fuzz/generate-files.sh | 2 +- src/expression/error.rs | 11 ++++ src/expression/mod.rs | 17 ++++-- src/policy/concrete.rs | 9 +++- 7 files changed, 105 insertions(+), 7 deletions(-) create mode 100644 fuzz/fuzz_targets/regression_compiler.rs diff --git a/.github/workflows/cron-daily-fuzz.yml b/.github/workflows/cron-daily-fuzz.yml index dd7ab5da5..515cbd811 100644 --- a/.github/workflows/cron-daily-fuzz.yml +++ b/.github/workflows/cron-daily-fuzz.yml @@ -24,6 +24,7 @@ miniscript_satisfy, parse_descriptor, parse_descriptor_priv, parse_descriptor_secret, +regression_compiler, regression_descriptor_parse, regression_taptree, roundtrip_concrete, diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index b1722c5df..f7d6d8b08 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -14,7 +14,7 @@ honggfuzz = { version = "0.5.56", default-features = false } # We shouldn't need an explicit version on the next line, but Andrew's tools # choke on it otherwise. 
See https://github.com/nix-community/crate2nix/issues/373 miniscript = { path = "..", features = [ "compiler" ], version = "13.0" } -old_miniscript = { package = "miniscript", version = "12.3" } +old_miniscript = { package = "miniscript", features = [ "compiler" ], version = "12.3" } regex = "1.0" @@ -42,6 +42,10 @@ path = "fuzz_targets/parse_descriptor_priv.rs" name = "parse_descriptor_secret" path = "fuzz_targets/parse_descriptor_secret.rs" +[[bin]] +name = "regression_compiler" +path = "fuzz_targets/regression_compiler.rs" + [[bin]] name = "regression_descriptor_parse" path = "fuzz_targets/regression_descriptor_parse.rs" diff --git a/fuzz/fuzz_targets/regression_compiler.rs b/fuzz/fuzz_targets/regression_compiler.rs new file mode 100644 index 000000000..8703e7f3f --- /dev/null +++ b/fuzz/fuzz_targets/regression_compiler.rs @@ -0,0 +1,66 @@ +use descriptor_fuzz::FuzzPk; +use honggfuzz::fuzz; +use miniscript::{policy, ParseError, ParseNumError}; +use old_miniscript::policy as old_policy; + +type Policy = policy::Concrete; +type OldPolicy = old_policy::Concrete; + +fn do_test(data: &[u8]) { + let data_str = String::from_utf8_lossy(data); + match (data_str.parse::(), data_str.parse::()) { + (Err(_), Err(_)) => {} + (Ok(x), Err(e)) => panic!("new logic parses {} as {:?}, old fails with {}", data_str, x, e), + // These is anew parse error + ( + Err(miniscript::Error::Parse(ParseError::Num(ParseNumError::IllegalZero { .. 
}))), + Ok(_), + ) => {} + (Err(e), Ok(x)) => { + panic!("old logic parses {} as {:?}, new fails with {:?}", data_str, x, e) + } + (Ok(new), Ok(old)) => { + assert_eq!( + old.to_string(), + new.to_string(), + "input {} (left is old, right is new)", + data_str + ); + + let comp = new.compile::(); + let old_comp = old.compile::(); + + match (comp, old_comp) { + (Err(_), Err(_)) => {} + (Ok(x), Err(e)) => { + panic!("new logic compiles {} as {:?}, old fails with {}", data_str, x, e) + } + (Err(e), Ok(x)) => { + panic!("old logic compiles {} as {:?}, new fails with {}", data_str, x, e) + } + (Ok(new), Ok(old)) => { + assert_eq!( + old.to_string(), + new.to_string(), + "input {} (left is old, right is new)", + data_str + ); + } + } + } + } +} + +fn main() { + loop { + fuzz!(|data| { + do_test(data); + }); + } +} + +#[cfg(test)] +mod tests { + #[test] + fn duplicate_crash() { crate::do_test(b"or(0@pk(09),0@TRIVIAL)") } +} diff --git a/fuzz/generate-files.sh b/fuzz/generate-files.sh index fc3c18a91..d9ce4d408 100755 --- a/fuzz/generate-files.sh +++ b/fuzz/generate-files.sh @@ -26,7 +26,7 @@ honggfuzz = { version = "0.5.56", default-features = false } # We shouldn't need an explicit version on the next line, but Andrew's tools # choke on it otherwise. See https://github.com/nix-community/crate2nix/issues/373 miniscript = { path = "..", features = [ "compiler" ], version = "13.0" } -old_miniscript = { package = "miniscript", version = "12.3" } +old_miniscript = { package = "miniscript", features = [ "compiler" ], version = "12.3" } regex = "1.0" EOF diff --git a/src/expression/error.rs b/src/expression/error.rs index 781ab8dd6..bd62863d4 100644 --- a/src/expression/error.rs +++ b/src/expression/error.rs @@ -197,6 +197,13 @@ pub enum ParseNumError { StdParse(num::ParseIntError), /// Number had a leading zero, + or -. InvalidLeadingDigit(char), + /// Number was 0 in a context where zero is not allowed. 
+ /// + /// Be aware that locktimes have their own error types and do not use this variant. + IllegalZero { + /// A description of the location where 0 was not allowed. + context: &'static str, + }, } impl fmt::Display for ParseNumError { @@ -206,6 +213,9 @@ impl fmt::Display for ParseNumError { ParseNumError::InvalidLeadingDigit(ch) => { write!(f, "numbers must start with 1-9, not {}", ch) } + ParseNumError::IllegalZero { context } => { + write!(f, "{} may not be 0", context) + } } } } @@ -216,6 +226,7 @@ impl std::error::Error for ParseNumError { match self { ParseNumError::StdParse(ref e) => Some(e), ParseNumError::InvalidLeadingDigit(..) => None, + ParseNumError::IllegalZero { .. } => None, } } } diff --git a/src/expression/mod.rs b/src/expression/mod.rs index 7f7057095..6bfdfd055 100644 --- a/src/expression/mod.rs +++ b/src/expression/mod.rs @@ -678,11 +678,10 @@ impl<'a> Tree<'a> { } } -/// Parse a string as a u32, for timelocks or thresholds -pub fn parse_num(s: &str) -> Result { +/// Parse a string as a u32, forbidding zero. +pub fn parse_num_nonzero(s: &str, context: &'static str) -> Result { if s == "0" { - // Special-case 0 since it is the only number which may start with a leading zero. - return Ok(0); + return Err(ParseNumError::IllegalZero { context }); } if let Some(ch) = s.chars().next() { if !('1'..='9').contains(&ch) { @@ -692,6 +691,15 @@ pub fn parse_num(s: &str) -> Result { u32::from_str(s).map_err(ParseNumError::StdParse) } +/// Parse a string as a u32, for timelocks or thresholds +pub fn parse_num(s: &str) -> Result { + if s == "0" { + // Special-case 0 since it is the only number which may start with a leading zero. 
+ return Ok(0); + } + parse_num_nonzero(s, "") +} + #[cfg(test)] mod tests { use super::*; @@ -772,6 +780,7 @@ mod tests { #[test] fn test_parse_num() { assert!(parse_num("0").is_ok()); + assert!(parse_num_nonzero("0", "").is_err()); assert!(parse_num("00").is_err()); assert!(parse_num("0000").is_err()); assert!(parse_num("06").is_err()); diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index ce7d168c6..4101ce5e6 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -883,7 +883,7 @@ impl expression::FromTree for Policy { let frag_prob = match frag_prob { None => 1, - Some(s) => expression::parse_num(s) + Some(s) => expression::parse_num_nonzero(s, "fragment probability") .map_err(From::from) .map_err(Error::Parse)? as usize, }; @@ -1231,4 +1231,11 @@ mod tests { // This implicitly tests the check_timelocks API (has height and time locks). let _ = Policy::::from_str("and(after(10),after(500000000))").unwrap(); } + + #[test] + fn parse_zero_disjunction() { + "or(0@pk(09),0@TRIVIAL)" + .parse::>() + .unwrap_err(); + } } From acd78c1b4d43b979584e0dc1b1ce245346aa4504 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 11 May 2025 14:14:38 +0000 Subject: [PATCH 5/8] compiler: revert malleability check to simpler version In the compiler we have a function which inserts the "cast closure" of an expression. It accepts the expression as a candidate as well as a large list of cast-wrapped variants of the expression. For each candidate it rejects it if it is consensus-invalid or has malleable satisfactions. In the original compiler (PR #29) it would decide whether an expression was malleable by checking ty.mall.non_malleable on the miniscript. It had the confusing comment "return malleable types directly" which I believe was supposed to mean "return early if the node is malleable". The comment also observes that "If a elem is malleable, all the casts to it are also going to be malleable" but the code doesn't actually use this fact anywhere. 
It always tries all the casts. But ok, whatever. Later in #97 (add context to Miniscript) we weakened the malleability check in a bizarre way -- now it checks that the node is malleable according to the type system AND that it is non-malleable according to the context system. That is, we whitelist or_i and d: as "acceptable malleability" in a legacy/bare context. This change, which was not commented on in the original PR, seems like it's just totally wrong. I suspect it was supposed to be an or: if the node is malleable according to the type system OR according to the contextual checks, then skip it. But I'm unsure. (The check is also wrong because the contextual check is wrong; see the last commit.) Anyway, after some fairly heavy fuzzing I was unable to find any instance where this check affects compiler output. I suspect that it's merely an optimization. So change it back to the stronger, simpler check and update the comment. --- src/policy/compiler.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 2d97b6886..f8806f6ea 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -674,9 +674,10 @@ fn insert_elem( sat_prob: f64, dissat_prob: Option, ) -> bool { - // return malleable types directly. If a elem is malleable under current context, - // all the casts to it are also going to be malleable - if !elem.ms.ty.mall.non_malleable && Ctx::check_terminal_non_malleable(&elem.ms.node).is_ok() { + // We check before compiling that non-malleable satisfactions exist, and it appears that + // there are no cases when malleable satisfactions beat non-malleable ones (and if there + // are, we don't want to use them). Anyway, detect these and early return. 
+ if !elem.ms.ty.mall.non_malleable { return false; } From 342b2b44b749653a70af47a78effd3ae8c7172a5 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sat, 17 May 2025 14:29:06 +0000 Subject: [PATCH 6/8] miniscript: add infallible multi() and multi_a() constructors These can't be constfs but they're still useful. --- src/miniscript/mod.rs | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 6508ffa25..9dc964fa5 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -52,10 +52,10 @@ mod ms_tests; mod private { use core::marker::PhantomData; - use super::types::{ExtData, Type}; + use super::limits::{MAX_PUBKEYS_IN_CHECKSIGADD, MAX_PUBKEYS_PER_MULTISIG}; + use super::types::{self, ExtData, Type}; use crate::iter::TreeLike as _; pub use crate::miniscript::context::ScriptContext; - use crate::miniscript::types; use crate::prelude::sync::Arc; use crate::{AbsLockTime, Error, MiniscriptKey, RelLockTime, Terminal, MAX_RECURSION_DEPTH}; @@ -270,6 +270,28 @@ mod private { } } + // non-const because Thresh::n is not becasue Vec::len is not (needs Rust 1.87) + /// The `multi` combinator. + pub fn multi(thresh: crate::Threshold) -> Self { + Self { + ty: types::Type::multi(), + ext: types::extra_props::ExtData::multi(thresh.k(), thresh.n()), + node: Terminal::Multi(thresh), + phantom: PhantomData, + } + } + + // non-const because Thresh::n is not becasue Vec::len is not + /// The `multi` combinator. + pub fn multi_a(thresh: crate::Threshold) -> Self { + Self { + ty: types::Type::multi_a(), + ext: types::extra_props::ExtData::multi_a(thresh.k(), thresh.n()), + node: Terminal::MultiA(thresh), + phantom: PhantomData, + } + } + /// Add type information(Type and Extdata) to Miniscript based on /// `AstElem` fragment. Dependent on display and clone because of Error /// Display code of type_check. 
From 385234ca22954160e87043cf7219116b4265ea2c Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sat, 17 May 2025 14:12:37 +0000 Subject: [PATCH 7/8] compiler: use Miniscript terminal constructors directly This eliminates some expect()s and is plausibly faster. --- src/policy/compiler.rs | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index f8806f6ea..e9042563f 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -520,11 +520,8 @@ impl AstElemExt { } impl AstElemExt { - fn terminal(ast: Terminal) -> AstElemExt { - AstElemExt { - comp_ext_data: CompilerExtData::type_check(&ast), - ms: Arc::new(Miniscript::from_ast(ast).expect("Terminal creation must always succeed")), - } + fn terminal(ms: Miniscript) -> AstElemExt { + AstElemExt { comp_ext_data: CompilerExtData::type_check(ms.as_inner()), ms: Arc::new(ms) } } fn binary( @@ -817,29 +814,29 @@ where match *policy { Concrete::Unsatisfiable => { - insert_wrap!(AstElemExt::terminal(Terminal::False)); + insert_wrap!(AstElemExt::terminal(Miniscript::FALSE)); } Concrete::Trivial => { - insert_wrap!(AstElemExt::terminal(Terminal::True)); + insert_wrap!(AstElemExt::terminal(Miniscript::TRUE)); } Concrete::Key(ref pk) => { - insert_wrap!(AstElemExt::terminal(Terminal::PkH(pk.clone()))); - insert_wrap!(AstElemExt::terminal(Terminal::PkK(pk.clone()))); + insert_wrap!(AstElemExt::terminal(Miniscript::pk_h(pk.clone()))); + insert_wrap!(AstElemExt::terminal(Miniscript::pk_k(pk.clone()))); } - Concrete::After(n) => insert_wrap!(AstElemExt::terminal(Terminal::After(n))), - Concrete::Older(n) => insert_wrap!(AstElemExt::terminal(Terminal::Older(n))), + Concrete::After(n) => insert_wrap!(AstElemExt::terminal(Miniscript::after(n))), + Concrete::Older(n) => insert_wrap!(AstElemExt::terminal(Miniscript::older(n))), Concrete::Sha256(ref hash) => { - insert_wrap!(AstElemExt::terminal(Terminal::Sha256(hash.clone()))) + 
insert_wrap!(AstElemExt::terminal(Miniscript::sha256(hash.clone()))) } // Satisfaction-cost + script-cost Concrete::Hash256(ref hash) => { - insert_wrap!(AstElemExt::terminal(Terminal::Hash256(hash.clone()))) + insert_wrap!(AstElemExt::terminal(Miniscript::hash256(hash.clone()))) } Concrete::Ripemd160(ref hash) => { - insert_wrap!(AstElemExt::terminal(Terminal::Ripemd160(hash.clone()))) + insert_wrap!(AstElemExt::terminal(Miniscript::ripemd160(hash.clone()))) } Concrete::Hash160(ref hash) => { - insert_wrap!(AstElemExt::terminal(Terminal::Hash160(hash.clone()))) + insert_wrap!(AstElemExt::terminal(Miniscript::hash160(hash.clone()))) } Concrete::And(ref subs) => { assert_eq!(subs.len(), 2, "and takes 2 args"); @@ -859,7 +856,7 @@ where let mut zero_comp = BTreeMap::new(); zero_comp.insert( CompilationKey::from_type(Type::FALSE, ExtData::FALSE.has_free_verify, dissat_prob), - AstElemExt::terminal(Terminal::False), + AstElemExt::terminal(Miniscript::FALSE), ); compile_tern!(&mut left, &mut q_zero_right, &mut zero_comp, [1.0, 0.0]); compile_tern!(&mut right, &mut q_zero_left, &mut zero_comp, [1.0, 0.0]); @@ -1046,12 +1043,12 @@ where match Ctx::sig_type() { SigType::Schnorr => { if let Ok(pk_thresh) = pk_thresh.set_maximum() { - insert_wrap!(AstElemExt::terminal(Terminal::MultiA(pk_thresh))) + insert_wrap!(AstElemExt::terminal(Miniscript::multi_a(pk_thresh))) } } SigType::Ecdsa => { if let Ok(pk_thresh) = pk_thresh.set_maximum() { - insert_wrap!(AstElemExt::terminal(Terminal::Multi(pk_thresh))) + insert_wrap!(AstElemExt::terminal(Miniscript::multi(pk_thresh))) } } } From c317cd7d0cf6089fea33949a145fe32e80d7fd99 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sat, 10 May 2025 15:38:19 +0000 Subject: [PATCH 8/8] compiler: pull non-binary argument errors from PolicyError into CompilerError The limitation that we have only binary and()s and or()s is really just a compiler limit; it's neither needed nor enforced when parsing policies, printing them, lifting them, etc 
etc. Maybe we _want_ to enforce this at the type level, but we don't now, and given that, these error variants are in the wrong place (and will get in the way of the refactors in the next commit). --- src/policy/compiler.rs | 14 +++++++++++- src/policy/concrete.rs | 52 ++++++++++++++++++++---------------------- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index e9042563f..9775320d1 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -41,6 +41,10 @@ impl Ord for OrdF64 { /// Detailed error type for compiler. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub enum CompilerError { + /// `And` fragments only support two args. + NonBinaryArgAnd, + /// `Or` fragments only support two args. + NonBinaryArgOr, /// Compiler has non-safe input policy. TopLevelNonSafe, /// Non-Malleable compilation does exists for the given sub-policy. @@ -67,6 +71,12 @@ pub enum CompilerError { impl fmt::Display for CompilerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { + CompilerError::NonBinaryArgAnd => { + f.write_str("And policy fragment must take 2 arguments") + } + CompilerError::NonBinaryArgOr => { + f.write_str("Or policy fragment must take 2 arguments") + } CompilerError::TopLevelNonSafe => { f.write_str("Top Level script is not safe on some spendpath") } @@ -93,7 +103,9 @@ impl error::Error for CompilerError { use self::CompilerError::*; match self { - TopLevelNonSafe + NonBinaryArgAnd + | NonBinaryArgOr + | TopLevelNonSafe | ImpossibleNonMalleableCompilation | LimitsExceeded | NoInternalKey diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 4101ce5e6..01ac117a2 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -72,10 +72,6 @@ pub enum Policy { /// Detailed error type for concrete policies. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub enum PolicyError { - /// `And` fragments only support two args. 
- NonBinaryArgAnd, - /// `Or` fragments only support two args. - NonBinaryArgOr, /// Cannot lift policies that have a combination of height and timelocks. HeightTimelockCombination, /// Duplicate Public Keys. @@ -100,10 +96,6 @@ pub enum DescriptorCtx { impl fmt::Display for PolicyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - PolicyError::NonBinaryArgAnd => { - f.write_str("And policy fragment must take 2 arguments") - } - PolicyError::NonBinaryArgOr => f.write_str("Or policy fragment must take 2 arguments"), PolicyError::HeightTimelockCombination => { f.write_str("Cannot lift policies that have a heightlock and timelock combination") } @@ -118,7 +110,7 @@ impl error::Error for PolicyError { use self::PolicyError::*; match self { - NonBinaryArgAnd | NonBinaryArgOr | HeightTimelockCombination | DuplicatePubKeys => None, + HeightTimelockCombination | DuplicatePubKeys => None, } } } @@ -157,6 +149,26 @@ impl<'p, Pk: MiniscriptKey> Iterator for TapleafProbabilityIter<'p, Pk> { } impl Policy { + #[cfg(feature = "compiler")] + fn check_binary_ops(&self) -> Result<(), CompilerError> { + for policy in self.pre_order_iter() { + match *policy { + Policy::And(ref subs) => { + if subs.len() != 2 { + return Err(CompilerError::NonBinaryArgAnd); + } + } + Policy::Or(ref subs) => { + if subs.len() != 2 { + return Err(CompilerError::NonBinaryArgOr); + } + } + _ => {} + } + } + Ok(()) + } + /// Flattens the [`Policy`] tree structure into an iterator of tuples `(leaf script, leaf probability)` /// with leaf probabilities corresponding to odds for each sub-branch in the policy. 
/// We calculate the probability of selecting the sub-branch at every level and calculate the @@ -223,6 +235,7 @@ impl Policy { #[cfg(feature = "compiler")] pub fn compile_tr(&self, unspendable_key: Option) -> Result, CompilerError> { self.is_valid().map_err(CompilerError::PolicyError)?; + self.check_binary_ops()?; match self.is_safe_nonmalleable() { (false, _) => Err(CompilerError::TopLevelNonSafe), (_, false) => Err(CompilerError::ImpossibleNonMalleableCompilation), @@ -286,6 +299,7 @@ impl Policy { unspendable_key: Option, ) -> Result, Error> { self.is_valid().map_err(Error::ConcretePolicy)?; + self.check_binary_ops()?; match self.is_safe_nonmalleable() { (false, _) => Err(Error::from(CompilerError::TopLevelNonSafe)), (_, false) => Err(Error::from(CompilerError::ImpossibleNonMalleableCompilation)), @@ -339,6 +353,7 @@ impl Policy { desc_ctx: DescriptorCtx, ) -> Result, Error> { self.is_valid().map_err(Error::ConcretePolicy)?; + self.check_binary_ops()?; match self.is_safe_nonmalleable() { (false, _) => Err(Error::from(CompilerError::TopLevelNonSafe)), (_, false) => Err(Error::from(CompilerError::ImpossibleNonMalleableCompilation)), @@ -364,6 +379,7 @@ impl Policy { #[cfg(feature = "compiler")] pub fn compile(&self) -> Result, CompilerError> { self.is_valid()?; + self.check_binary_ops()?; match self.is_safe_nonmalleable() { (false, _) => Err(CompilerError::TopLevelNonSafe), (_, false) => Err(CompilerError::ImpossibleNonMalleableCompilation), @@ -684,26 +700,8 @@ impl Policy { /// Validity condition also checks whether there is a possible satisfaction /// combination of timelocks and heightlocks pub fn is_valid(&self) -> Result<(), PolicyError> { - use Policy::*; - self.check_timelocks()?; self.check_duplicate_keys()?; - - for policy in self.pre_order_iter() { - match *policy { - And(ref subs) => { - if subs.len() != 2 { - return Err(PolicyError::NonBinaryArgAnd); - } - } - Or(ref subs) => { - if subs.len() != 2 { - return Err(PolicyError::NonBinaryArgOr); - } - } 
- _ => {} - } - } Ok(()) }