From 1430acca38ac55f0334c71b3a91671df69f106f7 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Wed, 11 Mar 2026 10:47:30 +0100 Subject: [PATCH 01/11] feat: add deadline.rs --- Cargo.lock | 669 ++++++++++++++++++------------------ crates/core/Cargo.toml | 13 +- crates/core/src/deadline.rs | 563 ++++++++++++++++++++++++++++++ crates/core/src/lib.rs | 3 + 4 files changed, 918 insertions(+), 330 deletions(-) create mode 100644 crates/core/src/deadline.rs diff --git a/Cargo.lock b/Cargo.lock index aace3e87..7373e0d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4973038846323e4e69a433916522195dce2947770076c03078fc21c80ea0f1c4" +checksum = "07dc44b606f29348ce7c127e7f872a6d2df3cfeff85b7d6bba62faca75112fdd" dependencies = [ "alloy-consensus", "alloy-contract", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.2.30" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f374d3c6d729268bbe2d0e0ff992bb97898b2df756691a62ee1d5f0506bc39" +checksum = "6d9d22005bf31b018f31ef9ecadb5d2c39cf4f6acc8db0456f72c815f3d7f757" dependencies = [ "alloy-primitives", "num_enum", @@ -109,9 +109,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c0dc44157867da82c469c13186015b86abef209bf0e41625e4b68bac61d728" +checksum = "4e4ff99651d46cef43767b5e8262ea228cd05287409ccb0c947cc25e70a952f9" dependencies = [ "alloy-eips", "alloy-primitives", @@ -136,9 +136,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ba4cdb42df3871cd6b346d6a938ec2ba69a9a0f49d1f82714bc5c48349268434" +checksum = "1a0701b0eda8051a2398591113e7862f807ccdd3315d0b441f06c2a0865a379b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -150,9 +150,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca63b7125a981415898ffe2a2a696c83696c9c6bdb1671c8a912946bbd8e49e7" +checksum = "f3c83c7a3c4e1151e8cac383d0a67ddf358f37e5ea51c95a1283d897c9de0a5a" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "alloy-eip7928" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3231de68d5d6e75332b7489cfcc7f4dfabeba94d990a10e4b923af0e6623540" +checksum = "f8222b1d88f9a6d03be84b0f5e76bb60cd83991b43ad8ab6477f0e4a7809b98d" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f7ef09f21bd1e9cb8a686f168cb4a206646804567f0889eadb8dcc4c9288c8" +checksum = "def1626eea28d48c6cc0a6f16f34d4af0001906e4f889df6c660b39c86fd044d" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c9cf3b99f46615fbf7dc1add0c96553abb7bf88fc9ec70dfbe7ad0b47ba7fe8" +checksum = "55d9d1aba3f914f0e8db9e4616ae37f3d811426d95bdccf44e47d0605ab202f6" dependencies = [ "alloy-eips", "alloy-primitives", @@ -302,9 +302,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff42cd777eea61f370c0b10f2648a1c81e0b783066cd7269228aa993afd487f7" 
+checksum = "e57586581f2008933241d16c3e3f633168b3a5d2738c5c42ea5246ec5e0ef17a" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -317,9 +317,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbca04f9b410fdc51aaaf88433cbac761213905a65fe832058bcf6690585762" +checksum = "3b36c2a0ed74e48851f78415ca5b465211bd678891ba11e88fee09eac534bab1" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d6d15e069a8b11f56bef2eccbad2a873c6dd4d4c81d04dda29710f5ea52f04" +checksum = "636c8051da58802e757b76c3b65af610b95799f72423dc955737dec73de234fd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -383,9 +383,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d181c8cc7cf4805d7e589bf4074d56d55064fa1a979f005a45a62b047616d870" +checksum = "b3dd56e2eafe8b1803e325867ac2c8a4c73c9fb5f341ffd8347f9344458c5922" dependencies = [ "alloy-chains", "alloy-consensus", @@ -439,14 +439,14 @@ checksum = "ce8849c74c9ca0f5a03da1c865e3eb6f768df816e67dd3721a398a8a7e398011" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] name = "alloy-rpc-client" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2792758a93ae32a32e9047c843d536e1448044f78422d71bf7d7c05149e103f" +checksum = "91577235d341a1bdbee30a463655d08504408a4d51e9f72edbfc5a622829f402" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -467,9 +467,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.7.3" +version = "1.6.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bdcbf9dfd5eea8bfeb078b1d906da8cd3a39c4d4dbe7a628025648e323611f6" +checksum = "79cff039bf01a17d76c0aace3a3a773d5f895eb4c68baaae729ec9da9e86c99c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -479,9 +479,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd720b63f82b457610f2eaaf1f32edf44efffe03ae25d537632e7d23e7929e1a" +checksum = "73234a141ecce14e2989748c04fcac23deee67a445e2c4c167cfb42d4dacd1b6" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -490,9 +490,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2dc411f13092f237d2bf6918caf80977fc2f51485f9b90cb2a2f956912c8c9" +checksum = "010e101dbebe0c678248907a2545b574a87d078d82c2f6f5d0e8e7c9a6149a10" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -511,9 +511,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" +checksum = "9e6d631f8b975229361d8af7b2c749af31c73b3cf1352f90e144ddb06227105e" dependencies = [ "alloy-primitives", "serde", @@ -522,9 +522,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2425c6f314522c78e8198979c8cbf6769362be4da381d4152ea8eefce383535d" +checksum = "97f40010b5e8f79b70bf163b38cd15f529b18ca88c4427c0e43441ee54e4ed82" dependencies = [ "alloy-primitives", "async-trait", @@ -537,9 +537,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.7.3" +version = "1.6.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ecb71ee53d8d9c3fa7bac17542c8116ebc7a9726c91b1bf333ec3d04f5a789" +checksum = "9c4ec1cc27473819399a3f0da83bc1cef0ceaac8c1c93997696e46dc74377a58" dependencies = [ "alloy-consensus", "alloy-network", @@ -562,7 +562,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -580,7 +580,7 @@ dependencies = [ "proc-macro2", "quote", "sha3", - "syn 2.0.116", + "syn 2.0.117", "syn-solidity", ] @@ -598,7 +598,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.116", + "syn 2.0.117", "syn-solidity", ] @@ -626,9 +626,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa186e560d523d196580c48bf00f1bf62e63041f28ecf276acc22f8b27bb9f53" +checksum = "a03bb3f02b9a7ab23dacd1822fa7f69aa5c8eefcdcf57fad085e0b8d76fb4334" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa501ad58dd20acddbfebc65b52e60f05ebf97c52fa40d1b35e91f5e2da0ad0e" +checksum = "5ce599598ef8ebe067f3627509358d9faaa1ef94f77f834a7783cd44209ef55c" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -682,14 +682,14 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa0c53e8c1e1ef4d01066b01c737fb62fc9397ab52c6e7bb5669f97d281b9bc" +checksum = "397406cf04b11ca2a48e6f81804c70af3f40a36abf648e11dc7416043eb0834d" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -759,9 +759,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.101" +version = "1.0.102" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "ark-ff" @@ -848,7 +848,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -886,7 +886,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -991,7 +991,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "synstructure", ] @@ -1003,7 +1003,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1087,7 +1087,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1098,7 +1098,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1140,7 +1140,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1151,9 +1151,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" dependencies = [ "aws-lc-sys", "zeroize", @@ -1161,9 
+1161,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" dependencies = [ "cc", "cmake", @@ -1282,7 +1282,7 @@ checksum = "7b9a5040dce49a7642c97ccb1ae59567098967b5d52c29773f1299a42d23bb39" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1322,12 +1322,6 @@ dependencies = [ "hex-conservative", ] -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.11.0" @@ -1384,7 +1378,7 @@ checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" dependencies = [ "async-stream", "base64 0.22.1", - "bitflags 2.11.0", + "bitflags", "bollard-buildkit-proto", "bollard-stubs", "bytes", @@ -1475,7 +1469,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1498,7 +1492,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1523,9 +1517,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.20.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6f81257d10a0f602a294ae4182251151ff97dbb504ef9afcdda4a64b24d9b4" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "byte-slice-cast" @@ -1550,9 +1544,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.5" +version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" +checksum = 
"1a0f582957c24870b7bfd12bf562c40b4734b533cafbaf8ded31d6d85f462c01" dependencies = [ "blst", "cc", @@ -1652,9 +1646,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", "js-sys", @@ -1704,9 +1698,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.59" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5caf74d17c3aec5495110c34cc3f78644bfa89af6c8993ed4de2790e49b6499" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -1714,9 +1708,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.59" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370daa45065b80218950227371916a1633217ae42b2715b2287b606dcd618e24" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -1733,7 +1727,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -1784,9 +1778,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" +checksum = "531185e432bb31db1ecda541e9e7ab21468d4d844ad7505e0546a49b4945d49b" dependencies = [ "cfg-if", "cpufeatures", @@ -2027,7 +2021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2074,7 +2068,7 @@ checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2118,7 +2112,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2133,7 +2127,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2146,7 +2140,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2157,7 +2151,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2168,7 +2162,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2179,7 +2173,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2219,7 +2213,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2267,9 +2261,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", "serde_core", @@ -2305,7 +2299,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.116", + "syn 2.0.117", "unicode-xid", ] @@ -2338,7 +2332,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2418,7 +2412,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2478,7 +2472,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2498,7 +2492,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2818,7 +2812,7 @@ checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -2921,20 +2915,20 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 6.0.0", "wasip2", "wasip3", ] @@ -2955,7 +2949,7 @@ version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b88256088d75a56f8ecfa070513a775dd9107f6530ef14919dac831af9cfe2b" dependencies = [ - "bitflags 2.11.0", + "bitflags", "libc", "libgit2-sys", "log", @@ -3353,8 +3347,8 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.2", - "system-configuration 0.7.0", + "socket2 0.6.3", + "system-configuration", "tokio", "tower-service", "tracing", @@ -3516,19 +3510,19 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.10.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" +checksum = 
"c0a05c691e1fae256cf7013d99dad472dc52d5543322761f83ec8d47eab40d2b" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] name = "if-watch" -version = "3.2.1" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +checksum = "71c02a5161c313f0cbdbadc511611893584a10a7b6153cb554bdf83ddce99ec2" dependencies = [ "async-io", "core-foundation 0.9.4", @@ -3542,9 +3536,9 @@ dependencies = [ "netlink-proto", "netlink-sys", "rtnetlink", - "system-configuration 0.6.1", + "system-configuration", "tokio", - "windows 0.53.0", + "windows 0.62.2", ] [[package]] @@ -3585,7 +3579,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -3634,9 +3628,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" @@ -3721,9 +3715,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies = [ "once_cell", "wasm-bindgen", @@ -3777,9 +3771,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.182" +version = "0.2.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" +checksum = 
"b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" [[package]] name = "libgit2-sys" @@ -4328,7 +4322,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -4342,7 +4336,7 @@ dependencies = [ "if-watch", "libc", "libp2p-core", - "socket2 0.6.2", + "socket2 0.6.3", "tokio", "tracing", ] @@ -4507,25 +4501,26 @@ dependencies = [ "thiserror 2.0.18", "tracing", "yamux 0.12.1", - "yamux 0.13.8", + "yamux 0.13.9", ] [[package]] name = "libredox" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" dependencies = [ - "bitflags 2.11.0", + "bitflags", "libc", - "redox_syscall 0.7.1", + "plain", + "redox_syscall 0.7.3", ] [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" dependencies = [ "cc", "libc", @@ -4535,9 +4530,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -4593,7 +4588,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -4604,7 +4599,7 @@ checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + 
"syn 2.0.117", ] [[package]] @@ -4673,9 +4668,9 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.13" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ac832c50ced444ef6be0767a008b02c106a909ba79d1d830501e94b96f6b7e" +checksum = "85f8024e1c8e71c778968af91d43700ce1d11b219d127d79fb2934153b82b42b" dependencies = [ "crossbeam-channel", "crossbeam-epoch", @@ -4769,46 +4764,30 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +checksum = "3463cbb78394cb0141e2c926b93fc2197e473394b761986eca3b9da2c63ae0f4" dependencies = [ - "anyhow", - "byteorder", - "netlink-packet-utils", + "paste", ] [[package]] name = "netlink-packet-route" -version = "0.17.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +checksum = "4ce3636fa715e988114552619582b530481fd5ef176a1e5c1bf024077c2c9445" dependencies = [ - "anyhow", - "bitflags 1.3.2", - "byteorder", + "bitflags", "libc", + "log", "netlink-packet-core", - "netlink-packet-utils", -] - -[[package]] -name = "netlink-packet-utils" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" -dependencies = [ - "anyhow", - "byteorder", - "paste", - "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.11.5" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +checksum = "b65d130ee111430e47eed7896ea43ca693c387f097dd97376bffafbf25812128" dependencies = [ "bytes", "futures", @@ -4833,12 +4812,13 @@ dependencies = [ [[package]] name = "nix" -version = 
"0.26.4" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 1.3.2", + "bitflags", "cfg-if", + "cfg_aliases", "libc", ] @@ -4984,7 +4964,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5044,7 +5024,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_with", - "syn 2.0.116", + "syn 2.0.117", "thiserror 2.0.18", "uuid", "validator", @@ -5093,7 +5073,7 @@ version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.11.0", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -5110,7 +5090,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5178,7 +5158,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5232,7 +5212,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5310,29 +5290,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "pin-utils" @@ -5356,6 +5336,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "plotters" version = "0.3.7" @@ -5492,17 +5478,22 @@ dependencies = [ "cancellation", "chrono", "crossbeam", + "futures", "hex", "libp2p", "pluto-build-proto", + "pluto-eth2api", "prost 0.14.3", "prost-types 0.14.3", "rand 0.8.5", "regex", "serde", "serde_json", + "test-case", "thiserror 2.0.18", "tokio", + "tokio-util", + "tracing", ] [[package]] @@ -5795,7 +5786,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5820,11 +5811,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", + "toml_edit 0.25.4+spec-1.1.0", ] [[package]] @@ -5846,7 
+5837,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5878,7 +5869,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5889,7 +5880,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.11.0", + "bitflags", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -5935,7 +5926,7 @@ dependencies = [ "prost 0.14.3", "prost-types 0.14.3", "regex", - "syn 2.0.116", + "syn 2.0.117", "tempfile", ] @@ -5949,7 +5940,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -5962,7 +5953,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -6013,9 +6004,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.39.1" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd58c6a1fc307e1092aa0bb23d204ca4d1f021764142cd0424dccc84d2d5d106" +checksum = "958f21e8e7ceb5a1aa7fa87fab28e7c75976e0bfe7e23ff069e0a260f894067d" dependencies = [ "memchr", "serde", @@ -6035,7 +6026,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.2", + "socket2 0.6.3", "thiserror 2.0.18", "tokio", "tracing", @@ -6044,9 +6035,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "aws-lc-rs", "bytes", @@ -6073,16 +6064,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.2", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] [[package]] name 
= "quote" -version = "1.0.44" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" dependencies = [ "proc-macro2", ] @@ -6093,6 +6084,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "radium" version = "0.7.0" @@ -6213,9 +6210,9 @@ dependencies = [ [[package]] name = "rapidhash" -version = "4.4.0" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111325c42c4bafae99e777cd77b40dea9a2b30c69e9d8c74b6eccd7fba4337de" +checksum = "b5e48930979c155e2f33aa36ab3119b5ee81332beb6482199a8ecd6029b80b59" dependencies = [ "rustversion", ] @@ -6259,16 +6256,16 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.11.0", + "bitflags", ] [[package]] name = "redox_syscall" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" dependencies = [ - "bitflags 2.11.0", + "bitflags", ] [[package]] @@ -6288,7 +6285,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -6316,9 +6313,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.9" +version = "0.8.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "reqwest" @@ -6448,15 +6445,15 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.13.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +checksum = "4b960d5d873a75b5be9761b1e73b146f52dddcd27bac75263f40fba686d4d7b5" dependencies = [ - "futures", + "futures-channel", + "futures-util", "log", "netlink-packet-core", "netlink-packet-route", - "netlink-packet-utils", "netlink-proto", "netlink-sys", "nix", @@ -6539,11 +6536,11 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.11.0", + "bitflags", "errno", "libc", "linux-raw-sys", @@ -6552,9 +6549,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.36" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ "aws-lc-rs", "log", @@ -6778,11 +6775,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.11.0", + "bitflags", "core-foundation 0.10.1", 
"core-foundation-sys", "libc", @@ -6791,9 +6788,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.16.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -6869,7 +6866,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -6905,7 +6902,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -6931,9 +6928,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" dependencies = [ "base64 0.22.1", "chrono", @@ -6950,14 +6947,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum = "a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7110,12 +7107,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" dependencies = [ "libc", - 
"windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -7170,7 +7167,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7181,7 +7178,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7202,7 +7199,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7224,9 +7221,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.116" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -7242,7 +7239,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7262,7 +7259,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7279,24 +7276,13 @@ dependencies = [ "windows 0.57.0", ] -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags 2.11.0", - "core-foundation 0.9.4", - "system-configuration-sys", -] - [[package]] name = "system-configuration" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.11.0", + "bitflags", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -7325,12 +7311,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version 
= "3.25.0" +version = "3.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" dependencies = [ "fastrand", - "getrandom 0.4.1", + "getrandom 0.4.2", "once_cell", "rustix", "windows-sys 0.61.2", @@ -7354,7 +7340,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7365,7 +7351,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "test-case-core", ] @@ -7425,7 +7411,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7436,7 +7422,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7525,9 +7511,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.49.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" dependencies = [ "bytes", "libc", @@ -7535,20 +7521,20 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.2", + "socket2 0.6.3", "tokio-macros", "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7619,9 +7605,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.5+spec-1.1.0" +version = "1.0.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" dependencies = [ "serde_core", ] @@ -7642,12 +7628,12 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.10+spec-1.0.0" +version = "0.25.4+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" dependencies = [ "indexmap 2.13.0", - "toml_datetime 0.7.5+spec-1.1.0", + "toml_datetime 1.0.0+spec-1.1.0", "toml_parser", "winnow", ] @@ -7669,9 +7655,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f32a6f80051a4111560201420c7885d0082ba9efe2ab61875c587bb6b18b9a0" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "axum", @@ -7686,7 +7672,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.2", + "socket2 0.6.3", "sync_wrapper", "tokio", "tokio-stream", @@ -7698,9 +7684,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f86539c0089bfd09b1f8c0ab0239d80392af74c21bc9e0f15e1b4aca4c1647f" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost 0.14.3", @@ -7732,7 +7718,7 @@ version = "0.6.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.11.0", + "bitflags", "bytes", "futures-util", "http", @@ -7776,7 +7762,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -7871,7 +7857,7 @@ dependencies = [ "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8053,11 +8039,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37" dependencies = [ - "getrandom 0.4.1", + "getrandom 0.4.2", "js-sys", "serde_core", "wasm-bindgen", @@ -8090,7 +8076,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8145,7 +8131,7 @@ source = "git+https://github.com/matter-labs/vise?rev=73c654303d8190023cf30034d6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8208,9 +8194,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -8221,9 +8207,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" 
+checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" dependencies = [ "cfg-if", "futures-util", @@ -8235,9 +8221,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8245,22 +8231,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] @@ -8306,7 +8292,7 @@ version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags 2.11.0", + "bitflags", "hashbrown 0.15.5", "indexmap 2.13.0", "semver 1.0.27", @@ -8328,9 +8314,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ 
-8412,32 +8398,33 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.53.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core 0.53.0", + "windows-core 0.57.0", "windows-targets 0.52.6", ] [[package]] name = "windows" -version = "0.57.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" dependencies = [ - "windows-core 0.57.0", - "windows-targets 0.52.6", + "windows-collections", + "windows-core 0.62.2", + "windows-future", + "windows-numerics", ] [[package]] -name = "windows-core" -version = "0.53.0" +name = "windows-collections" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" dependencies = [ - "windows-result 0.1.2", - "windows-targets 0.52.6", + "windows-core 0.62.2", ] [[package]] @@ -8465,6 +8452,17 @@ dependencies = [ "windows-strings", ] +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core 0.62.2", + "windows-link", + "windows-threading", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -8473,7 +8471,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8484,7 +8482,7 @@ 
checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8495,7 +8493,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8506,7 +8504,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -8515,6 +8513,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link", +] + [[package]] name = "windows-registry" version = "0.6.1" @@ -8661,6 +8669,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -8843,9 +8860,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" dependencies = [ "memchr", ] @@ -8913,7 +8930,7 @@ dependencies = [ "heck", "indexmap 2.13.0", "prettyplease", - "syn 2.0.116", + "syn 2.0.117", "wasm-metadata", 
"wit-bindgen-core", "wit-component", @@ -8929,7 +8946,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "wit-bindgen-core", "wit-bindgen-rust", ] @@ -8941,7 +8958,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags 2.11.0", + "bitflags", "indexmap 2.13.0", "log", "serde", @@ -9057,9 +9074,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.8" +version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deab71f2e20691b4728b349c6cee8fc7223880fa67b6b4f92225ec32225447e5" +checksum = "c650efd29044140aa63caaf80129996a9e2659a2ab7045a7e061807d02fc8549" dependencies = [ "futures", "log", @@ -9099,28 +9116,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = "96e13bc581734df6250836c59a5f44f3c57db9f9acb9dc8e3eaabdaf6170254d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "3545ea9e86d12ab9bba9fcd99b54c1556fd3199007def5a03c375623d05fac1c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -9140,7 +9157,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", "synstructure", ] @@ -9161,7 +9178,7 @@ checksum = 
"85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] @@ -9194,7 +9211,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.116", + "syn 2.0.117", ] [[package]] diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 924b1860..0d511157 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -10,15 +10,19 @@ publish.workspace = true cancellation.workspace = true chrono.workspace = true crossbeam.workspace = true +futures.workspace = true hex.workspace = true +libp2p.workspace = true +pluto-eth2api.workspace = true +prost.workspace = true +prost-types.workspace = true +regex.workspace = true serde.workspace = true serde_json.workspace = true thiserror.workspace = true tokio.workspace = true -libp2p.workspace = true -regex.workspace = true -prost.workspace = true -prost-types.workspace = true +tokio-util.workspace = true +tracing.workspace = true [dev-dependencies] rand.workspace = true @@ -27,6 +31,7 @@ prost.workspace = true prost-types.workspace = true hex.workspace = true chrono.workspace = true +test-case.workspace = true [build-dependencies] pluto-build-proto.workspace = true diff --git a/crates/core/src/deadline.rs b/crates/core/src/deadline.rs new file mode 100644 index 00000000..ac89c622 --- /dev/null +++ b/crates/core/src/deadline.rs @@ -0,0 +1,563 @@ +//! Duty deadline tracking and notification functionality. +//! +//! This module provides the [`Deadliner`] trait for tracking duty deadlines +//! and notifying when duties expire. It implements a background task that +//! manages timers for multiple duties and sends expired duties to a channel. +//! +//! # Example +//! +//! ```no_run +//! use chrono::{DateTime, Utc}; +//! use pluto_core::{ +//! deadline::{DeadlineFunc, new_deadliner}, +//! types::{Duty, DutyType, SlotNumber}, +//! }; +//! 
use std::sync::Arc; +//! use tokio_util::sync::CancellationToken; +//! +//! # async fn example() { +//! let cancel_token = CancellationToken::new(); +//! +//! // Define a deadline function +//! let deadline_func: DeadlineFunc = Arc::new(|_duty| { +//! let deadline = DateTime::from_timestamp(1000, 0).unwrap(); +//! Ok(Some(deadline)) +//! }); +//! +//! let deadliner = new_deadliner(cancel_token, "example", deadline_func); +//! +//! // Add a duty +//! let duty = Duty::new_attester_duty(SlotNumber::new(1)); +//! let added = deadliner.add(duty).await; +//! +//! // Receive expired duties +//! if let Some(mut rx) = deadliner.c() { +//! while let Some(expired_duty) = rx.recv().await { +//! println!("Duty expired: {}", expired_duty); +//! } +//! } +//! # } +//! ``` +use crate::types::{Duty, DutyType, SlotNumber}; +use chrono::{DateTime, Utc}; +use futures::future::BoxFuture; +use pluto_eth2api::{EthBeaconNodeApiClient, EthBeaconNodeApiClientError}; +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; +use futures::future::FutureExt; +use tokio_util::sync::CancellationToken; + +/// Fraction of slot duration to use as a margin for network delays. +const MARGIN_FACTOR: i32 = 12; + +/// Type alias for the deadline function. +/// +/// Takes a duty and returns an optional deadline. +/// Returns `Ok(Some(deadline))` if the duty expires at the given time. +/// Returns `Ok(None)` if the duty never expires. +pub type DeadlineFunc = Arc Result>> + Send + Sync>; + +/// Error types for deadline operations. +#[derive(Debug, thiserror::Error)] +pub enum DeadlineError { + /// Failed to fetch genesis time from beacon node. + #[error("Failed to fetch genesis time: {0}")] + FetchGenesisTime(#[from] EthBeaconNodeApiClientError), + + /// Deadliner has been shut down. + #[error("Deadliner has been shut down")] + Shutdown, + + /// Arithmetic overflow in deadline calculation. 
+ #[error("Arithmetic overflow in deadline calculation")] + ArithmeticOverflow, + + /// Duration conversion failed. + #[error("Duration conversion failed")] + DurationConversion, + + /// DateTime calculation failed. + #[error("DateTime calculation failed")] + DateTimeCalculation, +} + +/// Result type for deadline operations. +pub type Result = std::result::Result; + +/// Converts a `std::time::Duration` to `chrono::Duration`. +fn to_chrono_duration(duration: std::time::Duration) -> Result { + chrono::Duration::from_std(duration).map_err(|_| DeadlineError::DurationConversion) +} + +/// Converts seconds (u64) to `chrono::Duration`. +fn secs_to_chrono(secs: u64) -> Result { + let secs_i64 = i64::try_from(secs).map_err(|_| DeadlineError::ArithmeticOverflow)?; + chrono::Duration::try_seconds(secs_i64).ok_or(DeadlineError::DurationConversion) +} + +/// Deadliner provides duty deadline functionality. +/// +/// The `c()` method returns a channel for receiving expired duties. +/// It may only be called once and the returned channel should be used +/// by a single task. Multiple instances are required for different +/// components and use cases. +pub trait Deadliner: Send + Sync { + /// Adds a duty for deadline scheduling. + /// + /// Returns `true` if the duty was added for future deadline scheduling. + /// This method is idempotent and returns `true` if the duty was previously + /// added and still awaits deadline scheduling. + /// + /// Returns `false` if: + /// - The duty has already expired and cannot be scheduled + /// - The duty never expires (e.g., Exit, BuilderRegistration) + fn add(&self, duty: Duty) -> BoxFuture<'_, bool>; + + /// Returns the channel for receiving deadlined duties. + /// + /// This method may only be called once and returns `None` on subsequent + /// calls. The returned channel should only be used by a single task. + fn c(&self) -> Option>; +} + +/// Creates a deadline function from the Ethereum 2.0 beacon node configuration. 
+/// +/// Fetches genesis time and slot duration from the beacon node and returns +/// a function that calculates deadlines for each duty type. +/// +/// # Errors +/// +/// Returns an error if fetching genesis time or slots config fails. +pub async fn new_duty_deadline_func(client: &EthBeaconNodeApiClient) -> Result { + let genesis_time = client.fetch_genesis_time().await?; + let (slot_duration, _slots_per_epoch) = client.fetch_slots_config().await?; + + // Convert std::time::Duration to chrono::Duration for slot_duration + let slot_duration = to_chrono_duration(slot_duration)?; + + Ok(Arc::new(move |duty: Duty| { + // Exit and BuilderRegistration duties never expire + match duty.duty_type { + DutyType::Exit | DutyType::BuilderRegistration => { + return Ok(None); + } + _ => {} + } + + // Calculate slot start time + // start = genesis_time + (slot * slot_duration) + let slot_secs = duty + .slot + .inner() + .checked_mul( + u64::try_from(slot_duration.num_seconds()) + .map_err(|_| DeadlineError::ArithmeticOverflow)?, + ) + .ok_or(DeadlineError::ArithmeticOverflow)?; + let slot_offset = secs_to_chrono(slot_secs)?; + + let start: DateTime = genesis_time + .checked_add_signed(slot_offset) + .ok_or(DeadlineError::DateTimeCalculation)?; + + // Calculate margin: slot_duration / MARGIN_FACTOR + let margin = slot_duration + .checked_div(MARGIN_FACTOR) + .ok_or(DeadlineError::ArithmeticOverflow)?; + + // Calculate duty-specific duration + let duration = match duty.duty_type { + DutyType::Proposer | DutyType::Randao => { + // duration = slot_duration / 3 + slot_duration + .checked_div(3) + .ok_or(DeadlineError::ArithmeticOverflow)? + } + DutyType::SyncMessage => { + // duration = 2 * slot_duration / 3 + slot_duration + .checked_mul(2) + .and_then(|s| s.checked_div(3)) + .ok_or(DeadlineError::ArithmeticOverflow)? 
+ } + DutyType::Attester | DutyType::Aggregator | DutyType::PrepareAggregator => { + // duration = 2 * slot_duration + // Even though attestations and aggregations are acceptable after 2 slots, + // the rewards are heavily diminished. + slot_duration + .checked_mul(2) + .ok_or(DeadlineError::ArithmeticOverflow)? + } + _ => { + // Default: duration = slot_duration + slot_duration + } + }; + + // Calculate final deadline: start + duration + margin + let deadline = start + .checked_add_signed(duration) + .and_then(|t| t.checked_add_signed(margin)) + .ok_or(DeadlineError::DateTimeCalculation)?; + + Ok(Some(deadline)) + })) +} + +/// Gets the duty with the earliest deadline from the duties map. +/// +/// Returns a tuple of (duty, deadline). If no duties are available, +/// returns a sentinel far-future date (9999-01-01). +fn get_curr_duty(duties: &HashSet, deadline_func: &DeadlineFunc) -> (Duty, DateTime) { + let mut curr_duty = Duty::new(SlotNumber::new(0), DutyType::Unknown); + + // Use far-future sentinel date (9999-01-01) matching Go implementation + // This timestamp is a known constant and will never fail + let mut curr_deadline = + DateTime::from_timestamp(253402300799, 0).unwrap_or(DateTime::::MAX_UTC); + + for duty in duties.iter() { + let Ok(deadline_opt) = deadline_func(duty.clone()) else { + continue; + }; + + // Ignore duties that never expire + let Some(duty_deadline) = deadline_opt else { + continue; + }; + + // Update if this duty has an earlier deadline + if duty_deadline < curr_deadline { + curr_duty = duty.clone(); + curr_deadline = duty_deadline; + } + } + + (curr_duty, curr_deadline) +} + +/// Internal message type for adding duties to the deadliner. +struct DeadlineInput { + duty: Duty, + response_tx: tokio::sync::oneshot::Sender, +} + +/// Implementation of the Deadliner trait. 
+struct DeadlinerImpl { + cancel_token: CancellationToken, + input_tx: tokio::sync::mpsc::UnboundedSender, + output_rx: Arc>>>, +} + +impl Deadliner for DeadlinerImpl { + fn add(&self, duty: Duty) -> BoxFuture<'_, bool> { + Box::pin(async move { + // Check if shut down + if self.cancel_token.is_cancelled() { + return false; + } + + let (response_tx, response_rx) = tokio::sync::oneshot::channel(); + let input = DeadlineInput { duty, response_tx }; + + // Send the duty to the background task + if self.input_tx.send(input).is_err() { + return false; + } + + // Wait for response + response_rx.await.unwrap_or(false) + }) + } + + fn c(&self) -> Option> { + self.output_rx + .lock() + .ok() + .and_then(|mut guard| guard.take()) + } +} + +/// Clock trait for abstracting time operations. +trait Clock: Send + Sync { + /// Returns the current time. + fn now(&self) -> DateTime; + + /// Creates a sleep future that completes after the given duration. + fn sleep(&self, duration: std::time::Duration) -> BoxFuture<'static, ()>; +} + +/// Real clock implementation using tokio::time. +struct RealClock; + +impl Clock for RealClock { + fn now(&self) -> DateTime { + Utc::now() + } + + fn sleep(&self, duration: std::time::Duration) -> BoxFuture<'static, ()> { + tokio::time::sleep(duration).boxed() + } +} + +impl DeadlinerImpl { + /// Background task that manages duty deadlines. + /// + /// This is an associated function (not a method) because the DeadlinerImpl + /// is immediately wrapped in Arc, preventing mutable access. 
+ async fn run_task( + cancel_token: CancellationToken, + label: String, + deadline_func: DeadlineFunc, + clock: Arc, + mut input_rx: tokio::sync::mpsc::UnboundedReceiver, + output_tx: tokio::sync::mpsc::Sender, + ) { + let mut duties: HashSet = HashSet::new(); + let (mut curr_duty, mut curr_deadline) = get_curr_duty(&duties, &deadline_func); + + // Create initial timer + let now = clock.now(); + let initial_duration = curr_deadline + .signed_duration_since(now) + .to_std() + .unwrap_or(std::time::Duration::ZERO); + let mut timer = clock.sleep(initial_duration); + + loop { + tokio::select! { + biased; + + _ = cancel_token.cancelled() => { + return; + } + + Some(input) = input_rx.recv() => { + let duty = input.duty; + let Ok(deadline_opt) = deadline_func(duty.clone()) else { + let _ = input.response_tx.send(false); + continue; + }; + + // Drop duties that never expire + let Some(deadline) = deadline_opt else { + let _ = input.response_tx.send(false); + continue; + }; + + let now = clock.now(); + let expired = deadline < now; + + let _ = input.response_tx.send(!expired); + + // Ignore expired duties + if expired { + continue; + } + + // Add duty to the map (idempotent) + duties.insert(duty); + + // Update timer if this deadline is earlier + if deadline < curr_deadline { + let (new_duty, new_deadline) = get_curr_duty(&duties, &deadline_func); + curr_duty = new_duty; + curr_deadline = new_deadline; + + let duration = curr_deadline + .signed_duration_since(clock.now()) + .to_std() + .unwrap_or(std::time::Duration::ZERO); + timer = clock.sleep(duration); + } + } + + _ = &mut timer => { + // Deadline expired - send duty to output channel + match output_tx.try_send(curr_duty.clone()) { + Ok(()) => {} + Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { + tracing::warn!( + label = %label, + duty = %curr_duty, + "Deadliner output channel full" + ); + } + Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { + return; + } + } + + // Remove duty from map + 
duties.remove(&curr_duty); + + // Update to next duty + let (new_duty, new_deadline) = get_curr_duty(&duties, &deadline_func); + curr_duty = new_duty; + curr_deadline = new_deadline; + + let duration = curr_deadline + .signed_duration_since(clock.now()) + .to_std() + .unwrap_or(std::time::Duration::ZERO); + timer = clock.sleep(duration); + } + } + } + } + + /// Internal constructor for creating a deadliner with a specific clock. + fn new_internal( + cancel_token: CancellationToken, + label: impl Into<String>, + deadline_func: DeadlineFunc, + clock: Arc<dyn Clock>, + ) -> Arc<dyn Deadliner> { + const OUTPUT_BUFFER: usize = 10; + + let label = label.into(); + let (input_tx, input_rx) = tokio::sync::mpsc::unbounded_channel(); + let (output_tx, output_rx) = tokio::sync::mpsc::channel(OUTPUT_BUFFER); + + let impl_instance: Arc<dyn Deadliner> = Arc::new(DeadlinerImpl { + cancel_token: cancel_token.clone(), + input_tx, + output_rx: Arc::new(Mutex::new(Some(output_rx))), + }); + + // Spawn background task + tokio::spawn(Self::run_task( + cancel_token, + label, + deadline_func, + clock, + input_rx, + output_tx, + )); + + impl_instance + } +} + +/// Creates a new Deadliner instance. +/// +/// Starts a background task that manages duty deadlines and sends expired +/// duties to a channel. The background task runs until the cancellation token +/// is cancelled. +/// +/// # Arguments +/// +/// * `cancel_token` - Token to cancel the background task +/// * `label` - Label for logging purposes +/// * `deadline_func` - Function that calculates deadlines for duties +/// +/// # Returns +/// +/// An Arc-wrapped Deadliner trait object +pub fn new_deadliner( + cancel_token: CancellationToken, + label: impl Into<String>, + deadline_func: DeadlineFunc, +) -> Arc<dyn Deadliner> { + DeadlinerImpl::new_internal(cancel_token, label, deadline_func, Arc::new(RealClock)) +} + +/// Creates a new Deadliner instance for testing with a fake clock. +/// +/// This constructor is intended for use in tests where you need to control +/// time progression. 
+/// +/// # Arguments +/// +/// * `cancel_token` - Token to cancel the background task +/// * `label` - Label for logging purposes +/// * `deadline_func` - Function that calculates deadlines for duties +/// * `clock` - Test clock for controlling time in tests +/// +/// # Returns +/// +/// An Arc-wrapped Deadliner trait object +#[cfg(test)] +fn new_deadliner_for_test( + cancel_token: CancellationToken, + label: impl Into<String>, + deadline_func: DeadlineFunc, + clock: Arc<dyn Clock>, +) -> Arc<dyn Deadliner> { + DeadlinerImpl::new_internal(cancel_token, label, deadline_func, clock) +} + + +/// Fake clock implementation for testing. +#[cfg(test)] +type WakerList = Vec<(DateTime<Utc>, std::task::Waker)>; + +#[cfg(test)] +struct TestClock { + start: std::sync::Arc<std::sync::Mutex<DateTime<Utc>>>, + wakers: std::sync::Arc<std::sync::Mutex<WakerList>>, +} + +#[cfg(test)] +impl TestClock { + fn new(start: DateTime<Utc>) -> Self { + Self { + start: std::sync::Arc::new(std::sync::Mutex::new(start)), + wakers: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())), + } + } + + fn advance(&self, duration: std::time::Duration) { + let new_time = { + let mut start = self.start.lock().unwrap(); + let chrono_duration = chrono::Duration::from_std(duration).unwrap(); + *start = start.checked_add_signed(chrono_duration).unwrap(); + *start + }; + + // Wake all timers that have expired + let mut wakers = self.wakers.lock().unwrap(); + let (expired, pending): (Vec<_>, Vec<_>) = wakers + .drain(..) 
+ .partition(|(deadline, _)| *deadline <= new_time); + *wakers = pending; + + // Wake expired futures + for (_, waker) in expired { + waker.wake(); + } + } +} + +#[cfg(test)] +impl Clock for TestClock { + fn now(&self) -> DateTime<Utc> { + *self.start.lock().unwrap() + } + + fn sleep(&self, duration: std::time::Duration) -> BoxFuture<'static, ()> { + let deadline = self + .now() + .checked_add_signed(chrono::Duration::from_std(duration).unwrap()) + .unwrap(); + let wakers = Arc::clone(&self.wakers); + let start = Arc::clone(&self.start); + + Box::pin(std::future::poll_fn(move |cx| { + let now = *start.lock().unwrap(); + if now >= deadline { + std::task::Poll::Ready(()) + } else { + // Register waker + let mut wakers = wakers.lock().unwrap(); + // Check if this waker is already registered for this deadline + if !wakers.iter().any(|(d, _)| *d == deadline) { + wakers.push((deadline, cx.waker().clone())); + } + std::task::Poll::Pending + } + })) + } +} \ No newline at end of file diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 104b8b1b..d34e1782 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -16,3 +16,6 @@ pub mod corepb; /// Semver version parsing utilities. pub mod version; + +/// Duty deadline tracking and notification. 
+pub mod deadline; From 364977ef687973733d08edfbb6fb6ceb8eda69e3 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Wed, 11 Mar 2026 14:14:09 +0100 Subject: [PATCH 02/11] feat: deadline tests --- Cargo.lock | 1 + Cargo.toml | 1 + crates/core/Cargo.toml | 1 + crates/core/src/deadline.rs | 379 +++++++++++++++++++++++++++++++++++- 4 files changed, 377 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7373e0d1..c2e65f79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5474,6 +5474,7 @@ dependencies = [ name = "pluto-core" version = "1.7.1" dependencies = [ + "async-trait", "built", "cancellation", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 34b514cc..1f8ce1d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ license = "Apache-2.0" publish = false [workspace.dependencies] +async-trait = "0.1.89" alloy = { version = "1.3", features = ["essentials"] } built = { version = "0.8.0", features = ["git2", "chrono", "cargo-lock"] } blst = "0.3" diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 0d511157..c1f74cbf 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -7,6 +7,7 @@ license.workspace = true publish.workspace = true [dependencies] +async-trait.workspace = true cancellation.workspace = true chrono.workspace = true crossbeam.workspace = true diff --git a/crates/core/src/deadline.rs b/crates/core/src/deadline.rs index ac89c622..567113ea 100644 --- a/crates/core/src/deadline.rs +++ b/crates/core/src/deadline.rs @@ -39,14 +39,14 @@ //! # } //! 
``` use crate::types::{Duty, DutyType, SlotNumber}; +use async_trait::async_trait; use chrono::{DateTime, Utc}; -use futures::future::BoxFuture; +use futures::future::{BoxFuture, FutureExt}; use pluto_eth2api::{EthBeaconNodeApiClient, EthBeaconNodeApiClientError}; use std::{ collections::HashSet, sync::{Arc, Mutex}, }; -use futures::future::FutureExt; use tokio_util::sync::CancellationToken; /// Fraction of slot duration to use as a margin for network delays. @@ -122,6 +122,35 @@ pub trait Deadliner: Send + Sync { fn c(&self) -> Option>; } +/// Trait for beacon clients that can provide genesis time and slot +/// configuration. +/// +/// This trait abstracts the necessary beacon node API calls for deadline +/// calculation. +#[async_trait] +pub trait BeaconClientForDeadline { + /// Fetches the genesis time from the beacon node. + async fn fetch_genesis_time(&self) -> Result>; + + /// Fetches the slot duration and slots per epoch from the beacon node. + async fn fetch_slots_config(&self) -> Result<(std::time::Duration, u64)>; +} + +#[async_trait] +impl BeaconClientForDeadline for EthBeaconNodeApiClient { + async fn fetch_genesis_time(&self) -> Result> { + self.fetch_genesis_time() + .await + .map_err(DeadlineError::FetchGenesisTime) + } + + async fn fetch_slots_config(&self) -> Result<(std::time::Duration, u64)> { + self.fetch_slots_config() + .await + .map_err(DeadlineError::FetchGenesisTime) + } +} + /// Creates a deadline function from the Ethereum 2.0 beacon node configuration. /// /// Fetches genesis time and slot duration from the beacon node and returns @@ -130,7 +159,9 @@ pub trait Deadliner: Send + Sync { /// # Errors /// /// Returns an error if fetching genesis time or slots config fails. 
-pub async fn new_duty_deadline_func(client: &EthBeaconNodeApiClient) -> Result { +pub async fn new_duty_deadline_func( + client: &C, +) -> Result { let genesis_time = client.fetch_genesis_time().await?; let (slot_duration, _slots_per_epoch) = client.fetch_slots_config().await?; @@ -489,7 +520,6 @@ fn new_deadliner_for_test( DeadlinerImpl::new_internal(cancel_token, label, deadline_func, clock) } - /// Fake clock implementation for testing. #[cfg(test)] type WakerList = Vec<(DateTime, std::task::Waker)>; @@ -560,4 +590,343 @@ impl Clock for TestClock { } })) } -} \ No newline at end of file +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::SlotNumber; + use test_case::test_case; + + /// Helper function to create expired duties, non-expired duties, and + /// voluntary exits. + fn setup_data() -> (Vec, Vec, Vec) { + let expired_duties = vec![ + Duty::new_attester_duty(SlotNumber::new(1)), + Duty::new_proposer_duty(SlotNumber::new(2)), + Duty::new_randao_duty(SlotNumber::new(3)), + ]; + + let non_expired_duties = vec![ + Duty::new_proposer_duty(SlotNumber::new(1)), + Duty::new_attester_duty(SlotNumber::new(2)), + ]; + + let voluntary_exits = vec![ + Duty::new_voluntary_exit_duty(SlotNumber::new(2)), + Duty::new_voluntary_exit_duty(SlotNumber::new(4)), + ]; + + (expired_duties, non_expired_duties, voluntary_exits) + } + + /// Helper function to add duties to the deadliner and send results to a + /// channel. 
+ async fn add_duties( + duties: Vec, + deadliner: Arc, + result_tx: tokio::sync::mpsc::Sender, + ) { + for duty in duties { + let added = deadliner.add(duty).await; + let _ = result_tx.send(added).await; + } + } + + #[tokio::test] + async fn test_deadliner() { + let (expired_duties, non_expired_duties, voluntary_exits) = setup_data(); + + let start_time = DateTime::from_timestamp(1000, 0).unwrap(); + let clock = Arc::new(TestClock::new(start_time)); + + // Create a deadline function provider + let expired_set: std::collections::HashSet<_> = expired_duties.iter().cloned().collect(); + let deadline_func: DeadlineFunc = { + Arc::new(move |duty: Duty| { + if duty.duty_type == DutyType::Exit { + // Voluntary exits expire after 1 hour + let deadline = start_time + .checked_add_signed(chrono::Duration::try_hours(1).unwrap()) + .ok_or(DeadlineError::DateTimeCalculation)?; + return Ok(Some(deadline)); + } + + if expired_set.contains(&duty) { + // Expired duties have deadline 1 hour in the past + let deadline = start_time + .checked_sub_signed(chrono::Duration::try_hours(1).unwrap()) + .ok_or(DeadlineError::DateTimeCalculation)?; + return Ok(Some(deadline)); + } + + // Non-expired duties expire after duty.slot seconds from start + let deadline = start_time + .checked_add_signed( + chrono::Duration::try_seconds(i64::try_from(duty.slot.inner()).unwrap()) + .unwrap(), + ) + .ok_or(DeadlineError::DateTimeCalculation)?; + Ok(Some(deadline)) + }) + }; + + let cancel_token = CancellationToken::new(); + let deadliner = new_deadliner_for_test( + cancel_token.clone(), + "test", + deadline_func, + Arc::clone(&clock), + ); + + // Get the output receiver + let mut output_rx = deadliner.c().expect("should get receiver"); + + // Separate channels for expired and non-expired results + let (expired_tx, mut expired_rx) = tokio::sync::mpsc::channel(100); + let (non_expired_tx, mut non_expired_rx) = tokio::sync::mpsc::channel(100); + + // Add all duties + let expired_len = 
expired_duties.len(); + let non_expired_len = non_expired_duties.len(); + let voluntary_exits_len = voluntary_exits.len(); + + let handler_expired = tokio::spawn(add_duties( + expired_duties, + Arc::clone(&deadliner), + expired_tx, + )); + let handler_non_expired = tokio::spawn(add_duties( + non_expired_duties.clone(), + Arc::clone(&deadliner), + non_expired_tx.clone(), + )); + let handler_voluntary_exits = tokio::spawn(add_duties( + voluntary_exits, + Arc::clone(&deadliner), + non_expired_tx, + )); + + // Wait for all handlers to complete + let (result_expired, result_non_expired, result_voluntary_exits) = tokio::join!( + handler_expired, + handler_non_expired, + handler_voluntary_exits + ); + result_expired.unwrap(); + result_non_expired.unwrap(); + result_voluntary_exits.unwrap(); + + for _ in 0..expired_len { + let result = expired_rx.recv().await.expect("should receive result"); + assert!(!result, "expired duties should return false"); + } + + for _ in 0..(non_expired_len.checked_add(voluntary_exits_len).unwrap()) { + let result = non_expired_rx.recv().await.expect("should receive result"); + assert!(result, "non-expired duties should return true"); + } + + // Find max slot from non-expired duties + let max_slot = non_expired_duties + .iter() + .map(|d| d.slot.inner()) + .max() + .unwrap(); + + // Advance clock to trigger deadline of all non-expired duties + clock.advance(std::time::Duration::from_secs(max_slot)); + + // Give the deadliner task time to wake up and process + // We need to yield multiple times to ensure the background task runs + for _ in 0..10 { + tokio::task::yield_now().await; + } + + // Collect expired duties from output channel + let mut actual_duties = Vec::new(); + for _ in 0..non_expired_len { + let duty = tokio::time::timeout(std::time::Duration::from_secs(1), output_rx.recv()) + .await + .expect("should receive within timeout") + .expect("should receive duty"); + actual_duties.push(duty); + } + + // Sort both for comparison + 
actual_duties.sort_by_key(|d| d.slot.inner()); + let mut expected_duties = non_expired_duties; + expected_duties.sort_by_key(|d| d.slot.inner()); + + assert_eq!(expected_duties, actual_duties); + + cancel_token.cancel(); + } + + #[test_case(DutyType::Exit ; "exit")] + #[test_case(DutyType::BuilderRegistration ; "builder_registration")] + #[tokio::test] + async fn test_never_expire_duties(duty_type: DutyType) { + // Create a simple mock client that returns fixed values + let mock_client = create_mock_client(); + + let deadline_func = new_duty_deadline_func(&mock_client) + .await + .expect("should create deadline func"); + + let duty = Duty::new(SlotNumber::new(100), duty_type); + let result = deadline_func(duty).expect("should compute deadline"); + + assert_eq!(result, None, "duty should never expire"); + } + + // todo: uses hardcode beacon client for testing, should be refactored to use a + // real beacon client (testutils/beaconmock) + #[test_case(DutyType::Proposer ; "proposer")] + #[test_case(DutyType::Attester ; "attester")] + #[test_case(DutyType::Aggregator ; "aggregator")] + #[test_case(DutyType::PrepareAggregator ; "prepare_aggregator")] + #[test_case(DutyType::SyncMessage ; "sync_message")] + #[test_case(DutyType::SyncContribution ; "sync_contribution")] + #[test_case(DutyType::Randao ; "randao")] + #[test_case(DutyType::InfoSync ; "info_sync")] + #[test_case(DutyType::PrepareSyncContribution ; "prepare_sync_contribution")] + #[tokio::test] + async fn test_duty_deadline_durations(duty_type: DutyType) { + let mock_client = create_mock_client(); + + let genesis_time = mock_client.fetch_genesis_time().await.unwrap(); + let (slot_duration, _) = mock_client.fetch_slots_config().await.unwrap(); + + let margin = slot_duration + .checked_div(12) + .expect("margin calculation should not fail"); + + let time_since_genesis = Utc::now().signed_duration_since(genesis_time); + let slot_duration_chrono = to_chrono_duration(slot_duration).unwrap(); + let current_slot = 
u64::try_from( + time_since_genesis + .num_seconds() + .checked_div(slot_duration_chrono.num_seconds()) + .expect("slot duration should not be zero"), + ) + .expect("current slot should be positive"); + + let slot_start = { + let offset_secs = current_slot + .checked_mul(slot_duration.as_secs()) + .expect("slot offset should not overflow"); + let offset = chrono::Duration::try_seconds( + i64::try_from(offset_secs).expect("offset should fit in i64"), + ) + .expect("offset should be valid duration"); + genesis_time + .checked_add_signed(offset) + .expect("slot start should not overflow") + }; + + let deadline_func = new_duty_deadline_func(&mock_client) + .await + .expect("should create deadline func"); + + // Calculate expected duration based on duty type (matches Go test cases) + let expected_duration = match duty_type { + DutyType::Proposer | DutyType::Randao => { + // slotDuration/3 + margin + slot_duration + .checked_div(3) + .and_then(|d| d.checked_add(margin)) + .expect("duration calculation should not fail") + } + DutyType::Attester | DutyType::Aggregator | DutyType::PrepareAggregator => { + // 2*slotDuration + margin + slot_duration + .checked_mul(2) + .and_then(|d| d.checked_add(margin)) + .expect("duration calculation should not fail") + } + DutyType::SyncMessage => { + // 2*slotDuration/3 + margin + slot_duration + .checked_mul(2) + .and_then(|d| d.checked_div(3)) + .and_then(|d| d.checked_add(margin)) + .expect("duration calculation should not fail") + } + DutyType::SyncContribution | DutyType::InfoSync | DutyType::PrepareSyncContribution => { + // slotDuration + margin + slot_duration + .checked_add(margin) + .expect("duration calculation should not fail") + } + _ => panic!("unexpected duty type: {:?}", duty_type), + }; + + let duty = Duty::new(SlotNumber::new(current_slot), duty_type.clone()); + + // Matches Go: now := now.Add(tt.expectedDuration - time.Millisecond) + // This sets "now" to 1ms before the expected deadline + let now_before_deadline = 
slot_start + .checked_add_signed(to_chrono_duration(expected_duration).unwrap()) + .and_then(|t| t.checked_sub_signed(chrono::Duration::try_milliseconds(1).unwrap())) + .expect("time calculation should not fail"); + + // Call deadline function (matches Go: end, ok := deadlineFunc(tt.duty)) + let deadline_opt = deadline_func(duty.clone()).expect("should compute deadline"); + + assert!( + deadline_opt.is_some(), + "duty {:?} should have a deadline", + duty_type + ); + + let deadline = deadline_opt.unwrap(); + + // Matches Go: require.True(t, now.Before(end), "wrong duty deadline") + assert!( + now_before_deadline < deadline, + "duty {:?}: now ({}) should be before deadline ({})", + duty_type, + now_before_deadline, + deadline + ); + + // Matches Go: require.Equal(t, time.Millisecond, end.Sub(now)) + let time_until_deadline = deadline.signed_duration_since(now_before_deadline); + assert_eq!( + time_until_deadline, + chrono::Duration::try_milliseconds(1).unwrap(), + "duty {:?}: deadline should be exactly 1ms after now (actual: {}ms)", + duty_type, + time_until_deadline.num_milliseconds() + ); + } + + /// Creates a mock EthBeaconNodeApiClient for testing. + fn create_mock_client() -> MockBeaconClient { + MockBeaconClient { + genesis_time: DateTime::from_timestamp(1646092800, 0).unwrap(), /* 2022-03-01 + * 00:00:00 UTC */ + slot_duration: std::time::Duration::from_secs(12), + slots_per_epoch: 16, + } + } + + /// Mock beacon client for testing. 
+ struct MockBeaconClient { + genesis_time: DateTime, + slot_duration: std::time::Duration, + slots_per_epoch: u64, + } + + #[async_trait] + impl BeaconClientForDeadline for MockBeaconClient { + async fn fetch_genesis_time(&self) -> Result> { + Ok(self.genesis_time) + } + + async fn fetch_slots_config(&self) -> Result<(std::time::Duration, u64)> { + Ok((self.slot_duration, self.slots_per_epoch)) + } + } +} From 368177c23fb940c9507582f7aa8d076716a5d7f0 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Wed, 11 Mar 2026 14:22:00 +0100 Subject: [PATCH 03/11] fix: remove comments --- crates/core/src/deadline.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crates/core/src/deadline.rs b/crates/core/src/deadline.rs index 567113ea..47cd7fd4 100644 --- a/crates/core/src/deadline.rs +++ b/crates/core/src/deadline.rs @@ -767,7 +767,6 @@ mod tests { #[test_case(DutyType::BuilderRegistration ; "builder_registration")] #[tokio::test] async fn test_never_expire_duties(duty_type: DutyType) { - // Create a simple mock client that returns fixed values let mock_client = create_mock_client(); let deadline_func = new_duty_deadline_func(&mock_client) @@ -829,7 +828,6 @@ mod tests { .await .expect("should create deadline func"); - // Calculate expected duration based on duty type (matches Go test cases) let expected_duration = match duty_type { DutyType::Proposer | DutyType::Randao => { // slotDuration/3 + margin @@ -864,14 +862,11 @@ mod tests { let duty = Duty::new(SlotNumber::new(current_slot), duty_type.clone()); - // Matches Go: now := now.Add(tt.expectedDuration - time.Millisecond) - // This sets "now" to 1ms before the expected deadline let now_before_deadline = slot_start .checked_add_signed(to_chrono_duration(expected_duration).unwrap()) .and_then(|t| t.checked_sub_signed(chrono::Duration::try_milliseconds(1).unwrap())) .expect("time calculation should not fail"); - // Call deadline function (matches Go: end, ok := 
deadlineFunc(tt.duty)) let deadline_opt = deadline_func(duty.clone()).expect("should compute deadline"); assert!( @@ -882,7 +877,6 @@ mod tests { let deadline = deadline_opt.unwrap(); - // Matches Go: require.True(t, now.Before(end), "wrong duty deadline") assert!( now_before_deadline < deadline, "duty {:?}: now ({}) should be before deadline ({})", @@ -891,7 +885,6 @@ mod tests { deadline ); - // Matches Go: require.Equal(t, time.Millisecond, end.Sub(now)) let time_until_deadline = deadline.signed_duration_since(now_before_deadline); assert_eq!( time_until_deadline, From de6dba45dfe90f2f77325eebc4b3725197d25e10 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:37:22 +0100 Subject: [PATCH 04/11] wip: add parasig db and some tests --- Cargo.lock | 4 + crates/core/Cargo.toml | 3 + crates/core/src/lib.rs | 7 + crates/core/src/parasigdb/memory.rs | 282 ++++++++++++++++++ .../src/parasigdb/memory_internal_test.rs | 282 ++++++++++++++++++ crates/core/src/parasigdb/metrics.rs | 12 + crates/core/src/parasigdb/mod.rs | 5 + crates/core/src/testutils.rs | 141 +++++++++ crates/core/src/types.rs | 105 ++++--- crates/testutil/Cargo.toml | 1 + crates/testutil/src/random.rs | 192 ++++++++++++ 11 files changed, 997 insertions(+), 37 deletions(-) create mode 100644 crates/core/src/parasigdb/memory.rs create mode 100644 crates/core/src/parasigdb/memory_internal_test.rs create mode 100644 crates/core/src/parasigdb/metrics.rs create mode 100644 crates/core/src/parasigdb/mod.rs create mode 100644 crates/core/src/testutils.rs diff --git a/Cargo.lock b/Cargo.lock index ae2b7b21..03dbdbf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5480,6 +5480,8 @@ dependencies = [ "libp2p", "pluto-build-proto", "pluto-eth2api", + "pluto-eth2util", + "pluto-testutil", "prost 0.14.3", "prost-types 0.14.3", "rand 0.8.5", @@ -5491,6 +5493,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "vise", ] [[package]] @@ -5691,6 +5694,7 @@ 
dependencies = [ "hex", "k256", "pluto-crypto", + "pluto-eth2api", "rand 0.8.5", "rand_core 0.6.4", "thiserror 2.0.18", diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index c1f74cbf..1566510e 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -14,6 +14,7 @@ crossbeam.workspace = true futures.workspace = true hex.workspace = true libp2p.workspace = true +vise.workspace = true pluto-eth2api.workspace = true prost.workspace = true prost-types.workspace = true @@ -33,6 +34,8 @@ prost-types.workspace = true hex.workspace = true chrono.workspace = true test-case.workspace = true +pluto-eth2util.workspace = true +pluto-testutil.workspace = true [build-dependencies] pluto-build-proto.workspace = true diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index d34e1782..ee3a0360 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -19,3 +19,10 @@ pub mod version; /// Duty deadline tracking and notification. pub mod deadline; + +/// parasigdb +pub mod parasigdb; + +/// Test utilities. +#[cfg(test)] +pub mod testutils; diff --git a/crates/core/src/parasigdb/memory.rs b/crates/core/src/parasigdb/memory.rs new file mode 100644 index 00000000..dd1e0f68 --- /dev/null +++ b/crates/core/src/parasigdb/memory.rs @@ -0,0 +1,282 @@ +#![allow(missing_docs)] +use std::{collections::HashMap, pin::Pin, sync::Arc}; +use tokio::sync::Mutex; +use tokio_util::sync::CancellationToken; +use tracing::{debug, warn}; + +use crate::{ + deadline::Deadliner, + parasigdb::metrics::PARASIG_DB_METRICS, + types::{Duty, DutyType, ParSignedData, ParSignedDataSet, PubKey}, +}; +use chrono::{DateTime, Utc}; + +/// Metadata for the memory ParSigDB. +pub struct MemDBMetadata { + /// Slot duration in seconds + pub slot_duration: u64, + /// Genesis time + pub genesis_time: DateTime, +} + +impl MemDBMetadata { + /// Creates new memory ParSigDB metadata. 
+ pub fn new(slot_duration: u64, genesis_time: DateTime) -> Self { + Self { + slot_duration, + genesis_time, + } + } +} + +pub type InternalSub = Box< + dyn Fn(&Duty, &ParSignedDataSet) -> Pin> + Send + Sync>> + + Send + + Sync + + 'static, +>; + +pub type ThreshSub = Box< + dyn Fn( + &Duty, + &HashMap>, + ) -> Pin> + Send + Sync>> + + Send + + Sync + + 'static, +>; + +#[derive(Debug, thiserror::Error)] +pub enum MemDBError { + #[error("mismatching partial signed data: pubkey {pubkey}, share_idx {share_idx}")] + ParsigDataMismatch { pubkey: PubKey, share_idx: u64 }, +} + +type Result = std::result::Result; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Key { + pub duty: Duty, + pub pub_key: PubKey, +} + +pub struct MemDBInner { + internal_subs: Vec, + thresh_subs: Vec, + + entries: HashMap>, + keys_by_duty: HashMap>, +} + +pub struct MemDB { + ct: CancellationToken, + inner: Arc>, + deadliner: Arc, + threshold: u64, +} + +impl MemDB { + pub fn new(ct: CancellationToken, threshold: u64, deadliner: Arc) -> Self { + Self { + ct, + inner: Arc::new(Mutex::new(MemDBInner { + internal_subs: Vec::new(), + thresh_subs: Vec::new(), + entries: HashMap::new(), + keys_by_duty: HashMap::new(), + })), + deadliner, + threshold, + } + } +} + +impl MemDB { + pub async fn subscribe_internal(&self, sub: InternalSub) -> Result<()> { + let mut inner = self.inner.lock().await; + inner.internal_subs.push(sub); + Ok(()) + } + + pub async fn subscribe_threshold(&self, sub: ThreshSub) -> Result<()> { + let mut inner = self.inner.lock().await; + inner.thresh_subs.push(sub); + Ok(()) + } + + pub async fn store_internal(&self, duty: &Duty, signed_set: &ParSignedDataSet) -> Result<()> { + let _ = self.store_external(duty, signed_set).await?; + + let inner = self.inner.lock().await; + for sub in &inner.internal_subs { + sub(&duty, &signed_set).await?; + } + drop(inner); + + Ok(()) + } + + pub async fn store_external(&self, duty: &Duty, signed_data: &ParSignedDataSet) -> Result<()> { 
+ let _ = self.deadliner.add(duty.clone()).await; + + let mut output: HashMap> = HashMap::new(); + + for (pub_key, par_signed) in signed_data.inner().iter() { + let sigs = self + .store( + Key { + duty: duty.clone(), + pub_key: pub_key.clone(), + }, + par_signed.clone(), + ) + .await?; + + let Some(sigs) = sigs else { + debug!("Ignoring duplicate partial signature"); + + continue; + }; + + let psigs = get_threshold_matching(&duty.duty_type, &sigs, self.threshold).await?; + + let Some(psigs) = psigs else { + continue; + }; + + output.insert(pub_key.clone(), psigs); + } + + if output.is_empty() { + return Ok(()); + } + + let inner = self.inner.lock().await; + for sub in inner.thresh_subs.iter() { + sub(&duty, &output).await?; + } + drop(inner); + + Ok(()) + } + + pub async fn trim(&self) { + let deadliner_rx = self.deadliner.c(); + if deadliner_rx.is_none() { + warn!("Deadliner channel is not available"); + return; + } + + let mut deadliner_rx = deadliner_rx.unwrap(); + + loop { + tokio::select! 
{ + biased; + + _ = self.ct.cancelled() => { + return; + } + + Some(duty) = deadliner_rx.recv() => { + let mut inner = self.inner.lock().await; + + for key in inner.keys_by_duty.get(&duty).cloned().unwrap_or_default() { + inner.entries.remove(&key); + } + + inner.keys_by_duty.remove(&duty); + + drop(inner); + } + } + } + } + + async fn store(&self, k: Key, value: ParSignedData) -> Result>> { + let mut inner = self.inner.lock().await; + + // Check if we already have an entry with this ShareIdx + if let Some(existing_entries) = inner.entries.get(&k) { + for s in existing_entries { + if s.share_idx == value.share_idx { + if s == &value { + // Duplicate, return None to indicate no new data + return Ok(None); + } else { + return Err(MemDBError::ParsigDataMismatch { + pubkey: k.pub_key, + share_idx: value.share_idx, + }); + } + } + } + } + + inner + .entries + .entry(k.clone()) + .or_insert_with(Vec::new) + .push(value.clone()); + inner + .keys_by_duty + .entry(k.duty.clone()) + .or_insert_with(Vec::new) + .push(k.clone()); + + if k.duty.duty_type == DutyType::Exit { + PARASIG_DB_METRICS.exit_total[&k.pub_key.to_string()].inc(); + } + + let result = inner + .entries + .get(&k) + .map(|entries| entries.clone()) + .unwrap_or_default(); + + Ok(Some(result)) + } +} + +async fn get_threshold_matching( + typ: &DutyType, + sigs: &[ParSignedData], + threshold: u64, +) -> Result>> { + // Not enough signatures to meet threshold + if (sigs.len() as u64) < threshold { + return Ok(None); + } + + if *typ == DutyType::Signature { + // Signatures do not support message roots. 
+ if sigs.len() as u64 == threshold { + return Ok(Some(sigs.to_vec())); + } else { + return Ok(None); + } + } + + // Group signatures by their message root + let mut sigs_by_msg_root: HashMap<[u8; 32], Vec> = HashMap::new(); + + for sig in sigs { + let root = sig.signed_data.message_root(); + sigs_by_msg_root + .entry(root) + .or_insert_with(Vec::new) + .push(sig.clone()); + } + + // Return the first set that has exactly threshold number of signatures + for set in sigs_by_msg_root.values() { + if set.len() as u64 == threshold { + return Ok(Some(set.clone())); + } + } + + Ok(None) +} + +#[cfg(test)] +#[path = "memory_internal_test.rs"] +mod memory_internal_test; diff --git a/crates/core/src/parasigdb/memory_internal_test.rs b/crates/core/src/parasigdb/memory_internal_test.rs new file mode 100644 index 00000000..4761f223 --- /dev/null +++ b/crates/core/src/parasigdb/memory_internal_test.rs @@ -0,0 +1,282 @@ +//! Internal tests for memory ParSigDB. +//! Mirrors the structure of charon/core/parsigdb/memory_internal_test.go + +use std::sync::Arc; + +use test_case::test_case; +use tokio::sync::Mutex; +use tokio_util::sync::CancellationToken; + +use super::get_threshold_matching; +use crate::{ + parasigdb::memory::{MemDB, MemDBMetadata}, testutils, types::{Duty, DutyType, ParSignedData, Signature, SignedData, SlotNumber} +}; + +/// Test wrapper for SyncCommitteeMessage (mimics altair.SyncCommitteeMessage). +/// The message root is the BeaconBlockRoot field. 
+#[derive(Debug, Clone)] +#[allow(dead_code)] +struct TestSyncCommitteeMessage { + slot: SlotNumber, + beacon_block_root: [u8; 32], + validator_index: u64, + signature: Signature, +} + +impl SignedData for TestSyncCommitteeMessage { + fn signature(&self) -> Signature { + self.signature.clone() + } + + fn set_signature( + &mut self, + signature: Signature, + ) -> std::result::Result<(), Box> { + self.signature = signature; + Ok(()) + } + + fn message_root(&self) -> [u8; 32] { + // For SyncCommitteeMessage, the message root is the BeaconBlockRoot + self.beacon_block_root + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn equals(&self, other: &dyn SignedData) -> bool { + self.message_root() == other.message_root() && self.signature() == other.signature() + } +} + +/// Test wrapper for BeaconCommitteeSelection (mimics +/// eth2v1.BeaconCommitteeSelection). The message root is computed from the Slot +/// field. +#[derive(Debug, Clone)] +#[allow(dead_code)] +struct TestBeaconCommitteeSelection { + validator_index: u64, + slot: SlotNumber, + selection_proof: Signature, +} + +impl SignedData for TestBeaconCommitteeSelection { + fn signature(&self) -> Signature { + self.selection_proof.clone() + } + + fn set_signature( + &mut self, + signature: Signature, + ) -> std::result::Result<(), Box> { + self.selection_proof = signature; + Ok(()) + } + + fn message_root(&self) -> [u8; 32] { + // For BeaconCommitteeSelection, the message root is derived from the slot. + // We'll use a simple hash: slot number in the first 8 bytes. 
+ let mut root = [0u8; 32]; + root[0..8].copy_from_slice(&self.slot.inner().to_le_bytes()); + root + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn equals(&self, other: &dyn SignedData) -> bool { + self.message_root() == other.message_root() && self.signature() == other.signature() + } +} + +/// Helper to create random roots for testing +fn random_root(seed: u8) -> [u8; 32] { + let mut root = [0u8; 32]; + root[0] = seed; + root +} + +/// Helper to create random signature for testing +fn random_signature(seed: u8) -> Signature { + let mut sig = [0u8; 96]; + sig[0] = seed; + Signature::new(sig) +} + +/// Copying function here, not using the pluto_cluster::helpers::threshold (not +/// implemented yet) because it would be huge unnecessary dependency for core. +#[allow(clippy::arithmetic_side_effects)] +fn threshold(n: u64) -> u64 { + (2 * n + 2) / 3 +} + +// Test cases for get_threshold_matching +// Matches Go test structure from +// memory_internal_test.go:TestGetThresholdMatching +#[test_case(vec![], None ; "empty")] +#[test_case(vec![0, 0, 0], Some(vec![0, 1, 2]) ; "all identical exact threshold")] +#[test_case(vec![0, 0, 0, 0], None ; "all identical above threshold")] +#[test_case(vec![0, 0, 1, 0], Some(vec![0, 1, 3]) ; "one odd")] +#[test_case(vec![0, 0, 1, 1], None ; "two odd")] +#[tokio::test] +async fn test_get_threshold_matching(input: Vec, output: Option>) { + const N: u64 = 4; + + let slot = SlotNumber::new(123456); + let val_idx = 42u64; + + // Two different roots to vary message roots + let roots = [random_root(1), random_root(2)]; + + // Test different message types using providers (matches Go approach) + let providers: Vec<(&str, Box Box>)> = vec![ + ( + "SyncCommitteeMessage", + Box::new(|i: usize| { + Box::new(TestSyncCommitteeMessage { + slot, + beacon_block_root: roots[input[i]], // Vary root based on input + validator_index: val_idx, + signature: random_signature(i as u8), + }) + }), + ), + ( + "Selection", + Box::new(|i: 
usize| { + Box::new(TestBeaconCommitteeSelection { + validator_index: val_idx, + slot: SlotNumber::new(input[i] as u64), // Vary slot based on input + selection_proof: random_signature(i as u8), + }) + }), + ), + ]; + + for (_, provider) in providers { + let mut par_sigs: Vec = Vec::new(); + for i in 0..input.len() { + let signed_data = provider(i); + let par_signed = ParSignedData::new(signed_data, i as u64); + par_sigs.push(par_signed); + } + + let th = threshold(N); + + let result = get_threshold_matching(&DutyType::Attester, &par_sigs, th) + .await + .expect("get_threshold_matching should not error"); + + // Check that if we got a result, it has the correct length (matches Go's ok + // check) + if let Some(ref vec) = result { + assert_eq!( + vec.len(), + th as usize, + "result length should match threshold" + ); + } + + let out = result.unwrap_or_default(); + + let mut expect = Vec::new(); + if let Some(output) = &output { + for &idx in output { + expect.push(par_sigs[idx].clone()); + } + } + + assert_eq!(out, expect, "result should match expected"); + } +} + +use pluto_testutil::random as tu_random; + +#[tokio::test] +async fn test_mem_db_threshold() { + const THRESHOLD: u64 = 7; + const N: u64 = 10; + + let deadliner = TestDeadliner::new(); + let ct = CancellationToken::new(); + + let db = Arc::new(MemDB::new(ct.child_token(), THRESHOLD, deadliner.clone())); + + let db_clone = db.clone(); + tokio::spawn(async move { + db_clone.trim().await; + }); + + let times_called = Arc::new(Mutex::new(0)); + + db.subscribe_threshold(Box::new({ + let times_called = times_called.clone(); + move |_duty, _output| { + let times_called = times_called.clone(); + Box::pin(async move { + *times_called.lock().await += 1; + Ok(()) + }) + } + })) + .await + .unwrap(); + + let pubkey = testutils::random_core_pub_key(); + let att = tu_random::random_deneb_versioned_attestation(); + +} + +/// Test deadliner for unit tests. 
+pub struct TestDeadliner { + added: Arc>>, + ch_tx: tokio::sync::mpsc::Sender, + ch_rx: Arc>>>, +} + +impl TestDeadliner { + /// Creates a new test deadliner. + #[allow(dead_code)] + pub fn new() -> Arc { + const CHANNEL_BUFFER: usize = 100; + let (tx, rx) = tokio::sync::mpsc::channel(CHANNEL_BUFFER); + Arc::new(Self { + added: Arc::new(tokio::sync::Mutex::new(Vec::new())), + ch_tx: tx, + ch_rx: Arc::new(tokio::sync::Mutex::new(Some(rx))), + }) + } + + /// Expires all added duties. + #[allow(dead_code)] + pub async fn expire(&self) -> bool { + let mut added = self.added.lock().await; + for duty in added.drain(..) { + if self.ch_tx.send(duty).await.is_err() { + return false; + } + } + // Send dummy duty to ensure all piped duties above were processed + self.ch_tx + .send(Duty::new(SlotNumber::new(0), DutyType::Unknown)) + .await + .is_ok() + } +} + +impl crate::deadline::Deadliner for TestDeadliner { + fn add(&self, duty: Duty) -> futures::future::BoxFuture<'_, bool> { + Box::pin(async move { + let mut added = self.added.lock().await; + added.push(duty); + true + }) + } + + fn c(&self) -> Option> { + let mut guard = self.ch_rx.blocking_lock(); + guard.take() + } +} diff --git a/crates/core/src/parasigdb/metrics.rs b/crates/core/src/parasigdb/metrics.rs new file mode 100644 index 00000000..c828725d --- /dev/null +++ b/crates/core/src/parasigdb/metrics.rs @@ -0,0 +1,12 @@ +use vise::*; + +/// Metrics for the ParSigDB. +#[derive(Debug, Clone, Metrics)] +pub struct ParasigDBMetrics { + /// Total number of partially signed voluntary exits per public key + #[metrics(labels = ["pubkey"])] + pub exit_total: LabeledFamily, +} + +/// Global metrics for the ParSigDB. +pub static PARASIG_DB_METRICS: Global = Global::new(); diff --git a/crates/core/src/parasigdb/mod.rs b/crates/core/src/parasigdb/mod.rs new file mode 100644 index 00000000..fd01b279 --- /dev/null +++ b/crates/core/src/parasigdb/mod.rs @@ -0,0 +1,5 @@ +/// Memory implementation of the ParSigDB. 
+pub mod memory; + +/// Metrics for the ParSigDB. +pub mod metrics; diff --git a/crates/core/src/testutils.rs b/crates/core/src/testutils.rs new file mode 100644 index 00000000..3183b146 --- /dev/null +++ b/crates/core/src/testutils.rs @@ -0,0 +1,141 @@ +//! Test utilities for the Charon core. + +use rand::{Rng, SeedableRng}; + +use crate::types::PubKey; + +/// The size of a BLS public key in bytes. +const PK_LEN: usize = 48; + +/// Creates a new seeded random number generator. +/// +/// Returns a new random number generator seeded with a random value. +/// This matches the Go implementation: `rand.New(rand.NewSource(rand.Int63()))`. +pub fn new_seed_rand() -> impl Rng { + let seed = rand::random::(); + rand::rngs::StdRng::seed_from_u64(seed) +} + +/// Returns a random core workflow pubkey. +/// +/// This is a convenience wrapper around `random_core_pub_key_seed` that creates +/// a new random seed for each call. +pub fn random_core_pub_key() -> PubKey { + random_core_pub_key_seed(new_seed_rand()) +} + +/// Returns a random core workflow pubkey using a provided random source. +/// +/// # Arguments +/// +/// * `rng` - A random number generator to use for generating the pubkey. +/// +/// # Panics +/// +/// Panics if the generated bytes cannot be converted to a valid PubKey. +/// This should never happen in practice as we generate exactly 48 bytes. +pub fn random_core_pub_key_seed(mut rng: R) -> PubKey { + let pubkey = deterministic_pub_key_seed(&mut rng); + PubKey::try_from(&pubkey[..]).expect("valid pubkey length") +} + +/// Generates a deterministic pubkey from a seeded RNG. +/// +/// This function creates a new RNG seeded from the input RNG, then fills +/// a 48-byte array with random data. This matches the Go implementation: +/// +/// ```go +/// random := rand.New(rand.NewSource(r.Int63())) +/// var key tbls.PublicKey +/// _, err := random.Read(key[:]) +/// ``` +/// +/// # Arguments +/// +/// * `rng` - A mutable reference to a random number generator. 
+/// +/// # Returns +/// +/// A 48-byte array containing random data suitable for use as a public key. +fn deterministic_pub_key_seed(rng: &mut R) -> [u8; PK_LEN] { + // Create a new RNG seeded from the input RNG (matching Go's rand.New(rand.NewSource(r.Int63()))) + let seed: u64 = rng.r#gen(); + let mut seeded_rng = rand::rngs::StdRng::seed_from_u64(seed); + + let mut key = [0u8; PK_LEN]; + // Fill the key with random bytes + for byte in &mut key { + *byte = seeded_rng.r#gen(); + } + + key +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_new_seed_rand_produces_different_values() { + let mut rng1 = new_seed_rand(); + let mut rng2 = new_seed_rand(); + + let val1: u64 = rng1.r#gen(); + let val2: u64 = rng2.r#gen(); + + // These should be different with very high probability + assert_ne!(val1, val2); + } + + #[test] + fn test_random_core_pub_key_generates_valid_keys() { + let pk1 = random_core_pub_key(); + let pk2 = random_core_pub_key(); + + // Keys should be different + assert_ne!(pk1, pk2); + + // Keys should have the correct length when serialized + assert_eq!(pk1.to_string().len(), 98); // 0x + 96 hex chars + assert_eq!(pk2.to_string().len(), 98); + } + + #[test] + fn test_random_core_pub_key_seed_is_deterministic() { + let seed = 12345u64; + let mut rng1 = rand::rngs::StdRng::seed_from_u64(seed); + let mut rng2 = rand::rngs::StdRng::seed_from_u64(seed); + + let pk1 = random_core_pub_key_seed(&mut rng1); + let pk2 = random_core_pub_key_seed(&mut rng2); + + // Same seed should produce same key + assert_eq!(pk1, pk2); + } + + #[test] + fn test_deterministic_pub_key_seed() { + let seed = 42u64; + let mut rng = rand::rngs::StdRng::seed_from_u64(seed); + + let key = deterministic_pub_key_seed(&mut rng); + + // Check that we got 48 bytes + assert_eq!(key.len(), PK_LEN); + + // Check that the key is not all zeros (very unlikely with a proper RNG) + assert!(key.iter().any(|&b| b != 0)); + } + + #[test] + fn 
test_random_core_pub_key_seed_different_rngs() { + let mut rng1 = rand::rngs::StdRng::seed_from_u64(1); + let mut rng2 = rand::rngs::StdRng::seed_from_u64(2); + + let pk1 = random_core_pub_key_seed(&mut rng1); + let pk2 = random_core_pub_key_seed(&mut rng2); + + // Different seeds should produce different keys + assert_ne!(pk1, pk2); + } +} diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index 9cc95d85..7fa29b07 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -448,19 +448,22 @@ impl AsRef<[u8; SIG_LEN]> for Signature { } /// Signed data type -pub trait SignedData: Clone + Serialize + StdDebug { - /// The error type - type Error: std::error::Error; - +pub trait SignedData: StdDebug + Send + Sync { /// signature returns the signed duty data's signature. fn signature(&self) -> Signature; /// set_signature returns a copy of signed duty data with the signature /// replaced. - fn set_signature(&mut self, signature: Signature) -> Result<(), Self::Error>; + fn set_signature(&mut self, signature: Signature) -> Result<(), Box>; /// message_root returns the message root for the unsigned data. fn message_root(&self) -> [u8; 32]; + + /// clone_box returns a boxed clone of the signed data. + fn clone_box(&self) -> Box; + + /// equals checks if two signed data are equal. + fn equals(&self, other: &dyn SignedData) -> bool; } // todo: add Eth2SignedData type @@ -468,21 +471,35 @@ pub trait SignedData: Clone + Serialize + StdDebug { /// ParSignedData is a partially signed duty data only signed by a single /// threshold BLS share. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ParSignedData { +#[derive(Debug)] +pub struct ParSignedData { /// Partially signed duty data. - pub signed_data: T, + pub signed_data: Box, /// Threshold BLS share index. 
pub share_idx: u64, } -impl ParSignedData -where - T: SignedData, -{ +impl Clone for ParSignedData { + fn clone(&self) -> Self { + Self { + signed_data: self.signed_data.clone_box(), + share_idx: self.share_idx, + } + } +} + +impl PartialEq for ParSignedData { + fn eq(&self, other: &Self) -> bool { + self.share_idx == other.share_idx && self.signed_data.equals(other.signed_data.as_ref()) + } +} + +impl Eq for ParSignedData {} + +impl ParSignedData { /// Create a new partially signed data. - pub fn new(partially_signed_data: T, share_idx: u64) -> Self { + pub fn new(partially_signed_data: Box, share_idx: u64) -> Self { Self { signed_data: partially_signed_data, share_idx, @@ -492,49 +509,51 @@ where /// ParSignedDataSet is a set of partially signed duty data only signed by a /// single threshold BLS share. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ParSignedDataSet(HashMap>); +#[derive(Debug, Clone)] +pub struct ParSignedDataSet(HashMap); -impl Default for ParSignedDataSet -where - T: SignedData, -{ +impl PartialEq for ParSignedDataSet { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for ParSignedDataSet {} + +impl Default for ParSignedDataSet { fn default() -> Self { Self(HashMap::default()) } } -impl ParSignedDataSet -where - T: SignedData, -{ +impl ParSignedDataSet { /// Create a new partially signed data set. pub fn new() -> Self { Self::default() } /// Get a partially signed data by public key. - pub fn get(&self, pub_key: &PubKey) -> Option<&ParSignedData> { + pub fn get(&self, pub_key: &PubKey) -> Option<&ParSignedData> { self.inner().get(pub_key) } /// Insert a partially signed data. - pub fn insert(&mut self, pub_key: PubKey, partially_signed_data: ParSignedData) { + pub fn insert(&mut self, pub_key: PubKey, partially_signed_data: ParSignedData) { self.inner_mut().insert(pub_key, partially_signed_data); } /// Remove a partially signed data by public key. 
- pub fn remove(&mut self, pub_key: &PubKey) -> Option> { + pub fn remove(&mut self, pub_key: &PubKey) -> Option { self.inner_mut().remove(pub_key) } /// Inner partially signed data set. - pub fn inner(&self) -> &HashMap> { + pub fn inner(&self) -> &HashMap { &self.0 } /// Inner partially signed data set. - pub fn inner_mut(&mut self) -> &mut HashMap> { + pub fn inner_mut(&mut self) -> &mut HashMap { &mut self.0 } } @@ -855,31 +874,43 @@ mod tests { struct MockSignedData; impl SignedData for MockSignedData { - type Error = std::io::Error; - fn signature(&self) -> Signature { Signature::new([42u8; SIG_LEN]) } - fn set_signature(&mut self, _signature: Signature) -> Result<(), std::io::Error> { + fn set_signature( + &mut self, + _signature: Signature, + ) -> Result<(), Box> { Ok(()) } fn message_root(&self) -> [u8; 32] { [42u8; 32] } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn equals(&self, _other: &dyn SignedData) -> bool { + // For testing purposes, we consider all MockSignedData instances equal + true + } } #[test] fn test_partially_signed_data_set() { let mut partially_signed_data_set = ParSignedDataSet::new(); - partially_signed_data_set.insert( - PubKey::new([42u8; PK_LEN]), - ParSignedData::new(MockSignedData, 0), - ); + let par_signed = ParSignedData::new(Box::new(MockSignedData), 0); + partially_signed_data_set.insert(PubKey::new([42u8; PK_LEN]), par_signed.clone()); + let retrieved = partially_signed_data_set.get(&PubKey::new([42u8; PK_LEN])); + assert!(retrieved.is_some()); + let retrieved = retrieved.unwrap(); + assert_eq!(retrieved.share_idx, 0); assert_eq!( - partially_signed_data_set.get(&PubKey::new([42u8; PK_LEN])), - Some(&ParSignedData::new(MockSignedData, 0)) + retrieved.signed_data.signature(), + Signature::new([42u8; SIG_LEN]) ); } diff --git a/crates/testutil/Cargo.toml b/crates/testutil/Cargo.toml index 0a973d1f..720bcc86 100644 --- a/crates/testutil/Cargo.toml +++ b/crates/testutil/Cargo.toml @@ -9,6 +9,7 @@ 
publish.workspace = true [dependencies] k256.workspace = true pluto-crypto.workspace = true +pluto-eth2api.workspace = true rand.workspace = true rand_core.workspace = true thiserror.workspace = true diff --git a/crates/testutil/src/random.rs b/crates/testutil/src/random.rs index 718f59bc..1461aeb1 100644 --- a/crates/testutil/src/random.rs +++ b/crates/testutil/src/random.rs @@ -7,6 +7,11 @@ use k256::{ elliptic_curve::rand_core::{CryptoRng, Error, RngCore}, }; use pluto_crypto::{blst_impl::BlstImpl, tbls::Tbls, types::PrivateKey}; +use pluto_eth2api::types::{ + AltairBeaconStateCurrentJustifiedCheckpoint, ConsensusVersion, Data, + GetAggregatedAttestationV2ResponseResponse, GetAggregatedAttestationV2ResponseResponseData, + GetBlockAttestationsV2ResponseResponseDataArray2, +}; use rand::{Rng, SeedableRng, rngs::StdRng}; /// A deterministic RNG that always returns the same byte value. @@ -67,6 +72,107 @@ pub fn generate_test_bls_key(seed: u64) -> PrivateKey { .expect("deterministic key generation should not fail") } +/// Generates a random BLS signature as a hex string for testing. +/// +/// Returns a 96-byte (192 hex characters) BLS signature encoded as a hex string +/// with "0x" prefix. +pub fn random_eth2_signature() -> String { + let mut bytes = [0u8; 96]; + let mut rng = rand::thread_rng(); + for byte in &mut bytes { + *byte = rng.r#gen(); + } + format!("0x{}", hex::encode(bytes)) +} + +/// Generates a random 32-byte root as a hex string for testing. +/// +/// Returns a 32-byte (64 hex characters) root encoded as a hex string with "0x" prefix. +pub fn random_root() -> String { + let mut bytes = [0u8; 32]; + let mut rng = rand::thread_rng(); + for byte in &mut bytes { + *byte = rng.r#gen(); + } + format!("0x{}", hex::encode(bytes)) +} + +/// Generates a random bitlist as a hex string for testing. +/// +/// # Arguments +/// +/// * `length` - The number of bits to set in the bitlist +/// +/// Returns a hex-encoded bitlist string with "0x" prefix. 
+pub fn random_bit_list(length: usize) -> String { + // Create a byte array large enough to hold the bits + // For simplicity, use 32 bytes (256 bits) + let mut bytes = [0u8; 32]; + let mut rng = rand::thread_rng(); + + // Set 'length' random bits + for _ in 0..length { + let bit_idx = rng.r#gen::() % 256; + let byte_idx = bit_idx / 8; + let bit_offset = bit_idx % 8; + bytes[byte_idx] |= 1 << bit_offset; + } + + format!("0x{}", hex::encode(bytes)) +} + +/// Generates a random checkpoint for testing. +fn random_checkpoint() -> AltairBeaconStateCurrentJustifiedCheckpoint { + let mut rng = rand::thread_rng(); + AltairBeaconStateCurrentJustifiedCheckpoint { + epoch: rng.r#gen::().to_string(), + root: random_root(), + } +} + +/// Generates random attestation data for Phase 0. +fn random_attestation_data_phase0() -> Data { + let mut rng = rand::thread_rng(); + Data { + slot: rng.r#gen::().to_string(), + index: rng.r#gen::().to_string(), + beacon_block_root: random_root(), + source: random_checkpoint(), + target: random_checkpoint(), + } +} + +/// Generates a random Phase 0 attestation. +/// +/// Returns an attestation with random aggregation bits, attestation data, and signature. +pub fn random_phase0_attestation() -> GetBlockAttestationsV2ResponseResponseDataArray2 { + GetBlockAttestationsV2ResponseResponseDataArray2 { + aggregation_bits: random_bit_list(1), + data: random_attestation_data_phase0(), + signature: random_eth2_signature(), + } +} + +/// Generates a random Deneb versioned attestation. +/// +/// Returns a versioned attestation containing a Phase 0 attestation with the Deneb version tag. 
+/// This matches the Go implementation: +/// +/// ```go +/// func RandomDenebVersionedAttestation() *eth2spec.VersionedAttestation { +/// return ð2spec.VersionedAttestation{ +/// Version: eth2spec.DataVersionDeneb, +/// Deneb: RandomPhase0Attestation(), +/// } +/// } +/// ``` +pub fn random_deneb_versioned_attestation() -> GetAggregatedAttestationV2ResponseResponse { + GetAggregatedAttestationV2ResponseResponse { + version: ConsensusVersion::Deneb, + data: GetAggregatedAttestationV2ResponseResponseData::Object2(random_phase0_attestation()), + } +} + #[cfg(test)] mod tests { use super::*; @@ -143,4 +249,90 @@ mod tests { "Different seeds should produce different BLS keys" ); } + + #[test] + fn test_random_eth2_signature() { + let sig1 = random_eth2_signature(); + let sig2 = random_eth2_signature(); + + // Check format + assert!(sig1.starts_with("0x")); + // 96 bytes = 192 hex chars + "0x" prefix = 194 total + assert_eq!(sig1.len(), 194); + + // Different calls should produce different signatures + assert_ne!(sig1, sig2); + } + + #[test] + fn test_random_root() { + let root1 = random_root(); + let root2 = random_root(); + + // Check format + assert!(root1.starts_with("0x")); + // 32 bytes = 64 hex chars + "0x" prefix = 66 total + assert_eq!(root1.len(), 66); + + // Different calls should produce different roots + assert_ne!(root1, root2); + } + + #[test] + fn test_random_bit_list() { + let bitlist = random_bit_list(5); + + // Check format + assert!(bitlist.starts_with("0x")); + // 32 bytes = 64 hex chars + "0x" prefix = 66 total + assert_eq!(bitlist.len(), 66); + } + + #[test] + fn test_random_phase0_attestation() { + let att = random_phase0_attestation(); + + // Check that all fields are populated + assert!(att.aggregation_bits.starts_with("0x")); + assert!(att.signature.starts_with("0x")); + assert!(att.data.beacon_block_root.starts_with("0x")); + assert!(!att.data.slot.is_empty()); + assert!(!att.data.index.is_empty()); + } + + #[test] + fn 
test_random_deneb_versioned_attestation() { + let versioned_att = random_deneb_versioned_attestation(); + + // Check version is Deneb + assert!(matches!(versioned_att.version, ConsensusVersion::Deneb)); + + // Check that data is populated + match versioned_att.data { + GetAggregatedAttestationV2ResponseResponseData::Object2(att) => { + assert!(att.aggregation_bits.starts_with("0x")); + assert!(att.signature.starts_with("0x")); + } + _ => panic!("Expected Object2 variant"), + } + } + + #[test] + fn test_random_deneb_versioned_attestation_different() { + let att1 = random_deneb_versioned_attestation(); + let att2 = random_deneb_versioned_attestation(); + + // Different calls should produce different attestations + // Check signatures are different + let sig1 = match &att1.data { + GetAggregatedAttestationV2ResponseResponseData::Object2(a) => &a.signature, + _ => panic!("Expected Object2"), + }; + let sig2 = match &att2.data { + GetAggregatedAttestationV2ResponseResponseData::Object2(a) => &a.signature, + _ => panic!("Expected Object2"), + }; + + assert_ne!(sig1, sig2); + } } From d71c8850970d3f44a9f7b18a9b13379bbf313ff6 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Thu, 12 Mar 2026 16:17:24 +0100 Subject: [PATCH 05/11] refactor: remove old app/deadline --- crates/app/src/deadline/mod.rs | 53 ---------------------------------- crates/app/src/lib.rs | 3 -- 2 files changed, 56 deletions(-) delete mode 100644 crates/app/src/deadline/mod.rs diff --git a/crates/app/src/deadline/mod.rs b/crates/app/src/deadline/mod.rs deleted file mode 100644 index a8c39809..00000000 --- a/crates/app/src/deadline/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -use pluto_core::types::{Duty, DutyType}; -use pluto_eth2api::{EthBeaconNodeApiClient, EthBeaconNodeApiClientError}; - -/// Defines the fraction of the slot duration to use as a margin. -/// This is to consider network delays and other factors that may affect the -/// timing. 
-pub const MARGIN_FACTOR: u32 = 12; - -/// A function that returns the deadline for a duty. -pub type DeadlineFunc = Box Option> + Send + Sync>; - -/// Error type for deadline-related operations. -#[derive(Debug, thiserror::Error)] -pub enum DeadlineError { - /// Beacon client API error. - #[error("Beacon client error: {0}")] - BeaconClientError(#[from] EthBeaconNodeApiClientError), -} - -type Result = std::result::Result; - -/// Create a function that provides duty deadline or [`None`] if the duty never -/// deadlines. -pub async fn new_duty_deadline_func(eth2_cl: &EthBeaconNodeApiClient) -> Result { - let genesis_time = eth2_cl.fetch_genesis_time().await?; - let (slot_duration, _) = eth2_cl.fetch_slots_config().await?; - - #[allow( - clippy::arithmetic_side_effects, - reason = "Matches original implementation" - )] - Ok(Box::new(move |duty: Duty| match duty.duty_type { - DutyType::Exit | DutyType::BuilderRegistration => None, - _ => { - #[allow( - clippy::cast_possible_truncation, - reason = "TODO: unsupported operation in u64" - )] - let start = genesis_time + (slot_duration * (u64::from(duty.slot)) as u32); - let margin = slot_duration / MARGIN_FACTOR; - - let duration = match duty.duty_type { - DutyType::Proposer | DutyType::Randao => slot_duration / 3, - DutyType::SyncMessage => 2 * slot_duration / 3, - DutyType::Attester | DutyType::Aggregator | DutyType::PrepareAggregator => { - 2 * slot_duration - } - _ => slot_duration, - }; - Some(start + duration + margin) - } - })) -} diff --git a/crates/app/src/lib.rs b/crates/app/src/lib.rs index 3252fe67..5602c21c 100644 --- a/crates/app/src/lib.rs +++ b/crates/app/src/lib.rs @@ -13,9 +13,6 @@ pub mod log; /// until the deadline has elapsed. pub mod retry; -/// Deadline -pub mod deadline; - /// Featureset defines a set of global features and their rollout status. 
pub mod featureset; From 13de57a98dd18d60a9edec7d10e7e9184bba4138 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Tue, 17 Mar 2026 12:50:13 +0100 Subject: [PATCH 06/11] feat: add rust docs --- crates/core/src/parasigdb/memory.rs | 164 +++++++++++++++--- .../src/parasigdb/memory_internal_test.rs | 54 +++++- 2 files changed, 189 insertions(+), 29 deletions(-) diff --git a/crates/core/src/parasigdb/memory.rs b/crates/core/src/parasigdb/memory.rs index dd1e0f68..9de14c97 100644 --- a/crates/core/src/parasigdb/memory.rs +++ b/crates/core/src/parasigdb/memory.rs @@ -1,5 +1,4 @@ -#![allow(missing_docs)] -use std::{collections::HashMap, pin::Pin, sync::Arc}; +use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc}; use tokio::sync::Mutex; use tokio_util::sync::CancellationToken; use tracing::{debug, warn}; @@ -29,14 +28,22 @@ impl MemDBMetadata { } } -pub type InternalSub = Box< +/// Subscriber callback for internally generated partial signed data. +/// +/// Called when the node generates partial signed data that needs to be +/// exchanged with peers. +pub type InternalSub = Arc< dyn Fn(&Duty, &ParSignedDataSet) -> Pin> + Send + Sync>> + Send + Sync + 'static, >; -pub type ThreshSub = Box< +/// Subscriber callback for threshold-reached partial signed data. +/// +/// Called when enough matching partial signatures have been collected +/// to meet the threshold requirement. +pub type ThreshSub = Arc< dyn Fn( &Duty, &HashMap>, @@ -46,20 +53,95 @@ pub type ThreshSub = Box< + 'static, >; +/// Helper to create an internal subscriber from a closure. +/// +/// The closure receives owned copies of the duty and data set. Since the closure +/// is `Fn` (can be called multiple times), you need to clone any captured Arc values +/// before the `async move` block. 
+/// +/// # Example +/// ```ignore +/// let counter = Arc::new(Mutex::new(0)); +/// let sub = internal_subscriber({ +/// let counter = counter.clone(); +/// move |_duty, _set| { +/// let counter = counter.clone(); +/// async move { +/// *counter.lock().await += 1; +/// Ok(()) +/// } +/// } +/// }); +/// db.subscribe_internal(sub).await?; +/// ``` +pub fn internal_subscriber(f: F) -> InternalSub +where + F: Fn(Duty, ParSignedDataSet) -> Fut + Send + Sync + 'static, + Fut: Future> + Send + Sync + 'static, +{ + Arc::new(move |duty, set| { + let fut = f(duty.clone(), set.clone()); + Box::pin(fut) + }) +} + +/// Helper to create a threshold subscriber from a closure. +/// +/// The closure receives owned copies of the duty and data. Since the closure +/// is `Fn` (can be called multiple times), you need to clone any captured Arc values +/// before the `async move` block. +/// +/// # Example +/// ```ignore +/// let counter = Arc::new(Mutex::new(0)); +/// let sub = threshold_subscriber({ +/// let counter = counter.clone(); +/// move |_duty, _data| { +/// let counter = counter.clone(); +/// async move { +/// *counter.lock().await += 1; +/// Ok(()) +/// } +/// } +/// }); +/// db.subscribe_threshold(sub).await?; +/// ``` +pub fn threshold_subscriber(f: F) -> ThreshSub +where + F: Fn(Duty, HashMap>) -> Fut + Send + Sync + 'static, + Fut: Future> + Send + Sync + 'static, +{ + Arc::new(move |duty, data| { + let fut = f(duty.clone(), data.clone()); + Box::pin(fut) + }) +} + +/// Error type for the memory ParSigDB. #[derive(Debug, thiserror::Error)] pub enum MemDBError { + /// Mismatching partial signed data. 
#[error("mismatching partial signed data: pubkey {pubkey}, share_idx {share_idx}")] - ParsigDataMismatch { pubkey: PubKey, share_idx: u64 }, + ParsigDataMismatch { + /// Public key of the validator + pubkey: PubKey, + /// Share index of the mismatched signature + share_idx: u64, + }, } type Result = std::result::Result; +/// Key for indexing partial signed data in the database. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Key { + /// The duty this partial signature is for pub duty: Duty, + /// The public key of the validator pub pub_key: PubKey, } +/// Internal state of the in-memory partial signature database. pub struct MemDBInner { internal_subs: Vec, thresh_subs: Vec, @@ -68,6 +150,10 @@ pub struct MemDBInner { keys_by_duty: HashMap>, } +/// In-memory partial signature database. +/// +/// Stores partial signed data from cluster nodes and triggers callbacks +/// when threshold is reached or when internal data is generated. pub struct MemDB { ct: CancellationToken, inner: Arc>, @@ -76,6 +162,12 @@ pub struct MemDB { } impl MemDB { + /// Creates a new in-memory partial signature database. + /// + /// # Arguments + /// * `ct` - Cancellation token for graceful shutdown + /// * `threshold` - Number of matching partial signatures required + /// * `deadliner` - Deadliner for managing duty expiration pub fn new(ct: CancellationToken, threshold: u64, deadliner: Arc) -> Self { Self { ct, @@ -92,30 +184,53 @@ impl MemDB { } impl MemDB { + /// Registers a subscriber for internally generated partial signed data. + /// + /// The subscriber will be called when the node generates partial signed data + /// that needs to be exchanged with peers. pub async fn subscribe_internal(&self, sub: InternalSub) -> Result<()> { let mut inner = self.inner.lock().await; inner.internal_subs.push(sub); Ok(()) } + /// Registers a subscriber for threshold-reached partial signed data. 
+ /// + /// The subscriber will be called when enough matching partial signatures + /// have been collected to meet the threshold requirement. pub async fn subscribe_threshold(&self, sub: ThreshSub) -> Result<()> { let mut inner = self.inner.lock().await; inner.thresh_subs.push(sub); Ok(()) } + /// Stores internally generated partial signed data and notifies subscribers. + /// + /// This is called when the node generates partial signed data that needs to be + /// stored and exchanged with peers. It first stores the data (via `store_external`), + /// then calls all internal subscribers to trigger peer exchange. pub async fn store_internal(&self, duty: &Duty, signed_set: &ParSignedDataSet) -> Result<()> { - let _ = self.store_external(duty, signed_set).await?; + self.store_external(duty, signed_set).await?; - let inner = self.inner.lock().await; - for sub in &inner.internal_subs { - sub(&duty, &signed_set).await?; + // Collect subscribers first, then release lock before calling them + let subs = { + let inner = self.inner.lock().await; + inner.internal_subs.clone() + }; + + // Call subscribers without holding lock + for sub in &subs { + sub(duty, signed_set).await?; } - drop(inner); Ok(()) } + /// Stores externally received partial signed data and checks for threshold. + /// + /// This is called when the node receives partial signed data from peers. It stores + /// the data, checks if enough matching signatures have been collected to meet the + /// threshold, and calls threshold subscribers when the threshold is reached. 
pub async fn store_external(&self, duty: &Duty, signed_data: &ParSignedDataSet) -> Result<()> { let _ = self.deadliner.add(duty.clone()).await; @@ -126,7 +241,7 @@ impl MemDB { .store( Key { duty: duty.clone(), - pub_key: pub_key.clone(), + pub_key: *pub_key, }, par_signed.clone(), ) @@ -144,30 +259,37 @@ impl MemDB { continue; }; - output.insert(pub_key.clone(), psigs); + output.insert(*pub_key, psigs); } if output.is_empty() { return Ok(()); } - let inner = self.inner.lock().await; - for sub in inner.thresh_subs.iter() { - sub(&duty, &output).await?; + // Collect subscribers first, then release lock before calling them + let subs = { + let inner = self.inner.lock().await; + inner.thresh_subs.clone() + }; + + // Call subscribers without holding lock + for sub in &subs { + sub(duty, &output).await?; } - drop(inner); Ok(()) } + /// Trims expired duties from the database. + /// + /// This method runs in a loop, listening for expired duties from the deadliner + /// and removing their associated data from the database. It should be spawned + /// as a background task and will run until the cancellation token is triggered. pub async fn trim(&self) { - let deadliner_rx = self.deadliner.c(); - if deadliner_rx.is_none() { + let Some(mut deadliner_rx) = self.deadliner.c() else { warn!("Deadliner channel is not available"); return; - } - - let mut deadliner_rx = deadliner_rx.unwrap(); + }; loop { tokio::select! 
{ diff --git a/crates/core/src/parasigdb/memory_internal_test.rs b/crates/core/src/parasigdb/memory_internal_test.rs index 4761f223..d4727719 100644 --- a/crates/core/src/parasigdb/memory_internal_test.rs +++ b/crates/core/src/parasigdb/memory_internal_test.rs @@ -9,7 +9,9 @@ use tokio_util::sync::CancellationToken; use super::get_threshold_matching; use crate::{ - parasigdb::memory::{MemDB, MemDBMetadata}, testutils, types::{Duty, DutyType, ParSignedData, Signature, SignedData, SlotNumber} + parasigdb::memory::MemDB, + testutils, + types::{Duty, DutyType, ParSignedData, Signature, SignedData, SlotNumber}, }; /// Test wrapper for SyncCommitteeMessage (mimics altair.SyncCommitteeMessage). @@ -197,7 +199,6 @@ use pluto_testutil::random as tu_random; #[tokio::test] async fn test_mem_db_threshold() { const THRESHOLD: u64 = 7; - const N: u64 = 10; let deadliner = TestDeadliner::new(); let ct = CancellationToken::new(); @@ -211,22 +212,59 @@ async fn test_mem_db_threshold() { let times_called = Arc::new(Mutex::new(0)); - db.subscribe_threshold(Box::new({ + // Using the helper function + // Note: We need to clone inside because the outer closure is Fn (not FnOnce), + // so it can be called multiple times + db.subscribe_threshold(super::threshold_subscriber({ let times_called = times_called.clone(); - move |_duty, _output| { + move |_duty, _data| { let times_called = times_called.clone(); - Box::pin(async move { + async move { *times_called.lock().await += 1; Ok(()) - }) + } } })) .await .unwrap(); - let pubkey = testutils::random_core_pub_key(); - let att = tu_random::random_deneb_versioned_attestation(); + let _pubkey = testutils::random_core_pub_key(); + let _att = tu_random::random_deneb_versioned_attestation(); +} + +/// Test using the helper function for internal subscriber. 
+#[tokio::test] +async fn test_mem_db_with_internal_helper() { + const THRESHOLD: u64 = 7; + + let deadliner = TestDeadliner::new(); + let ct = CancellationToken::new(); + + let db = Arc::new(MemDB::new(ct.child_token(), THRESHOLD, deadliner.clone())); + + let db_clone = db.clone(); + tokio::spawn(async move { + db_clone.trim().await; + }); + + let counter = Arc::new(Mutex::new(0u64)); + + // Using the helper function + // Note: We need to clone inside because the outer closure is Fn (not FnOnce) + db.subscribe_internal(super::internal_subscriber({ + let counter = counter.clone(); + move |_duty, _set| { + let counter = counter.clone(); + async move { + *counter.lock().await += 1; + Ok(()) + } + } + })) + .await + .unwrap(); + assert_eq!(*counter.lock().await, 0); } /// Test deadliner for unit tests. From fc85e63637ad0de75f77eb1ce2e4e0eacd067415 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Tue, 17 Mar 2026 22:57:58 +0100 Subject: [PATCH 07/11] feat: add clone box and clone eq --- Cargo.lock | 8 ++++++++ Cargo.toml | 2 ++ crates/core/Cargo.toml | 2 ++ crates/core/src/signeddata.rs | 3 +++ crates/core/src/types.rs | 7 ++++++- 5 files changed, 21 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2275275a..7b2cda4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2375,6 +2375,12 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "dyn-eq" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c2d035d21af5cde1a6f5c7b444a5bf963520a9f142e5d06931178433d7d5388" + [[package]] name = "ecdsa" version = "0.16.9" @@ -5518,6 +5524,8 @@ dependencies = [ "cancellation", "chrono", "crossbeam", + "dyn-clone", + "dyn-eq", "hex", "libp2p", "pluto-build-proto", diff --git a/Cargo.toml b/Cargo.toml index 93eddca0..c263399f 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,8 @@ cancellation = "0.1.0" chrono = { version = "0.4", features = ["serde"] } clap = { version = "4.5.53", features = ["derive", "env", "cargo"] } crossbeam = "0.8.4" +dyn-clone = "1.0" +dyn-eq = "0.1.3" either = "1.13" futures = "0.3" futures-timer = "3.0" diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index ea060e56..72609a4c 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -10,6 +10,8 @@ publish.workspace = true cancellation.workspace = true chrono.workspace = true crossbeam.workspace = true +dyn-clone.workspace = true +dyn-eq.workspace = true hex.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/core/src/signeddata.rs b/crates/core/src/signeddata.rs index 09865daf..27cd676a 100644 --- a/crates/core/src/signeddata.rs +++ b/crates/core/src/signeddata.rs @@ -48,6 +48,9 @@ pub enum SignedDataError { /// Invalid attestation wrapper JSON. #[error("unmarshal attestation")] AttestationJson, + /// Custom error. 
+ #[error("{0}")] + Custom(Box), } fn hash_root(value: &T) -> [u8; 32] { diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index 2d0f3b37..5f66e7f4 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -3,6 +3,8 @@ use std::{collections::HashMap, fmt::Display, iter}; use chrono::{DateTime, Duration, Utc}; +use dyn_clone::DynClone; +use dyn_eq::DynEq; use serde::{Deserialize, Serialize}; use std::fmt::Debug as StdDebug; @@ -448,7 +450,7 @@ impl AsRef<[u8; SIG_LEN]> for Signature { } /// Signed data type -pub trait SignedData: Clone + Serialize + StdDebug { +pub trait SignedData: DynClone + DynEq + StdDebug { /// The error type type Error: std::error::Error; @@ -464,6 +466,9 @@ pub trait SignedData: Clone + Serialize + StdDebug { fn message_root(&self) -> Result<[u8; 32], Self::Error>; } +dyn_eq::eq_trait_object!(SignedData); +dyn_clone::clone_trait_object!(SignedData); + // todo: add Eth2SignedData type // https://github.com/ObolNetwork/charon/blob/b3008103c5429b031b63518195f4c49db4e9a68d/core/types.go#L396 From 288f63cabc3c36921924edbe97172a3cd6d3a15e Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:22:26 +0100 Subject: [PATCH 08/11] feat: finish tests --- .../src/parasigdb/memory_internal_test.rs | 383 +++++++----------- crates/testutil/src/lib.rs | 5 + crates/testutil/src/random.rs | 90 +++- 3 files changed, 223 insertions(+), 255 deletions(-) diff --git a/crates/core/src/parasigdb/memory_internal_test.rs b/crates/core/src/parasigdb/memory_internal_test.rs index c36c7f58..58f4c474 100644 --- a/crates/core/src/parasigdb/memory_internal_test.rs +++ b/crates/core/src/parasigdb/memory_internal_test.rs @@ -1,203 +1,122 @@ -//! Internal tests for memory ParSigDB. -//! 
Mirrors the structure of charon/core/parsigdb/memory_internal_test.go - -use std::sync::Arc; +use std::{ + sync::{Arc, Mutex as StdMutex}, + time::Duration, +}; +use futures::future::{BoxFuture, FutureExt}; +use pluto_eth2api::{spec::altair, v1}; +use pluto_testutil as testutil; use test_case::test_case; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, mpsc}; use tokio_util::sync::CancellationToken; -use super::get_threshold_matching; +use super::{MemDB, get_threshold_matching, threshold_subscriber}; use crate::{ - parasigdb::memory::MemDB, - signeddata, testutils, - types::{self, Duty, DutyType, ParSignedData, Signature, SignedData, SlotNumber}, + deadline::Deadliner, + signeddata::{BeaconCommitteeSelection, SignedSyncMessage, VersionedAttestation}, + testutils::random_core_pub_key, + types::{Duty, DutyType, ParSignedData, ParSignedDataSet, SlotNumber}, }; -/// Test wrapper for SyncCommitteeMessage (mimics altair.SyncCommitteeMessage). -/// The message root is the BeaconBlockRoot field. -#[derive(Debug, Clone, PartialEq, Eq)] -#[allow(dead_code)] -struct TestSyncCommitteeMessage { - slot: SlotNumber, - beacon_block_root: [u8; 32], - validator_index: u64, - signature: Signature, +fn threshold(nodes: usize) -> u64 { + (2_u64 + .checked_mul(u64::try_from(nodes).expect("nodes overflow")) + .expect("nodes overflow")) + .div_ceil(3) } -impl SignedData for TestSyncCommitteeMessage { - fn signature(&self) -> Result { - Ok(self.signature.clone()) - } - - fn set_signature(&self, signature: Signature) -> Result { - let mut out = self.clone(); - out.signature = signature; - Ok(out) - } - - fn message_root(&self) -> Result<[u8; 32], signeddata::SignedDataError> { - // For SyncCommitteeMessage, the message root is the BeaconBlockRoot - Ok(self.beacon_block_root) - } -} - -/// Test wrapper for BeaconCommitteeSelection (mimics -/// eth2v1.BeaconCommitteeSelection). The message root is computed from the Slot -/// field. 
-#[derive(Debug, Clone, PartialEq, Eq)] -#[allow(dead_code)] -struct TestBeaconCommitteeSelection { - validator_index: u64, - slot: SlotNumber, - selection_proof: Signature, -} - -impl SignedData for TestBeaconCommitteeSelection { - fn signature(&self) -> Result { - Ok(self.selection_proof.clone()) - } - - fn set_signature(&self, signature: Signature) -> Result { - let mut out = self.clone(); - out.selection_proof = signature; - Ok(out) - } - - fn message_root(&self) -> Result<[u8; 32], signeddata::SignedDataError> { - // For BeaconCommitteeSelection, the message root is derived from the slot. - // We'll use a simple hash: slot number in the first 8 bytes. - let mut root = [0u8; 32]; - root[0..8].copy_from_slice(&self.slot.inner().to_le_bytes()); - Ok(root) - } -} - -/// Helper to create random roots for testing -fn random_root(seed: u8) -> [u8; 32] { - let mut root = [0u8; 32]; - root[0] = seed; - root -} - -/// Helper to create random signature for testing -fn random_signature(seed: u8) -> Signature { - let mut sig = [0u8; 96]; - sig[0] = seed; - Signature::new(sig) -} - -/// Copying function here, not using the pluto_cluster::helpers::threshold (not -/// implemented yet) because it would be huge unnecessary dependency for core. 
-#[allow(clippy::arithmetic_side_effects, clippy::manual_div_ceil)] -fn threshold(n: u64) -> u64 { - (2 * n + 2) / 3 -} - -// Test cases for get_threshold_matching -// Matches Go test structure from -// memory_internal_test.go:TestGetThresholdMatching -#[test_case(vec![], None ; "empty")] -#[test_case(vec![0, 0, 0], Some(vec![0, 1, 2]) ; "all identical exact threshold")] -#[test_case(vec![0, 0, 0, 0], None ; "all identical above threshold")] -#[test_case(vec![0, 0, 1, 0], Some(vec![0, 1, 3]) ; "one odd")] -#[test_case(vec![0, 0, 1, 1], None ; "two odd")] +#[test_case(Vec::new(), Vec::new() ; "empty")] +#[test_case(vec![0, 0, 0], vec![0, 1, 2] ; "all identical exact threshold")] +#[test_case(vec![0, 0, 0, 0], Vec::new() ; "all identical above threshold")] +#[test_case(vec![0, 0, 1, 0], vec![0, 1, 3] ; "one odd")] +#[test_case(vec![0, 0, 1, 1], Vec::new() ; "two odd")] #[tokio::test] -async fn test_get_threshold_matching(input: Vec, output: Option>) { - const N: u64 = 4; +async fn test_get_threshold_matching(input: Vec, output: Vec) { + const N: usize = 4; - let slot = SlotNumber::new(123456); - let val_idx = 42u64; + let slot = testutil::random_slot(); + let validator_index = testutil::random_v_idx(); + let roots = [testutil::random_root_bytes(), testutil::random_root_bytes()]; + let threshold = threshold(N); - // Two different roots to vary message roots - let roots = [random_root(1), random_root(2)]; + type Providers<'a> = [(&'a str, Box ParSignedData + 'a>); 2]; - // Test different message types using providers (matches Go approach) - #[allow(clippy::type_complexity)] - let providers: Vec<(&str, Box Box>)> = vec![ + let providers: Providers<'_> = [ ( - "SyncCommitteeMessage", - Box::new(|i: usize| { - Box::new(TestSyncCommitteeMessage { + "sync_committee_message", + Box::new(|i| { + let message = altair::SyncCommitteeMessage { slot, - beacon_block_root: roots[input[i]], // Vary root based on input - validator_index: val_idx, - signature: 
random_signature(u8::try_from(i).unwrap()), - }) + beacon_block_root: roots[input[i]], + validator_index, + signature: testutil::random_eth2_signature_bytes(), + }; + + SignedSyncMessage::new_partial(message, u64::try_from(i.wrapping_add(1)).unwrap()) }), ), ( - "Selection", - Box::new(|i: usize| { - Box::new(TestBeaconCommitteeSelection { - validator_index: val_idx, - slot: SlotNumber::new(u64::try_from(input[i]).unwrap()), /* Vary slot based - * on input */ - selection_proof: random_signature(u8::try_from(i).unwrap()), - }) + "selection", + Box::new(|i| { + let selection = v1::BeaconCommitteeSelection { + validator_index, + slot: u64::try_from(input[i]).unwrap(), + selection_proof: testutil::random_eth2_signature_bytes(), + }; + + BeaconCommitteeSelection::new_partial( + selection, + u64::try_from(i.wrapping_add(1)).unwrap(), + ) }), ), ]; - for (_, provider) in providers { - let mut par_sigs: Vec = Vec::new(); + for (name, provider) in providers { + let mut data = Vec::new(); for i in 0..input.len() { - let signed_data = provider(i); - let par_signed = ParSignedData::new_boxed(signed_data, i as u64); - par_sigs.push(par_signed); + data.push(provider(i)); } - let th = threshold(N); - - let result = get_threshold_matching(&DutyType::Attester, &par_sigs, th) + let out = get_threshold_matching(&DutyType::SyncMessage, &data, threshold) .await - .expect("get_threshold_matching should not error"); - - // Check that if we got a result, it has the correct length (matches Go's ok - // check) - if let Some(ref vec) = result { - assert_eq!( - vec.len(), - usize::try_from(th).unwrap(), - "result length should match threshold" - ); - } - - let out = result.unwrap_or_default(); - - let mut expect = Vec::new(); - if let Some(output) = &output { - for &idx in output { - expect.push(par_sigs[idx].clone()); - } - } - - assert_eq!(out, expect, "result should match expected"); + .expect("threshold matching should succeed"); + let expect: Vec<_> = output.iter().map(|idx| 
data[*idx].clone()).collect(); + let expected_out = if expect.is_empty() { + None + } else { + Some(expect.clone()) + }; + + assert_eq!(expected_out, out, "{name}/output mismatch"); + assert_eq!( + out.as_ref() + .map(|matches| u64::try_from(matches.len()).unwrap() == threshold) + .unwrap_or(false), + expect.len() as u64 == threshold, + "{name}/ok mismatch" + ); } } -use pluto_testutil::random as tu_random; - #[tokio::test] -async fn test_mem_db_threshold() { +async fn test_memdb_threshold() { const THRESHOLD: u64 = 7; + const N: usize = 10; - let deadliner = TestDeadliner::new(); - let ct = CancellationToken::new(); - - let db = Arc::new(MemDB::new(ct.child_token(), THRESHOLD, deadliner.clone())); + let deadliner = Arc::new(TestDeadliner::new()); + let cancel = CancellationToken::new(); + let db = Arc::new(MemDB::new(cancel.clone(), THRESHOLD, deadliner.clone())); - let db_clone = db.clone(); - tokio::spawn(async move { - db_clone.trim().await; + let trim_handle = tokio::spawn({ + let db = db.clone(); + async move { + db.trim().await; + } }); - let times_called = Arc::new(Mutex::new(0)); - - // Using the helper function - // Note: We need to clone inside because the outer closure is Fn (not FnOnce), - // so it can be called multiple times - db.subscribe_threshold(super::threshold_subscriber({ + let times_called = Arc::new(Mutex::new(0usize)); + db.subscribe_threshold(threshold_subscriber({ let times_called = times_called.clone(); move |_duty, _data| { let times_called = times_called.clone(); @@ -208,95 +127,89 @@ async fn test_mem_db_threshold() { } })) .await - .unwrap(); - - let _pubkey = testutils::random_core_pub_key(); - let _att = tu_random::random_deneb_versioned_attestation(); -} - -/// Test using the helper function for internal subscriber. 
-#[tokio::test] -async fn test_mem_db_with_internal_helper() { - const THRESHOLD: u64 = 7; - - let deadliner = TestDeadliner::new(); - let ct = CancellationToken::new(); - - let db = Arc::new(MemDB::new(ct.child_token(), THRESHOLD, deadliner.clone())); + .expect("subscription should succeed"); + + let pubkey = random_core_pub_key(); + let attestation = testutil::random_deneb_versioned_attestation(); + let duty = Duty::new_attester_duty(SlotNumber::new(123)); + + let enqueue_n = || async { + for i in 0..N { + let partial = VersionedAttestation::new_partial( + attestation.clone(), + u64::try_from(i + 1).unwrap(), + ) + .expect("versioned attestation should be valid"); + + let mut set = ParSignedDataSet::new(); + set.insert(pubkey, partial); + + db.store_external(&duty, &set) + .await + .expect("store_external should succeed"); + } + }; - let db_clone = db.clone(); - tokio::spawn(async move { - db_clone.trim().await; - }); + enqueue_n().await; + assert_eq!(1, *times_called.lock().await); - let counter = Arc::new(Mutex::new(0u64)); + deadliner.expire().await; + tokio::time::sleep(Duration::from_millis(20)).await; - // Using the helper function - // Note: We need to clone inside because the outer closure is Fn (not FnOnce) - db.subscribe_internal(super::internal_subscriber({ - let counter = counter.clone(); - move |_duty, _set| { - let counter = counter.clone(); - async move { - *counter.lock().await += 1; - Ok(()) - } - } - })) - .await - .unwrap(); + enqueue_n().await; + assert_eq!(2, *times_called.lock().await); - assert_eq!(*counter.lock().await, 0); + cancel.cancel(); + trim_handle + .await + .expect("trim task should shut down cleanly"); } -/// Test deadliner for unit tests. -pub struct TestDeadliner { - added: Arc>>, - ch_tx: tokio::sync::mpsc::Sender, - ch_rx: Arc>>>, +struct TestDeadliner { + added: StdMutex>, + tx: mpsc::Sender, + rx: StdMutex>>, } impl TestDeadliner { - /// Creates a new test deadliner. 
- #[allow(dead_code)] - pub fn new() -> Arc { - const CHANNEL_BUFFER: usize = 100; - let (tx, rx) = tokio::sync::mpsc::channel(CHANNEL_BUFFER); - Arc::new(Self { - added: Arc::new(tokio::sync::Mutex::new(Vec::new())), - ch_tx: tx, - ch_rx: Arc::new(tokio::sync::Mutex::new(Some(rx))), - }) + fn new() -> Self { + let (tx, rx) = mpsc::channel(32); + Self { + added: StdMutex::new(Vec::new()), + tx, + rx: StdMutex::new(Some(rx)), + } } - /// Expires all added duties. - #[allow(dead_code)] - pub async fn expire(&self) -> bool { - let mut added = self.added.lock().await; - for duty in added.drain(..) { - if self.ch_tx.send(duty).await.is_err() { + async fn expire(&self) -> bool { + let duties = { + let mut added = self.added.lock().expect("test deadliner lock poisoned"); + std::mem::take(&mut *added) + }; + + for duty in duties { + if self.tx.send(duty).await.is_err() { return false; } } - // Send dummy duty to ensure all piped duties above were processed - self.ch_tx - .send(Duty::new(SlotNumber::new(0), DutyType::Unknown)) - .await - .is_ok() + + true } } -impl crate::deadline::Deadliner for TestDeadliner { - fn add(&self, duty: Duty) -> futures::future::BoxFuture<'_, bool> { - Box::pin(async move { - let mut added = self.added.lock().await; - added.push(duty); +impl Deadliner for TestDeadliner { + fn add(&self, duty: Duty) -> BoxFuture<'_, bool> { + async move { + self.added + .lock() + .expect("test deadliner lock poisoned") + .push(duty); true - }) + } + .boxed() } - fn c(&self) -> Option> { - let mut guard = self.ch_rx.blocking_lock(); - guard.take() + fn c(&self) -> Option> { + self.rx.lock().expect("test deadliner lock poisoned").take() } } diff --git a/crates/testutil/src/lib.rs b/crates/testutil/src/lib.rs index abc00e7a..686c8c7a 100644 --- a/crates/testutil/src/lib.rs +++ b/crates/testutil/src/lib.rs @@ -6,3 +6,8 @@ /// Random utilities. 
pub mod random; + +pub use random::{ + random_deneb_versioned_attestation, random_eth2_signature, random_eth2_signature_bytes, + random_root, random_root_bytes, random_slot, random_v_idx, +}; diff --git a/crates/testutil/src/random.rs b/crates/testutil/src/random.rs index a1cd8e44..8e4a8eeb 100644 --- a/crates/testutil/src/random.rs +++ b/crates/testutil/src/random.rs @@ -7,10 +7,13 @@ use k256::{ elliptic_curve::rand_core::{CryptoRng, Error, RngCore}, }; use pluto_crypto::{blst_impl::BlstImpl, tbls::Tbls, types::PrivateKey}; -use pluto_eth2api::types::{ - AltairBeaconStateCurrentJustifiedCheckpoint, ConsensusVersion, Data, - GetAggregatedAttestationV2ResponseResponse, GetAggregatedAttestationV2ResponseResponseData, - GetBlockAttestationsV2ResponseResponseDataArray2, +use pluto_eth2api::{ + spec::phase0, + types::{ + AltairBeaconStateCurrentJustifiedCheckpoint, Data, + GetBlockAttestationsV2ResponseResponseDataArray2, + }, + versioned::{self, AttestationPayload}, }; use rand::{Rng, SeedableRng, rngs::StdRng}; @@ -85,6 +88,13 @@ pub fn random_eth2_signature() -> String { format!("0x{}", hex::encode(bytes)) } +/// Generates a random Ethereum consensus signature for testing. +pub fn random_eth2_signature_bytes() -> phase0::BLSSignature { + let mut signature = [0u8; 96]; + rand::thread_rng().fill(&mut signature[..]); + signature +} + /// Generate random Ethereum address for testing. pub fn random_eth_address(rand: &mut impl Rng) -> [u8; 20] { let mut bytes = [0u8; 20]; @@ -105,6 +115,23 @@ pub fn random_root() -> String { format!("0x{}", hex::encode(bytes)) } +/// Generates a random Ethereum consensus root for testing. +pub fn random_root_bytes() -> phase0::Root { + let mut root = [0u8; 32]; + rand::thread_rng().fill(&mut root); + root +} + +/// Generates a random slot for testing. +pub fn random_slot() -> phase0::Slot { + rand::thread_rng().r#gen() +} + +/// Generates a random validator index for testing. 
+pub fn random_v_idx() -> phase0::ValidatorIndex { + rand::thread_rng().r#gen() +} + /// Generates a random bitlist as a hex string for testing. /// /// # Arguments @@ -175,10 +202,31 @@ pub fn random_phase0_attestation() -> GetBlockAttestationsV2ResponseResponseData /// } /// } /// ``` -pub fn random_deneb_versioned_attestation() -> GetAggregatedAttestationV2ResponseResponse { - GetAggregatedAttestationV2ResponseResponse { - version: ConsensusVersion::Deneb, - data: GetAggregatedAttestationV2ResponseResponseData::Object2(random_phase0_attestation()), +pub fn random_deneb_versioned_attestation() -> versioned::VersionedAttestation { + let mut rng = rand::thread_rng(); + + let attestation = phase0::Attestation { + aggregation_bits: phase0::BitList::default(), + data: phase0::AttestationData { + slot: rng.r#gen(), + index: rng.r#gen(), + beacon_block_root: random_root_bytes(), + source: phase0::Checkpoint { + epoch: rng.r#gen(), + root: random_root_bytes(), + }, + target: phase0::Checkpoint { + epoch: rng.r#gen(), + root: random_root_bytes(), + }, + }, + signature: random_eth2_signature_bytes(), + }; + + versioned::VersionedAttestation { + version: versioned::DataVersion::Deneb, + validator_index: Some(rng.r#gen()), + attestation: Some(AttestationPayload::Deneb(attestation)), } } @@ -314,15 +362,17 @@ mod tests { let versioned_att = random_deneb_versioned_attestation(); // Check version is Deneb - assert!(matches!(versioned_att.version, ConsensusVersion::Deneb)); + assert!(matches!( + versioned_att.version, + versioned::DataVersion::Deneb + )); // Check that data is populated - match versioned_att.data { - GetAggregatedAttestationV2ResponseResponseData::Object2(att) => { - assert!(att.aggregation_bits.starts_with("0x")); - assert!(att.signature.starts_with("0x")); + match versioned_att.attestation { + Some(AttestationPayload::Deneb(att)) => { + assert_eq!(att.signature.len(), 96); } - _ => panic!("Expected Object2 variant"), + _ => panic!("Expected Deneb 
attestation"), } } @@ -333,13 +383,13 @@ mod tests { // Different calls should produce different attestations // Check signatures are different - let sig1 = match &att1.data { - GetAggregatedAttestationV2ResponseResponseData::Object2(a) => &a.signature, - _ => panic!("Expected Object2"), + let sig1 = match &att1.attestation { + Some(AttestationPayload::Deneb(a)) => &a.signature, + _ => panic!("Expected Deneb attestation"), }; - let sig2 = match &att2.data { - GetAggregatedAttestationV2ResponseResponseData::Object2(a) => &a.signature, - _ => panic!("Expected Object2"), + let sig2 = match &att2.attestation { + Some(AttestationPayload::Deneb(a)) => &a.signature, + _ => panic!("Expected Deneb attestation"), }; assert_ne!(sig1, sig2); From 8a3780e790b88b1cd2fd2556af573a754bf5e12b Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Thu, 19 Mar 2026 16:33:36 +0100 Subject: [PATCH 09/11] feat: add parsigex [wip] --- Cargo.lock | 8 + crates/core/Cargo.toml | 8 + crates/core/examples/parasigex.rs | 448 ++++++++++++++++++++++++ crates/core/src/lib.rs | 3 + crates/core/src/parsigex/behaviour.rs | 395 +++++++++++++++++++++ crates/core/src/parsigex/handler.rs | 328 +++++++++++++++++ crates/core/src/parsigex/mod.rs | 81 +++++ crates/core/src/parsigex/protocol.rs | 83 +++++ crates/core/src/parsigex/signed_data.rs | 78 +++++ crates/core/src/types.rs | 147 +++++++- crates/p2p/src/p2p.rs | 2 +- crates/p2p/src/relay.rs | 8 +- 12 files changed, 1583 insertions(+), 6 deletions(-) create mode 100644 crates/core/examples/parasigex.rs create mode 100644 crates/core/src/parsigex/behaviour.rs create mode 100644 crates/core/src/parsigex/handler.rs create mode 100644 crates/core/src/parsigex/mod.rs create mode 100644 crates/core/src/parsigex/protocol.rs create mode 100644 crates/core/src/parsigex/signed_data.rs diff --git a/Cargo.lock b/Cargo.lock index 2a28c0b9..fa25ee6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5519,21 +5519,28 @@ name = 
"pluto-core" version = "1.7.1" dependencies = [ "alloy", + "anyhow", "async-trait", "base64 0.22.1", "built", "cancellation", "chrono", + "clap", "crossbeam", "dyn-clone", "dyn-eq", "futures", + "futures-timer", "hex", + "k256", "libp2p", "pluto-build-proto", + "pluto-cluster", "pluto-eth2api", "pluto-eth2util", + "pluto-p2p", "pluto-testutil", + "pluto-tracing", "prost 0.14.3", "prost-types 0.14.3", "rand 0.8.5", @@ -5547,6 +5554,7 @@ dependencies = [ "tokio-util", "tracing", "tree_hash", + "unsigned-varint 0.8.0", "vise", ] diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index ccbf2235..3559cafd 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -12,6 +12,7 @@ cancellation.workspace = true chrono.workspace = true crossbeam.workspace = true futures.workspace = true +futures-timer.workspace = true dyn-clone.workspace = true dyn-eq.workspace = true hex.workspace = true @@ -31,18 +32,25 @@ tokio-util.workspace = true tracing.workspace = true pluto-eth2util.workspace = true tree_hash.workspace = true +unsigned-varint.workspace = true [dev-dependencies] +anyhow.workspace = true alloy.workspace = true +clap.workspace = true rand.workspace = true libp2p.workspace = true +k256.workspace = true prost.workspace = true prost-types.workspace = true hex.workspace = true chrono.workspace = true test-case.workspace = true pluto-eth2util.workspace = true +pluto-cluster.workspace = true +pluto-p2p.workspace = true pluto-testutil.workspace = true +pluto-tracing.workspace = true [build-dependencies] pluto-build-proto.workspace = true diff --git a/crates/core/examples/parasigex.rs b/crates/core/examples/parasigex.rs new file mode 100644 index 00000000..5341bf4e --- /dev/null +++ b/crates/core/examples/parasigex.rs @@ -0,0 +1,448 @@ +#![allow(missing_docs)] + +use std::{collections::HashSet, path::PathBuf, time::Duration}; + +use anyhow::{Context, Result, anyhow}; +use clap::Parser; +use futures::StreamExt; +use libp2p::{ + identify, ping, + 
relay::{self}, + swarm::{NetworkBehaviour, SwarmEvent}, +}; +use pluto_cluster::lock::Lock; +use pluto_core::{ + parsigex::{self, DutyGater, Event, Handle, Verifier}, + signeddata::SignedRandao, + types::{Duty, DutyType, ParSignedDataSet, PubKey, SlotNumber}, +}; +use pluto_p2p::{ + behaviours::pluto::PlutoBehaviourEvent, + bootnode, + config::P2PConfig, + gater, k1, + p2p::{Node, NodeType}, + peer::peer_id_from_key, + relay::{MutableRelayReservation, RelayRouter}, +}; +use pluto_tracing::TracingConfig; +use tokio::fs; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +#[derive(NetworkBehaviour)] +#[behaviour(to_swarm = "CombinedBehaviourEvent")] +struct CombinedBehaviour { + relay: relay::client::Behaviour, + relay_reservation: MutableRelayReservation, + relay_router: RelayRouter, + parasigex: parsigex::Behaviour, +} + +#[derive(Debug)] +enum CombinedBehaviourEvent { + ParSigEx(Event), + Relay(relay::client::Event), +} + +impl From for CombinedBehaviourEvent { + fn from(event: Event) -> Self { + Self::ParSigEx(event) + } +} + +impl From for CombinedBehaviourEvent { + fn from(event: relay::client::Event) -> Self { + Self::Relay(event) + } +} + +impl From for CombinedBehaviourEvent { + fn from(value: std::convert::Infallible) -> Self { + match value {} + } +} + +#[derive(Debug, Parser)] +#[command(name = "parasigex-example")] +#[command(about = "Demonstrates partial signature exchange over the bootnode/relay P2P path")] +struct Args { + /// Relay URLs or multiaddrs. + #[arg(long, value_delimiter = ',')] + relays: Vec, + + /// Directory holding the p2p private key and cluster lock. + #[arg(long)] + data_dir: PathBuf, + + /// TCP listen addresses. + #[arg(long, value_delimiter = ',', default_value = "0.0.0.0:0")] + tcp_addrs: Vec, + + /// UDP listen addresses used for QUIC. + #[arg(long, value_delimiter = ',', default_value = "0.0.0.0:0")] + udp_addrs: Vec, + + /// Whether to filter private addresses from advertisements. 
+ #[arg(long, default_value_t = false)] + filter_private_addrs: bool, + + /// External IP address to advertise. + #[arg(long)] + external_ip: Option, + + /// External hostname to advertise. + #[arg(long)] + external_host: Option, + + /// Whether to disable socket reuse-port. + #[arg(long, default_value_t = false)] + disable_reuse_port: bool, + + /// Emit a sample partial signature every N seconds. + #[arg(long, default_value_t = 10)] + broadcast_every: u64, + + /// Share index to use in the sample partial signature. + #[arg(long, default_value_t = 1)] + share_idx: u64, + + /// Log level. + #[arg(long, default_value = "info")] + log_level: String, +} + +fn make_sample_set(slot: u64, share_idx: u64) -> ParSignedDataSet { + let share_byte = u8::try_from(share_idx % 255).unwrap_or(1); + let pub_key = PubKey::new([share_byte; 48]); + + let mut set = ParSignedDataSet::new(); + set.insert( + pub_key, + SignedRandao::new_partial(slot / 32, [share_byte; 96], share_idx), + ); + set +} + +fn log_received(duty: &Duty, set: &ParSignedDataSet, peer: &libp2p::PeerId) { + let entries = set + .inner() + .iter() + .map(|(pub_key, data)| format!("{pub_key}:share_idx={}", data.share_idx)) + .collect::>() + .join(", "); + + info!(peer = %peer, duty = %duty, entries = %entries, "received partial signature set"); +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + pluto_tracing::init( + &TracingConfig::builder() + .with_default_console() + .override_env_filter(&args.log_level) + .build(), + )?; + + let key = k1::load_priv_key(&args.data_dir).with_context(|| { + format!( + "failed to load private key from {}", + args.data_dir.display() + ) + })?; + let local_peer_id = peer_id_from_key(key.public_key()) + .context("failed to derive local peer ID from private key")?; + + let lock_path = args.data_dir.join("cluster-lock.json"); + let lock_str = fs::read_to_string(&lock_path) + .await + .with_context(|| format!("failed to read {}", lock_path.display()))?; + 
let lock: Lock = serde_json::from_str(&lock_str) + .with_context(|| format!("failed to parse {}", lock_path.display()))?; + + let cancel = CancellationToken::new(); + let lock_hash_hex = hex::encode(&lock.lock_hash); + let relays = bootnode::new_relays(cancel.child_token(), &args.relays, &lock_hash_hex) + .await + .context("failed to resolve relays")?; + + let known_peers = lock + .peer_ids() + .context("failed to derive peer IDs from lock")?; + let self_index = known_peers + .iter() + .position(|peer_id| *peer_id == local_peer_id) + .ok_or_else(|| anyhow!("local peer ID {local_peer_id} not found in cluster lock"))?; + let conn_gater = gater::ConnGater::new( + gater::Config::closed() + .with_relays(relays.clone()) + .with_peer_ids(known_peers.clone()), + ); + + let verifier: Verifier = + std::sync::Arc::new(|_duty, _pubkey, _data| Box::pin(async { Ok(()) })); + let duty_gater: DutyGater = std::sync::Arc::new(|duty| duty.duty_type != DutyType::Unknown); + let handle_slot = std::sync::Arc::new(tokio::sync::Mutex::new(1_u64)); + + let p2p_config = P2PConfig { + relays: vec![], + external_ip: args.external_ip.clone(), + external_host: args.external_host.clone(), + tcp_addrs: args.tcp_addrs.clone(), + udp_addrs: args.udp_addrs.clone(), + disable_reuse_port: args.disable_reuse_port, + }; + + let relay_peer_ids: HashSet<_> = relays + .iter() + .filter_map(|relay| relay.peer().ok().flatten().map(|peer| peer.id)) + .collect(); + + let mut parasigex_handle: Option = None; + let mut node: Node = Node::new( + p2p_config, + key, + NodeType::QUIC, + args.filter_private_addrs, + known_peers.clone(), + |builder, keypair, relay_client| { + let p2p_context = builder.p2p_context(); + let broadcast_context = p2p_context.clone(); + let local_peer_id = keypair.public().to_peer_id(); + let config = parsigex::Config::new( + known_peers.clone(), + self_index, + verifier.clone(), + duty_gater.clone(), + std::sync::Arc::new(move |peer| { + !broadcast_context + .peer_store_lock() + 
.connections_to_peer(peer) + .is_empty() + }), + ) + .with_timeout(Duration::from_secs(10)); + let (parasigex, handle) = parsigex::Behaviour::new(config); + parasigex_handle = Some(handle); + + builder + .with_gater(conn_gater) + .with_inner(CombinedBehaviour { + parasigex, + relay: relay_client, + relay_reservation: MutableRelayReservation::new(relays.clone()), + relay_router: RelayRouter::new(relays.clone(), p2p_context, local_peer_id), + }) + }, + )?; + + let parasigex_handle = + parasigex_handle.ok_or_else(|| anyhow!("parasigex handle should be created"))?; + + info!( + peer_id = %node.local_peer_id(), + data_dir = %args.data_dir.display(), + known_peers = ?known_peers, + relays = ?args.relays, + "parasigex example started" + ); + + let mut ticker = tokio::time::interval(Duration::from_secs(args.broadcast_every)); + + loop { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + info!("ctrl+c received, shutting down"); + break; + } + _ = ticker.tick() => { + info!("broadcasting sample partial signature set"); + let mut slot = handle_slot.lock().await; + let duty = Duty::new(SlotNumber::new(*slot), DutyType::Randao); + let data_set = make_sample_set(*slot, args.share_idx); + + match parasigex_handle.broadcast(duty.clone(), data_set.clone()).await { + Ok(()) => { + info!(duty = %duty, share_idx = args.share_idx, "broadcasted sample partial signature set"); + *slot = slot.saturating_add(1); + } + Err(error) => { + warn!(%error, "broadcast failed"); + } + } + } + event = node.select_next_some() => { + info!("received swarm event"); + let peer_type = |peer_id: &libp2p::PeerId| { + if relay_peer_ids.contains(peer_id) { + "RELAY" + } else if known_peers.contains(peer_id) { + "PEER" + } else { + "UNKNOWN" + } + }; + + match event { + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::Relay(relay::client::Event::ReservationReqAccepted { + relay_peer_id, + renewal, + limit, + }), + )) => { + info!( + relay_peer_id = %relay_peer_id, + peer_type = 
peer_type(&relay_peer_id), + renewal, + limit = ?limit, + "relay reservation accepted" + ); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::Relay(relay::client::Event::OutboundCircuitEstablished { + relay_peer_id, + limit, + }), + )) => { + info!( + relay_peer_id = %relay_peer_id, + peer_type = peer_type(&relay_peer_id), + limit = ?limit, + "outbound relay circuit established" + ); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::Relay(relay::client::Event::InboundCircuitEstablished { + src_peer_id, + limit, + }), + )) => { + info!( + src_peer_id = %src_peer_id, + peer_type = peer_type(&src_peer_id), + limit = ?limit, + "inbound relay circuit established" + ); + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + .. + } => { + let address = match &endpoint { + libp2p::core::ConnectedPoint::Dialer { address, .. } => address, + libp2p::core::ConnectedPoint::Listener { send_back_addr, .. } => { + send_back_addr + } + }; + info!( + peer_id = %peer_id, + peer_type = peer_type(&peer_id), + address = %address, + num_established, + "connection established" + ); + } + SwarmEvent::ConnectionClosed { + peer_id, + endpoint, + num_established, + cause, + .. + } => { + let address = match &endpoint { + libp2p::core::ConnectedPoint::Dialer { address, .. } => address, + libp2p::core::ConnectedPoint::Listener { send_back_addr, .. } => { + send_back_addr + } + }; + info!( + peer_id = %peer_id, + peer_type = peer_type(&peer_id), + address = %address, + num_established, + cause = ?cause, + "connection closed" + ); + } + SwarmEvent::OutgoingConnectionError { + peer_id, + error, + connection_id, + } => { + warn!( + peer_id = ?peer_id, + connection_id = ?connection_id, + error = %error, + "outgoing connection failed" + ); + } + SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error, + .. 
+ } => { + warn!( + connection_id = ?connection_id, + local_addr = %local_addr, + send_back_addr = %send_back_addr, + error = %error, + "incoming connection failed" + ); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Identify( + identify::Event::Received { peer_id, info, .. }, + )) => { + info!( + peer_id = %peer_id, + peer_type = peer_type(&peer_id), + agent_version = %info.agent_version, + protocol_version = %info.protocol_version, + listen_addrs = ?info.listen_addrs, + "identify received" + ); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Ping(ping::Event { + peer, + result, + .. + })) => match result { + Ok(rtt) => { + info!(peer_id = %peer, peer_type = peer_type(&peer), rtt = ?rtt, "ping succeeded"); + } + Err(error) => { + warn!(peer_id = %peer, peer_type = peer_type(&peer), error = %error, "ping failed"); + } + }, + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::ParSigEx(Event::Received { + peer, + duty, + data_set, + .. + }), + )) => { + log_received(&duty, &data_set, &peer); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::ParSigEx(Event::Error { peer, error, .. }), + )) => { + warn!(peer = %peer, error = %error, "parasigex protocol error"); + } + SwarmEvent::NewListenAddr { address, .. } => { + info!(address = %address, "listening"); + } + _ => {} + } + } + } + } + + Ok(()) +} diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 62460d0d..e83d6b74 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -26,6 +26,9 @@ pub mod deadline; /// parasigdb pub mod parasigdb; +/// Partial signature exchange. +pub mod parsigex; + /// Test utilities. #[cfg(test)] pub mod testutils; diff --git a/crates/core/src/parsigex/behaviour.rs b/crates/core/src/parsigex/behaviour.rs new file mode 100644 index 00000000..3a8f7d1d --- /dev/null +++ b/crates/core/src/parsigex/behaviour.rs @@ -0,0 +1,395 @@ +//! Network behaviour and control handle for partial signature exchange. 
+ +use std::{ + collections::{HashMap, VecDeque}, + future::Future, + pin::Pin, + sync::{ + Arc, + atomic::{AtomicU64, Ordering}, + }, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p::{ + Multiaddr, PeerId, + swarm::{ + ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, + }, +}; +use tokio::sync::{mpsc, oneshot}; + +use crate::types::{Duty, ParSignedData, ParSignedDataSet, PubKey}; + +use super::{ + Error as CodecError, Handler, encode_message, + handler::{Failure as HandlerFailure, FromHandler, ToHandler}, +}; + +/// Future returned by verifier callbacks. +pub type VerifyFuture = + Pin> + Send + 'static>>; + +/// Verifier callback type. +pub type Verifier = + Arc VerifyFuture + Send + Sync + 'static>; + +/// Duty gate callback type. +pub type DutyGater = Arc bool + Send + Sync + 'static>; + +/// Peer connection callback type. +pub type PeerConnectionChecker = Arc bool + Send + Sync + 'static>; + +/// Error type for signature verification callbacks. +#[derive(Debug, thiserror::Error)] +pub enum VerifyError { + /// Unknown validator public key. + #[error("unknown pubkey, not part of cluster lock")] + UnknownPubKey, + + /// Invalid share index for the validator. + #[error("invalid shareIdx")] + InvalidShareIndex, + + /// Invalid signed-data family for the duty. + #[error("invalid eth2 signed data")] + InvalidSignedDataFamily, + + /// Generic verification error. + #[error("{0}")] + Other(String), +} + +/// Error type for behaviour operations. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Message conversion failed. + #[error(transparent)] + Codec(#[from] CodecError), + + /// Channel closed. + #[error("parsigex handle closed")] + Closed, + + /// Broadcast failed for a peer. + #[error("broadcast to peer {peer} failed: {source}")] + BroadcastPeer { + /// Peer for which the broadcast failed. + peer: PeerId, + /// Source error. 
+ #[source] + source: HandlerFailure, + }, + + /// Peer is not currently connected. + #[error("peer {0} is not connected")] + PeerNotConnected(PeerId), +} + +/// Result type for partial signature exchange behaviour operations. +pub type Result = std::result::Result; + +/// Event emitted by the partial signature exchange behaviour. +#[derive(Debug, Clone)] +pub enum Event { + /// A verified partial signature set was received from a peer. + Received { + /// The remote peer. + peer: PeerId, + /// Connection on which it was received. + connection: ConnectionId, + /// Duty associated with the data set. + duty: Duty, + /// Partial signature set. + data_set: ParSignedDataSet, + }, + /// A peer sent invalid data or verification failed. + Error { + /// The remote peer. + peer: PeerId, + /// Connection on which the error occurred. + connection: ConnectionId, + /// Failure reason. + error: HandlerFailure, + }, +} + +#[derive(Debug)] +struct PendingBroadcast { + remaining: usize, + responder: oneshot::Sender>, +} + +#[derive(Debug)] +enum Command { + Broadcast { + request_id: u64, + duty: Duty, + data_set: ParSignedDataSet, + responder: oneshot::Sender>, + }, +} + +/// Async handle for outbound partial signature broadcasts. +#[derive(Debug, Clone)] +pub struct Handle { + tx: mpsc::UnboundedSender, + next_request_id: Arc, +} + +impl Handle { + /// Broadcasts a partial signature set to all peers except self. + pub async fn broadcast(&self, duty: Duty, data_set: ParSignedDataSet) -> Result<()> { + let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); + let (tx, rx) = oneshot::channel(); + self.tx + .send(Command::Broadcast { + request_id, + duty, + data_set, + responder: tx, + }) + .map_err(|_| Error::Closed)?; + + Ok(()) + } +} + +/// Configuration for the partial signature exchange behaviour. 
+#[derive(Clone)] +pub struct Config { + peers: Vec, + self_index: usize, + verifier: Verifier, + duty_gater: DutyGater, + is_peer_connected: PeerConnectionChecker, + timeout: Duration, +} + +impl Config { + /// Creates a new configuration. + pub fn new( + peers: Vec, + self_index: usize, + verifier: Verifier, + duty_gater: DutyGater, + is_peer_connected: PeerConnectionChecker, + ) -> Self { + Self { + peers, + self_index, + verifier, + duty_gater, + is_peer_connected, + timeout: Duration::from_secs(20), + } + } + + /// Sets the send/receive timeout. + pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } +} + +/// Behaviour for partial signature exchange. +pub struct Behaviour { + config: Config, + rx: mpsc::UnboundedReceiver, + pending_actions: VecDeque>>, + events: VecDeque, + pending_broadcasts: HashMap, +} + +impl Behaviour { + /// Creates a behaviour and a clonable broadcast handle. + pub fn new(config: Config) -> (Self, Handle) { + let (tx, rx) = mpsc::unbounded_channel(); + let handle = Handle { + tx, + next_request_id: Arc::new(AtomicU64::new(0)), + }; + + ( + Self { + config, + rx, + pending_actions: VecDeque::new(), + events: VecDeque::new(), + pending_broadcasts: HashMap::new(), + }, + handle, + ) + } + + fn handle_command(&mut self, command: Command) { + match command { + Command::Broadcast { + request_id, + duty, + data_set, + responder, + } => { + let message = match encode_message(&duty, &data_set) { + Ok(message) => message, + Err(err) => { + let _ = responder.send(Err(Error::from(err))); + return; + } + }; + + let mut targeted = 0usize; + for (idx, peer) in self.config.peers.iter().enumerate() { + if idx == self.config.self_index { + continue; + } + + if !(self.config.is_peer_connected)(peer) { + let _ = responder.send(Err(Error::PeerNotConnected(*peer))); + return; + } + + self.pending_actions.push_back(ToSwarm::NotifyHandler { + peer_id: *peer, + handler: NotifyHandler::Any, + event: ToHandler::Send { + 
request_id, + payload: message.clone(), + }, + }); + targeted = targeted.saturating_add(1); + } + + if targeted == 0 { + let _ = responder.send(Ok(())); + return; + } + + self.pending_broadcasts.insert( + request_id, + PendingBroadcast { + remaining: targeted, + responder, + }, + ); + } + } + } + + fn finish_broadcast_success(&mut self, request_id: u64) { + let Some(entry) = self.pending_broadcasts.get_mut(&request_id) else { + return; + }; + + entry.remaining = entry.remaining.saturating_sub(1); + if entry.remaining == 0 { + if let Some(entry) = self.pending_broadcasts.remove(&request_id) { + let _ = entry.responder.send(Ok(())); + } + } + } + + fn finish_broadcast_error(&mut self, request_id: u64, peer: PeerId, error: HandlerFailure) { + if let Some(entry) = self.pending_broadcasts.remove(&request_id) { + let _ = entry.responder.send(Err(Error::BroadcastPeer { + peer, + source: error, + })); + } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = Handler; + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> std::result::Result, ConnectionDenied> { + tracing::trace!("establishing inbound connection to peer: {:?}", peer); + Ok(Handler::new( + self.config.timeout, + self.config.verifier.clone(), + self.config.duty_gater.clone(), + peer, + )) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + peer: PeerId, + _addr: &Multiaddr, + _role_override: libp2p::core::Endpoint, + _port_use: libp2p::core::transport::PortUse, + ) -> std::result::Result, ConnectionDenied> { + tracing::trace!("establishing outbound connection to peer: {:?}", peer); + Ok(Handler::new( + self.config.timeout, + self.config.verifier.clone(), + self.config.duty_gater.clone(), + peer, + )) + } + + fn on_swarm_event(&mut self, _event: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + 
peer_id: PeerId, + connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + tracing::trace!("received connection handler event: {:?}", event); + match event { + FromHandler::Received { duty, data_set } => { + self.events.push_back(Event::Received { + peer: peer_id, + connection: connection_id, + duty, + data_set, + }); + } + FromHandler::InboundError(error) => { + self.events.push_back(Event::Error { + peer: peer_id, + connection: connection_id, + error, + }); + } + FromHandler::OutboundSuccess { request_id } => { + self.finish_broadcast_success(request_id); + } + FromHandler::OutboundError { request_id, error } => { + self.finish_broadcast_error(request_id, peer_id, error); + } + } + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + tracing::trace!("polling parsigex behaviour"); + + if let Some(event) = self.events.pop_front() { + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + if let Poll::Ready(Some(command)) = self.rx.poll_recv(cx) { + self.handle_command(command); + } + + if let Some(action) = self.pending_actions.pop_front() { + return Poll::Ready(action); + } + + Poll::Pending + } +} diff --git a/crates/core/src/parsigex/handler.rs b/crates/core/src/parsigex/handler.rs new file mode 100644 index 00000000..b1cf0bb7 --- /dev/null +++ b/crates/core/src/parsigex/handler.rs @@ -0,0 +1,328 @@ +//! Connection handler for the partial signature exchange protocol. 
+ +use std::{ + collections::VecDeque, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{future::BoxFuture, prelude::*}; +use futures_timer::Delay; +use libp2p::{ + PeerId, + core::upgrade::ReadyUpgrade, + swarm::{ + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, + handler::{ + ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + }, + }, +}; + +use crate::types::{Duty, ParSignedDataSet}; + +use super::{DutyGater, PROTOCOL_NAME, Verifier, protocol}; + +/// Failure type for the partial signature exchange handler. +#[derive(Debug, Clone, thiserror::Error)] +pub enum Failure { + /// Stream negotiation timed out. + #[error("parsigex protocol negotiation timed out")] + Timeout, + /// Invalid payload. + #[error("invalid parsigex payload")] + InvalidPayload, + /// Duty not accepted by the gater. + #[error("invalid duty")] + InvalidDuty, + /// Signature verification failed. + #[error("invalid partial signature")] + InvalidPartialSignature, + /// I/O error. + #[error("{0}")] + Io(String), +} + +impl Failure { + fn io(error: impl std::fmt::Display) -> Self { + Self::Io(error.to_string()) + } +} + +/// Command sent from the behaviour to a handler. +#[derive(Debug, Clone)] +pub enum ToHandler { + /// Send the encoded payload to the remote peer. + Send { + /// Request identifier used to correlate broadcast completions. + request_id: u64, + /// Encoded protobuf payload. + payload: Vec, + }, +} + +/// Event sent from the handler back to the behaviour. +#[derive(Debug, Clone)] +pub enum FromHandler { + /// A verified message was received. + Received { + /// Duty from the message. + duty: Duty, + /// Verified partial signature set. + data_set: ParSignedDataSet, + }, + /// An inbound message failed decoding, gating, or verification. + InboundError(Failure), + /// Outbound send completed successfully. + OutboundSuccess { + /// Request identifier. 
+ request_id: u64, + }, + /// Outbound send failed. + OutboundError { + /// Request identifier. + request_id: u64, + /// Failure reason. + error: Failure, + }, +} + +type SendFuture = BoxFuture<'static, Result<(), Failure>>; +type RecvFuture = BoxFuture<'static, Result<(Duty, ParSignedDataSet), Failure>>; + +enum OutboundState { + OpenStream { request_id: u64, payload: Vec }, + Sending { request_id: u64, future: SendFuture }, +} + +/// Connection handler for parsigex. +pub struct Handler { + timeout: Duration, + verifier: Verifier, + duty_gater: DutyGater, + peer: PeerId, + outbound_queue: VecDeque<(u64, Vec)>, + outbound: Option, + inbound: Option, + pending_events: VecDeque, +} + +impl Handler { + /// Creates a new handler for one connection. + pub fn new(timeout: Duration, verifier: Verifier, duty_gater: DutyGater, peer: PeerId) -> Self { + Self { + timeout, + verifier, + duty_gater, + peer, + outbound_queue: VecDeque::new(), + outbound: None, + inbound: None, + pending_events: VecDeque::new(), + } + } + + fn on_dial_upgrade_error( + &mut self, + error: DialUpgradeError<(), ::OutboundProtocol>, + ) { + let Some(OutboundState::OpenStream { request_id, .. 
}) = self.outbound.take() else { + return; + }; + + let failure = match error.error { + StreamUpgradeError::Timeout => Failure::Timeout, + StreamUpgradeError::NegotiationFailed => Failure::io("protocol negotiation failed"), + StreamUpgradeError::Apply(e) => libp2p::core::util::unreachable(e), + StreamUpgradeError::Io(e) => Failure::io(e), + }; + + self.pending_events.push_back(FromHandler::OutboundError { + request_id, + error: failure, + }); + } +} + +impl ConnectionHandler for Handler { + type FromBehaviour = ToHandler; + type InboundOpenInfo = (); + type InboundProtocol = ReadyUpgrade; + type OutboundOpenInfo = (); + type OutboundProtocol = ReadyUpgrade; + type ToBehaviour = FromHandler; + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) + } + + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + match event { + ToHandler::Send { + request_id, + payload, + } => self.outbound_queue.push_back((request_id, payload)), + } + } + + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent, + > { + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + + if let Some(fut) = self.inbound.as_mut() { + match fut.poll_unpin(cx) { + Poll::Pending => {} + Poll::Ready(Ok((duty, data_set))) => { + self.inbound = None; + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + FromHandler::Received { duty, data_set }, + )); + } + Poll::Ready(Err(error)) => { + self.inbound = None; + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + FromHandler::InboundError(error), + )); + } + } + } + + if let Some(outbound) = self.outbound.take() { + match outbound { + OutboundState::OpenStream { + request_id, + payload, + } => { + self.outbound = Some(OutboundState::OpenStream { + request_id, + payload, + }); 
+ return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), + }); + } + OutboundState::Sending { + request_id, + mut future, + } => match future.poll_unpin(cx) { + Poll::Pending => { + self.outbound = Some(OutboundState::Sending { request_id, future }); + } + Poll::Ready(Ok(())) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + FromHandler::OutboundSuccess { request_id }, + )); + } + Poll::Ready(Err(error)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + FromHandler::OutboundError { request_id, error }, + )); + } + }, + } + } + + if let Some((request_id, payload)) = self.outbound_queue.pop_front() { + self.outbound = Some(OutboundState::OpenStream { + request_id, + payload, + }); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), + }); + } + + Poll::Pending + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: mut stream, + .. 
+ }) => { + stream.ignore_for_keep_alive(); + let verifier = self.verifier.clone(); + let duty_gater = self.duty_gater.clone(); + let timeout = self.timeout; + self.inbound = Some( + async move { + let recv = async { + let bytes = protocol::recv_message(&mut stream) + .await + .map_err(Failure::io)?; + let (duty, data_set) = protocol::decode_message(&bytes) + .map_err(|_| Failure::InvalidPayload)?; + if !(duty_gater)(&duty) { + return Err(Failure::InvalidDuty); + } + + for (pub_key, par_sig) in data_set.inner() { + verifier(duty.clone(), *pub_key, par_sig.clone()) + .await + .map_err(|_| Failure::InvalidPartialSignature)?; + } + + Ok((duty, data_set)) + }; + + futures::pin_mut!(recv); + match futures::future::select(recv, Delay::new(timeout)).await { + futures::future::Either::Left((result, _)) => result, + futures::future::Either::Right(((), _)) => Err(Failure::Timeout), + } + } + .boxed(), + ); + } + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol: mut stream, + .. 
+ }) => { + stream.ignore_for_keep_alive(); + let Some(OutboundState::OpenStream { + request_id, + payload, + }) = self.outbound.take() + else { + self.pending_events.push_back(FromHandler::OutboundError { + request_id: 0, + error: Failure::io(format!( + "unexpected outbound stream state for peer {}", + self.peer + )), + }); + return; + }; + + let timeout = self.timeout; + self.outbound = Some(OutboundState::Sending { + request_id, + future: async move { + let send = protocol::send_message(&mut stream, &payload) + .map(|result| result.map_err(Failure::io)); + futures::pin_mut!(send); + match futures::future::select(send, Delay::new(timeout)).await { + futures::future::Either::Left((result, _)) => result, + futures::future::Either::Right(((), _)) => Err(Failure::Timeout), + } + } + .boxed(), + }); + } + ConnectionEvent::DialUpgradeError(error) => self.on_dial_upgrade_error(error), + _ => {} + } + } +} diff --git a/crates/core/src/parsigex/mod.rs b/crates/core/src/parsigex/mod.rs new file mode 100644 index 00000000..312cadc1 --- /dev/null +++ b/crates/core/src/parsigex/mod.rs @@ -0,0 +1,81 @@ +//! Partial signature exchange protocol. +//! +//! In-memory exchange test helpers are intentionally not part of this module. +//! We should revisit that only when wiring higher-level integration coverage in +//! `testutil/integration`. +//! +//! The reason is dependency direction: `core` sits above `testutil` in the +//! dependency tree, so test scaffolding for integration-style exchange should +//! not live in `core`. + +pub mod behaviour; +mod handler; +mod protocol; +pub(crate) mod signed_data; + +use libp2p::PeerId; + +pub use behaviour::{ + Behaviour, Config, DutyGater, Error as BehaviourError, Event, Handle, Verifier, VerifyError, +}; +pub use handler::Handler; +pub use protocol::{decode_message, encode_message}; + +/// The protocol name for partial signature exchange (version 2.0.0). 
+pub const PROTOCOL_NAME: libp2p::swarm::StreamProtocol = + libp2p::swarm::StreamProtocol::new("/charon/parsigex/2.0.0"); + +/// Returns the supported protocols in precedence order. +pub fn protocols() -> Vec { + vec![PROTOCOL_NAME] +} + +/// Error type for proto and conversion operations. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Missing duty or data set fields. + #[error("invalid parsigex msg fields")] + InvalidMessageFields, + + /// Invalid partial signed data set proto. + #[error("invalid partial signed data set proto fields")] + InvalidParSignedDataSetFields, + + /// Invalid partial signed proto. + #[error("invalid partial signed proto")] + InvalidParSignedProto, + + /// Invalid duty type. + #[error("invalid duty")] + InvalidDuty, + + /// Unsupported duty type. + #[error("unsupported duty type")] + UnsupportedDutyType, + + /// Deprecated builder proposer duty. + #[error("deprecated duty builder proposer")] + DeprecatedBuilderProposer, + + /// Failed to parse a public key. + #[error("invalid public key: {0}")] + InvalidPubKey(String), + + /// Invalid share index. + #[error("invalid share index")] + InvalidShareIndex, + + /// Serialization failed. + #[error("marshal signed data: {0}")] + Serialize(#[from] serde_json::Error), + + /// Broadcast failed for a peer. + #[error("broadcast to peer {peer} failed")] + BroadcastPeer { + /// Peer for which the broadcast failed. + peer: PeerId, + }, +} + +/// Result type for partial signature exchange operations. +pub type Result = std::result::Result; diff --git a/crates/core/src/parsigex/protocol.rs b/crates/core/src/parsigex/protocol.rs new file mode 100644 index 00000000..d3bb812a --- /dev/null +++ b/crates/core/src/parsigex/protocol.rs @@ -0,0 +1,83 @@ +//! Wire protocol helpers for partial signature exchange. 
+ +use std::io; + +use futures::prelude::*; +use libp2p::swarm::Stream; +use prost::Message; +use unsigned_varint::aio::read_usize; + +use crate::{ + corepb::v1::{core as pbcore, parsigex as pbparsigex}, + types::{Duty, ParSignedDataSet}, +}; + +use super::{Error, Result as ParasigexResult}; + +/// Maximum accepted message size. +const MAX_MESSAGE_SIZE: usize = 16 * 1024 * 1024; + +/// Encodes a protobuf message to bytes. +pub fn encode_protobuf(message: &M) -> Vec { + let mut buf = Vec::with_capacity(message.encoded_len()); + message + .encode(&mut buf) + .expect("vec-backed protobuf encoding cannot fail"); + buf +} + +/// Decodes a protobuf message from bytes. +pub fn decode_protobuf( + bytes: &[u8], +) -> std::result::Result { + M::decode(bytes) +} + +/// Encodes a partial signature exchange message. +pub fn encode_message(duty: &Duty, data_set: &ParSignedDataSet) -> ParasigexResult> { + let pb = pbparsigex::ParSigExMsg { + duty: Some(pbcore::Duty::from(duty)), + data_set: Some(pbcore::ParSignedDataSet::try_from(data_set)?), + }; + + Ok(encode_protobuf(&pb)) +} + +/// Decodes a partial signature exchange message. +pub fn decode_message(bytes: &[u8]) -> ParasigexResult<(Duty, ParSignedDataSet)> { + let pb: pbparsigex::ParSigExMsg = + decode_protobuf(bytes).map_err(|_| Error::InvalidMessageFields)?; + let duty_pb = pb.duty.ok_or(Error::InvalidMessageFields)?; + let data_set_pb = pb.data_set.ok_or(Error::InvalidMessageFields)?; + let duty = Duty::try_from(&duty_pb)?; + let data_set = ParSignedDataSet::try_from((&duty.duty_type, &data_set_pb))?; + Ok((duty, data_set)) +} + +/// Sends one protobuf message on the stream. 
+pub async fn send_message(stream: &mut Stream, payload: &[u8]) -> io::Result<()> { + let mut len_buf = unsigned_varint::encode::usize_buffer(); + let encoded_len = unsigned_varint::encode::usize(payload.len(), &mut len_buf); + stream.write_all(encoded_len).await?; + stream.write_all(payload).await?; + stream.flush().await +} + +/// Receives one protobuf payload from the stream. +pub async fn recv_message(stream: &mut Stream) -> io::Result> { + let length = read_usize(&mut *stream).await.map_err(|err| match err { + unsigned_varint::io::ReadError::Io(err) => err, + other => io::Error::new(io::ErrorKind::InvalidData, other), + })?; + + if length > MAX_MESSAGE_SIZE { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("message too large: {length} bytes"), + )); + } + + let mut buf = vec![0_u8; length]; + stream.read_exact(&mut buf).await?; + Ok(buf) +} diff --git a/crates/core/src/parsigex/signed_data.rs b/crates/core/src/parsigex/signed_data.rs new file mode 100644 index 00000000..b6e9bed2 --- /dev/null +++ b/crates/core/src/parsigex/signed_data.rs @@ -0,0 +1,78 @@ +//! Message and type conversion helpers for partial signature exchange. + +use std::any::Any; + +use crate::{ + signeddata::{ + Attestation, BeaconCommitteeSelection, SignedAggregateAndProof, SignedRandao, + SignedSyncContributionAndProof, SignedSyncMessage, SignedVoluntaryExit, + SyncCommitteeSelection, VersionedAttestation, VersionedSignedAggregateAndProof, + VersionedSignedProposal, VersionedSignedValidatorRegistration, + }, + types::{DutyType, Signature, SignedData}, +}; + +use super::Error; + +pub(crate) fn serialize_signed_data(data: &dyn SignedData) -> Result, Error> { + let any = data as &dyn Any; + + macro_rules! 
serialize_as { + ($ty:ty) => { + if let Some(value) = any.downcast_ref::<$ty>() { + return Ok(serde_json::to_vec(value)?); + } + }; + } + + serialize_as!(Attestation); + serialize_as!(VersionedAttestation); + serialize_as!(VersionedSignedProposal); + serialize_as!(VersionedSignedValidatorRegistration); + serialize_as!(SignedVoluntaryExit); + serialize_as!(SignedRandao); + serialize_as!(Signature); + serialize_as!(BeaconCommitteeSelection); + serialize_as!(SignedAggregateAndProof); + serialize_as!(VersionedSignedAggregateAndProof); + serialize_as!(SignedSyncMessage); + serialize_as!(SyncCommitteeSelection); + serialize_as!(SignedSyncContributionAndProof); + + Err(Error::UnsupportedDutyType) +} + +pub(crate) fn deserialize_signed_data( + duty_type: &DutyType, + bytes: &[u8], +) -> Result, Error> { + macro_rules! deserialize_json { + ($ty:ty) => { + serde_json::from_slice::<$ty>(bytes) + .map(|value| Box::new(value) as Box) + .map_err(Error::from) + }; + } + + match duty_type { + DutyType::Attester => deserialize_json!(VersionedAttestation) + .or_else(|_| deserialize_json!(Attestation)) + .map_err(|_| Error::UnsupportedDutyType), + DutyType::Proposer => deserialize_json!(VersionedSignedProposal), + DutyType::BuilderProposer => Err(Error::DeprecatedBuilderProposer), + DutyType::BuilderRegistration => deserialize_json!(VersionedSignedValidatorRegistration), + DutyType::Exit => deserialize_json!(SignedVoluntaryExit), + DutyType::Randao => deserialize_json!(SignedRandao), + DutyType::Signature => deserialize_json!(Signature), + DutyType::PrepareAggregator => deserialize_json!(BeaconCommitteeSelection), + DutyType::Aggregator => deserialize_json!(VersionedSignedAggregateAndProof) + .or_else(|_| deserialize_json!(SignedAggregateAndProof)) + .map_err(|_| Error::UnsupportedDutyType), + DutyType::SyncMessage => deserialize_json!(SignedSyncMessage), + DutyType::PrepareSyncContribution => deserialize_json!(SyncCommitteeSelection), + DutyType::SyncContribution => 
deserialize_json!(SignedSyncContributionAndProof), + DutyType::Unknown | DutyType::InfoSync | DutyType::DutySentinel(_) => { + Err(Error::UnsupportedDutyType) + } + } +} diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index 78e2bc62..bae0d93c 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -1,6 +1,6 @@ //! Types for the Charon core. -use std::{collections::HashMap, fmt::Display, iter}; +use std::{any::Any, collections::HashMap, fmt::Display, iter}; use chrono::{DateTime, Duration, Utc}; use dyn_clone::DynClone; @@ -8,7 +8,14 @@ use dyn_eq::DynEq; use serde::{Deserialize, Serialize}; use std::fmt::Debug as StdDebug; -use crate::signeddata::SignedDataError; +use crate::{ + corepb::v1::core as pbcore, + parsigex::{ + Error as ParSigExCodecError, + signed_data::{deserialize_signed_data, serialize_signed_data}, + }, + signeddata::SignedDataError, +}; /// The type of duty. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] @@ -66,6 +73,52 @@ impl DutyType { } } +impl From<&DutyType> for i32 { + fn from(duty_type: &DutyType) -> Self { + match duty_type { + DutyType::Unknown => 0, + DutyType::Proposer => 1, + DutyType::Attester => 2, + DutyType::Signature => 3, + DutyType::Exit => 4, + DutyType::BuilderProposer => 5, + DutyType::BuilderRegistration => 6, + DutyType::Randao => 7, + DutyType::PrepareAggregator => 8, + DutyType::Aggregator => 9, + DutyType::SyncMessage => 10, + DutyType::PrepareSyncContribution => 11, + DutyType::SyncContribution => 12, + DutyType::InfoSync => 13, + DutyType::DutySentinel(_) => 14, + } + } +} + +impl TryFrom for DutyType { + type Error = ParSigExCodecError; + + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(DutyType::Unknown), + 1 => Ok(DutyType::Proposer), + 2 => Ok(DutyType::Attester), + 3 => Ok(DutyType::Signature), + 4 => Ok(DutyType::Exit), + 5 => Ok(DutyType::BuilderProposer), + 6 => Ok(DutyType::BuilderRegistration), + 7 => Ok(DutyType::Randao), + 8 => 
Ok(DutyType::PrepareAggregator), + 9 => Ok(DutyType::Aggregator), + 10 => Ok(DutyType::SyncMessage), + 11 => Ok(DutyType::PrepareSyncContribution), + 12 => Ok(DutyType::SyncContribution), + 13 => Ok(DutyType::InfoSync), + _ => Err(ParSigExCodecError::InvalidDuty), + } + } +} + /// SlotNumber struct #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct SlotNumber(u64); @@ -192,6 +245,28 @@ impl Duty { } } +impl From<&Duty> for pbcore::Duty { + fn from(duty: &Duty) -> Self { + Self { + slot: duty.slot.inner(), + r#type: i32::from(&duty.duty_type), + } + } +} + +impl TryFrom<&pbcore::Duty> for Duty { + type Error = ParSigExCodecError; + + fn try_from(duty: &pbcore::Duty) -> Result { + let duty_type = DutyType::try_from(duty.r#type)?; + if !duty_type.is_valid() { + return Err(ParSigExCodecError::InvalidDuty); + } + + Ok(Self::new(duty.slot.into(), duty_type)) + } +} + /// The type of proposal. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] @@ -452,7 +527,7 @@ impl AsRef<[u8; SIG_LEN]> for Signature { } /// Signed data type -pub trait SignedData: DynClone + DynEq + StdDebug + Send + Sync { +pub trait SignedData: Any + DynClone + DynEq + StdDebug + Send + Sync { /// signature returns the signed duty data's signature. 
fn signature(&self) -> Result; @@ -517,6 +592,39 @@ impl ParSignedData { } } +impl TryFrom<&ParSignedData> for pbcore::ParSignedData { + type Error = ParSigExCodecError; + + fn try_from(data: &ParSignedData) -> Result { + let encoded = serialize_signed_data(data.signed_data.as_ref())?; + let share_idx = + i32::try_from(data.share_idx).map_err(|_| ParSigExCodecError::InvalidShareIndex)?; + let signature = data.signed_data.signature().map_err(|err| { + ParSigExCodecError::Serialize(serde_json::Error::io(std::io::Error::other( + err.to_string(), + ))) + })?; + + Ok(Self { + data: encoded.into(), + signature: signature.as_ref().to_vec().into(), + share_idx, + }) + } +} + +impl TryFrom<(&DutyType, &pbcore::ParSignedData)> for ParSignedData { + type Error = ParSigExCodecError; + + fn try_from(value: (&DutyType, &pbcore::ParSignedData)) -> Result { + let (duty_type, data) = value; + let share_idx = + u64::try_from(data.share_idx).map_err(|_| ParSigExCodecError::InvalidShareIndex)?; + let signed_data = deserialize_signed_data(duty_type, &data.data)?; + Ok(Self::new_boxed(signed_data, share_idx)) + } +} + /// ParSignedDataSet is a set of partially signed duty data only signed by a /// single threshold BLS share. 
#[derive(Debug, Clone, PartialEq, Eq, Default)] @@ -554,6 +662,39 @@ impl ParSignedDataSet { } } +impl TryFrom<&ParSignedDataSet> for pbcore::ParSignedDataSet { + type Error = ParSigExCodecError; + + fn try_from(set: &ParSignedDataSet) -> Result { + let mut out = std::collections::BTreeMap::new(); + for (pub_key, value) in set.inner() { + out.insert(pub_key.to_string(), pbcore::ParSignedData::try_from(value)?); + } + + Ok(Self { set: out }) + } +} + +impl TryFrom<(&DutyType, &pbcore::ParSignedDataSet)> for ParSignedDataSet { + type Error = ParSigExCodecError; + + fn try_from(value: (&DutyType, &pbcore::ParSignedDataSet)) -> Result { + let (duty_type, set) = value; + if set.set.is_empty() { + return Err(ParSigExCodecError::InvalidParSignedDataSetFields); + } + + let mut out = Self::new(); + for (pub_key, value) in &set.set { + let pub_key = PubKey::try_from(pub_key.as_str()) + .map_err(|_| ParSigExCodecError::InvalidPubKey(pub_key.clone()))?; + out.insert(pub_key, ParSignedData::try_from((duty_type, value))?); + } + + Ok(out) + } +} + /// SignedDataSet is a set of signed duty data. #[derive(Debug, Clone, PartialEq, Eq)] pub struct SignedDataSet(HashMap); diff --git a/crates/p2p/src/p2p.rs b/crates/p2p/src/p2p.rs index a35a2775..7a52840b 100644 --- a/crates/p2p/src/p2p.rs +++ b/crates/p2p/src/p2p.rs @@ -336,7 +336,7 @@ impl Node { .map_err(P2PError::failed_to_build_swarm)? .with_behaviour(|key, relay_client| { let builder = - PlutoBehaviourBuilder::default().with_p2p_context(p2p_context.clone()); + PlutoBehaviourBuilder::default().with_p2p_context(p2p_context.clone()).with_quic_enabled(true); behaviour_fn(builder, key, relay_client).build(key) }) .map_err(P2PError::failed_to_build_swarm)? 
diff --git a/crates/p2p/src/relay.rs b/crates/p2p/src/relay.rs index c334670f..e74c536f 100644 --- a/crates/p2p/src/relay.rs +++ b/crates/p2p/src/relay.rs @@ -31,9 +31,10 @@ use libp2p::{ ToSwarm, dial_opts::DialOpts, dummy, }, }; -use tokio::time::Interval; +use tokio::time::{Instant, Interval}; const RELAY_ROUTER_INTERVAL: Duration = Duration::from_secs(60); +const RELAY_ROUTER_INITIAL_DELAY: Duration = Duration::from_secs(10); /// Mutable relay reservation behaviour. /// @@ -246,7 +247,10 @@ pub struct RelayRouter { impl RelayRouter { /// Creates a new relay router. pub fn new(relays: Vec, p2p_context: P2PContext, local_peer_id: PeerId) -> Self { - let mut interval = tokio::time::interval(RELAY_ROUTER_INTERVAL); + let mut interval = tokio::time::interval_at( + Instant::now() + RELAY_ROUTER_INITIAL_DELAY, + RELAY_ROUTER_INTERVAL, + ); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); Self { From a4c4bb21af3ba6266f907e3ee73b6f953059a969 Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Thu, 19 Mar 2026 16:37:43 +0100 Subject: [PATCH 10/11] fix: typo --- crates/core/src/lib.rs | 4 ++-- crates/core/src/{parasigdb => parsigdb}/memory.rs | 4 ++-- .../core/src/{parasigdb => parsigdb}/memory_internal_test.rs | 0 crates/core/src/{parasigdb => parsigdb}/metrics.rs | 4 ++-- crates/core/src/{parasigdb => parsigdb}/mod.rs | 0 5 files changed, 6 insertions(+), 6 deletions(-) rename crates/core/src/{parasigdb => parsigdb}/memory.rs (99%) rename crates/core/src/{parasigdb => parsigdb}/memory_internal_test.rs (100%) rename crates/core/src/{parasigdb => parsigdb}/metrics.rs (72%) rename crates/core/src/{parasigdb => parsigdb}/mod.rs (100%) diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 62460d0d..ac709968 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -23,8 +23,8 @@ pub mod version; /// Duty deadline tracking and notification. 
pub mod deadline; -/// parasigdb -pub mod parasigdb; +/// parsigdb +pub mod parsigdb; /// Test utilities. #[cfg(test)] diff --git a/crates/core/src/parasigdb/memory.rs b/crates/core/src/parsigdb/memory.rs similarity index 99% rename from crates/core/src/parasigdb/memory.rs rename to crates/core/src/parsigdb/memory.rs index f693a80a..e4d025e5 100644 --- a/crates/core/src/parasigdb/memory.rs +++ b/crates/core/src/parsigdb/memory.rs @@ -5,7 +5,7 @@ use tracing::{debug, warn}; use crate::{ deadline::Deadliner, - parasigdb::metrics::PARASIG_DB_METRICS, + parsigdb::metrics::PARSIG_DB_METRICS, signeddata::SignedDataError, types::{Duty, DutyType, ParSignedData, ParSignedDataSet, PubKey}, }; @@ -351,7 +351,7 @@ impl MemDB { .push(k.clone()); if k.duty.duty_type == DutyType::Exit { - PARASIG_DB_METRICS.exit_total[&k.pub_key.to_string()].inc(); + PARSIG_DB_METRICS.exit_total[&k.pub_key.to_string()].inc(); } let result = inner.entries.get(&k).cloned().unwrap_or_default(); diff --git a/crates/core/src/parasigdb/memory_internal_test.rs b/crates/core/src/parsigdb/memory_internal_test.rs similarity index 100% rename from crates/core/src/parasigdb/memory_internal_test.rs rename to crates/core/src/parsigdb/memory_internal_test.rs diff --git a/crates/core/src/parasigdb/metrics.rs b/crates/core/src/parsigdb/metrics.rs similarity index 72% rename from crates/core/src/parasigdb/metrics.rs rename to crates/core/src/parsigdb/metrics.rs index c828725d..24a05fd8 100644 --- a/crates/core/src/parasigdb/metrics.rs +++ b/crates/core/src/parsigdb/metrics.rs @@ -2,11 +2,11 @@ use vise::*; /// Metrics for the ParSigDB. #[derive(Debug, Clone, Metrics)] -pub struct ParasigDBMetrics { +pub struct ParsigDBMetrics { /// Total number of partially signed voluntary exits per public key #[metrics(labels = ["pubkey"])] pub exit_total: LabeledFamily, } /// Global metrics for the ParSigDB. 
-pub static PARASIG_DB_METRICS: Global = Global::new(); +pub static PARSIG_DB_METRICS: Global = Global::new(); diff --git a/crates/core/src/parasigdb/mod.rs b/crates/core/src/parsigdb/mod.rs similarity index 100% rename from crates/core/src/parasigdb/mod.rs rename to crates/core/src/parsigdb/mod.rs From 6e9d22499bb6d3457ae398aa17daea81289ca99a Mon Sep 17 00:00:00 2001 From: Bohdan Ohorodnii <35969035+varex83@users.noreply.github.com> Date: Fri, 20 Mar 2026 14:42:03 +0100 Subject: [PATCH 11/11] fix: parsigex --- Cargo.lock | 26 ++ Cargo.toml | 2 + crates/core/src/lib.rs | 5 +- crates/core/src/parsigex/mod.rs | 81 ------- .../signed_data.rs => parsigex_codec.rs} | 58 ++++- crates/core/src/types.rs | 6 +- crates/p2p/Cargo.toml | 2 + crates/p2p/src/lib.rs | 3 + crates/p2p/src/p2p.rs | 41 ++-- crates/p2p/src/proto.rs | 86 +++++++ crates/p2p/src/relay.rs | 4 +- crates/parsigex/Cargo.toml | 32 +++ .../examples/parsigex.rs} | 122 +++++++--- .../parsigex => parsigex/src}/behaviour.rs | 161 ++++++++----- .../src/parsigex => parsigex/src}/handler.rs | 227 ++++++++++++------ crates/parsigex/src/lib.rs | 41 ++++ .../src/parsigex => parsigex/src}/protocol.rs | 39 +-- crates/peerinfo/src/protocol.rs | 99 ++------ 18 files changed, 648 insertions(+), 387 deletions(-) delete mode 100644 crates/core/src/parsigex/mod.rs rename crates/core/src/{parsigex/signed_data.rs => parsigex_codec.rs} (61%) create mode 100644 crates/p2p/src/proto.rs create mode 100644 crates/parsigex/Cargo.toml rename crates/{core/examples/parasigex.rs => parsigex/examples/parsigex.rs} (77%) rename crates/{core/src/parsigex => parsigex/src}/behaviour.rs (70%) rename crates/{core/src/parsigex => parsigex/src}/handler.rs (56%) create mode 100644 crates/parsigex/src/lib.rs rename crates/{core/src/parsigex => parsigex/src}/protocol.rs (61%) diff --git a/Cargo.lock b/Cargo.lock index fa25ee6b..d752c4d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5688,6 +5688,7 @@ dependencies = [ "pluto-k1util", "pluto-testutil", 
"pluto-tracing", + "prost 0.14.3", "rand 0.8.5", "reqwest 0.13.2", "serde", @@ -5697,11 +5698,36 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "unsigned-varint 0.8.0", "url", "vise", "vise-exporter", ] +[[package]] +name = "pluto-parsigex" +version = "1.7.1" +dependencies = [ + "anyhow", + "clap", + "either", + "futures", + "futures-timer", + "hex", + "libp2p", + "pluto-cluster", + "pluto-core", + "pluto-p2p", + "pluto-tracing", + "prost 0.14.3", + "serde_json", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "tracing", + "unsigned-varint 0.8.0", +] + [[package]] name = "pluto-peerinfo" version = "1.7.1" diff --git a/Cargo.toml b/Cargo.toml index 3a7a2e19..c0c3f4f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] members = [ "crates/app", + "crates/parsigex", "crates/build-proto", "crates/cli", "crates/cluster", @@ -98,6 +99,7 @@ wiremock = "0.6" # Crates in the workspace pluto-app = { path = "crates/app" } +pluto-parsigex = { path = "crates/parasigex" } pluto-build-proto = { path = "crates/build-proto" } pluto-cli = { path = "crates/cli" } pluto-cluster = { path = "crates/cluster" } diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 2fad3458..2e356963 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -26,8 +26,9 @@ pub mod deadline; /// parsigdb pub mod parsigdb; -/// Partial signature exchange. -pub mod parsigex; +mod parsigex_codec; + +pub use parsigex_codec::ParSigExCodecError; /// Test utilities. #[cfg(test)] diff --git a/crates/core/src/parsigex/mod.rs b/crates/core/src/parsigex/mod.rs deleted file mode 100644 index 312cadc1..00000000 --- a/crates/core/src/parsigex/mod.rs +++ /dev/null @@ -1,81 +0,0 @@ -//! Partial signature exchange protocol. -//! -//! In-memory exchange test helpers are intentionally not part of this module. -//! We should revisit that only when wiring higher-level integration coverage in -//! `testutil/integration`. -//! -//! 
The reason is dependency direction: `core` sits above `testutil` in the -//! dependency tree, so test scaffolding for integration-style exchange should -//! not live in `core`. - -pub mod behaviour; -mod handler; -mod protocol; -pub(crate) mod signed_data; - -use libp2p::PeerId; - -pub use behaviour::{ - Behaviour, Config, DutyGater, Error as BehaviourError, Event, Handle, Verifier, VerifyError, -}; -pub use handler::Handler; -pub use protocol::{decode_message, encode_message}; - -/// The protocol name for partial signature exchange (version 2.0.0). -pub const PROTOCOL_NAME: libp2p::swarm::StreamProtocol = - libp2p::swarm::StreamProtocol::new("/charon/parsigex/2.0.0"); - -/// Returns the supported protocols in precedence order. -pub fn protocols() -> Vec { - vec![PROTOCOL_NAME] -} - -/// Error type for proto and conversion operations. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// Missing duty or data set fields. - #[error("invalid parsigex msg fields")] - InvalidMessageFields, - - /// Invalid partial signed data set proto. - #[error("invalid partial signed data set proto fields")] - InvalidParSignedDataSetFields, - - /// Invalid partial signed proto. - #[error("invalid partial signed proto")] - InvalidParSignedProto, - - /// Invalid duty type. - #[error("invalid duty")] - InvalidDuty, - - /// Unsupported duty type. - #[error("unsupported duty type")] - UnsupportedDutyType, - - /// Deprecated builder proposer duty. - #[error("deprecated duty builder proposer")] - DeprecatedBuilderProposer, - - /// Failed to parse a public key. - #[error("invalid public key: {0}")] - InvalidPubKey(String), - - /// Invalid share index. - #[error("invalid share index")] - InvalidShareIndex, - - /// Serialization failed. - #[error("marshal signed data: {0}")] - Serialize(#[from] serde_json::Error), - - /// Broadcast failed for a peer. - #[error("broadcast to peer {peer} failed")] - BroadcastPeer { - /// Peer for which the broadcast failed. 
- peer: PeerId, - }, -} - -/// Result type for partial signature exchange operations. -pub type Result = std::result::Result; diff --git a/crates/core/src/parsigex/signed_data.rs b/crates/core/src/parsigex_codec.rs similarity index 61% rename from crates/core/src/parsigex/signed_data.rs rename to crates/core/src/parsigex_codec.rs index b6e9bed2..1859be72 100644 --- a/crates/core/src/parsigex/signed_data.rs +++ b/crates/core/src/parsigex_codec.rs @@ -1,4 +1,4 @@ -//! Message and type conversion helpers for partial signature exchange. +//! Partial signature exchange codec helpers used by core types. use std::any::Any; @@ -12,9 +12,47 @@ use crate::{ types::{DutyType, Signature, SignedData}, }; -use super::Error; +/// Error type for partial signature exchange codec operations. +#[derive(Debug, thiserror::Error)] +pub enum ParSigExCodecError { + /// Missing duty or data set fields. + #[error("invalid parsigex msg fields")] + InvalidMessageFields, -pub(crate) fn serialize_signed_data(data: &dyn SignedData) -> Result, Error> { + /// Invalid partial signed data set proto. + #[error("invalid partial signed data set proto fields")] + InvalidParSignedDataSetFields, + + /// Invalid partial signed proto. + #[error("invalid partial signed proto")] + InvalidParSignedProto, + + /// Invalid duty type. + #[error("invalid duty")] + InvalidDuty, + + /// Unsupported duty type. + #[error("unsupported duty type")] + UnsupportedDutyType, + + /// Deprecated builder proposer duty. + #[error("deprecated duty builder proposer")] + DeprecatedBuilderProposer, + + /// Failed to parse a public key. + #[error("invalid public key: {0}")] + InvalidPubKey(String), + + /// Invalid share index. + #[error("invalid share index")] + InvalidShareIndex, + + /// Serialization failed. + #[error("marshal signed data: {0}")] + Serialize(#[from] serde_json::Error), +} + +pub(crate) fn serialize_signed_data(data: &dyn SignedData) -> Result, ParSigExCodecError> { let any = data as &dyn Any; macro_rules! 
serialize_as { @@ -39,27 +77,27 @@ pub(crate) fn serialize_signed_data(data: &dyn SignedData) -> Result, Er serialize_as!(SyncCommitteeSelection); serialize_as!(SignedSyncContributionAndProof); - Err(Error::UnsupportedDutyType) + Err(ParSigExCodecError::UnsupportedDutyType) } pub(crate) fn deserialize_signed_data( duty_type: &DutyType, bytes: &[u8], -) -> Result, Error> { +) -> Result, ParSigExCodecError> { macro_rules! deserialize_json { ($ty:ty) => { serde_json::from_slice::<$ty>(bytes) .map(|value| Box::new(value) as Box) - .map_err(Error::from) + .map_err(ParSigExCodecError::from) }; } match duty_type { DutyType::Attester => deserialize_json!(VersionedAttestation) .or_else(|_| deserialize_json!(Attestation)) - .map_err(|_| Error::UnsupportedDutyType), + .map_err(|_| ParSigExCodecError::UnsupportedDutyType), DutyType::Proposer => deserialize_json!(VersionedSignedProposal), - DutyType::BuilderProposer => Err(Error::DeprecatedBuilderProposer), + DutyType::BuilderProposer => Err(ParSigExCodecError::DeprecatedBuilderProposer), DutyType::BuilderRegistration => deserialize_json!(VersionedSignedValidatorRegistration), DutyType::Exit => deserialize_json!(SignedVoluntaryExit), DutyType::Randao => deserialize_json!(SignedRandao), @@ -67,12 +105,12 @@ pub(crate) fn deserialize_signed_data( DutyType::PrepareAggregator => deserialize_json!(BeaconCommitteeSelection), DutyType::Aggregator => deserialize_json!(VersionedSignedAggregateAndProof) .or_else(|_| deserialize_json!(SignedAggregateAndProof)) - .map_err(|_| Error::UnsupportedDutyType), + .map_err(|_| ParSigExCodecError::UnsupportedDutyType), DutyType::SyncMessage => deserialize_json!(SignedSyncMessage), DutyType::PrepareSyncContribution => deserialize_json!(SyncCommitteeSelection), DutyType::SyncContribution => deserialize_json!(SignedSyncContributionAndProof), DutyType::Unknown | DutyType::InfoSync | DutyType::DutySentinel(_) => { - Err(Error::UnsupportedDutyType) + Err(ParSigExCodecError::UnsupportedDutyType) } } } 
diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index bae0d93c..05a73f03 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -9,11 +9,9 @@ use serde::{Deserialize, Serialize}; use std::fmt::Debug as StdDebug; use crate::{ + ParSigExCodecError, corepb::v1::core as pbcore, - parsigex::{ - Error as ParSigExCodecError, - signed_data::{deserialize_signed_data, serialize_signed_data}, - }, + parsigex_codec::{deserialize_signed_data, serialize_signed_data}, signeddata::SignedDataError, }; diff --git a/crates/p2p/Cargo.toml b/crates/p2p/Cargo.toml index 750609b1..935dbaf7 100644 --- a/crates/p2p/Cargo.toml +++ b/crates/p2p/Cargo.toml @@ -16,6 +16,7 @@ thiserror.workspace = true k256.workspace = true pluto-eth2util.workspace = true pluto-k1util.workspace = true +prost.workspace = true vise.workspace = true tokio.workspace = true tokio-util.workspace = true @@ -29,6 +30,7 @@ pluto-core.workspace = true backon.workspace = true reqwest.workspace = true url.workspace = true +unsigned-varint.workspace = true [dev-dependencies] pluto-testutil.workspace = true diff --git a/crates/p2p/src/lib.rs b/crates/p2p/src/lib.rs index 0c06608b..5e85afc4 100644 --- a/crates/p2p/src/lib.rs +++ b/crates/p2p/src/lib.rs @@ -51,3 +51,6 @@ pub mod relay; /// Force direct connection behaviour. pub mod force_direct; + +/// Protobuf utilities. +pub mod proto; diff --git a/crates/p2p/src/p2p.rs b/crates/p2p/src/p2p.rs index 7a52840b..33ba5ab1 100644 --- a/crates/p2p/src/p2p.rs +++ b/crates/p2p/src/p2p.rs @@ -110,6 +110,14 @@ use crate::{ utils, }; +const YAMUX_MAX_NUM_STREAMS: usize = 2_048; + +fn yamux_config() -> yamux::Config { + let mut config = yamux::Config::default(); + config.set_max_num_streams(YAMUX_MAX_NUM_STREAMS); + config +} + /// P2P error. 
#[derive(Debug, thiserror::Error)] pub enum P2PError { @@ -323,20 +331,17 @@ impl Node { { let swarm = SwarmBuilder::with_existing_identity(keypair) .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ) + .with_tcp(tcp::Config::default(), noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? .with_quic() .with_dns() .map_err(P2PError::failed_to_build_swarm)? - .with_relay_client(noise::Config::new, yamux::Config::default) + .with_relay_client(noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? .with_behaviour(|key, relay_client| { - let builder = - PlutoBehaviourBuilder::default().with_p2p_context(p2p_context.clone()).with_quic_enabled(true); + let builder = PlutoBehaviourBuilder::default() + .with_p2p_context(p2p_context.clone()) + .with_quic_enabled(true); behaviour_fn(builder, key, relay_client).build(key) }) .map_err(P2PError::failed_to_build_swarm)? @@ -364,15 +369,11 @@ impl Node { { let swarm = SwarmBuilder::with_existing_identity(keypair) .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ) + .with_tcp(tcp::Config::default(), noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? .with_dns() .map_err(P2PError::failed_to_build_swarm)? - .with_relay_client(noise::Config::new, yamux::Config::default) + .with_relay_client(noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? .with_behaviour(|key, relay_client| { let builder = @@ -400,11 +401,7 @@ impl Node { { let swarm = SwarmBuilder::with_existing_identity(keypair) .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ) + .with_tcp(tcp::Config::default(), noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? 
.with_quic() .with_dns() @@ -435,11 +432,7 @@ impl Node { { let swarm = SwarmBuilder::with_existing_identity(keypair) .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ) + .with_tcp(tcp::Config::default(), noise::Config::new, yamux_config) .map_err(P2PError::failed_to_build_swarm)? .with_quic() .with_dns() diff --git a/crates/p2p/src/proto.rs b/crates/p2p/src/proto.rs new file mode 100644 index 00000000..57825b4a --- /dev/null +++ b/crates/p2p/src/proto.rs @@ -0,0 +1,86 @@ +use std::io; + +use futures::prelude::*; +use prost::Message; +use unsigned_varint::aio::read_usize; + +/// Default maximum message size (64KB should be plenty for peer info). +pub const MAX_MESSAGE_SIZE: usize = 64 * 1024; + +/// Writes a length-delimited payload with an unsigned varint length prefix. +/// +/// Wire format: `[uvarint length][payload bytes]` +pub async fn write_length_delimited( + stream: &mut S, + payload: &[u8], +) -> io::Result<()> { + let mut len_buf = unsigned_varint::encode::usize_buffer(); + let encoded_len = unsigned_varint::encode::usize(payload.len(), &mut len_buf); + stream.write_all(encoded_len).await?; + stream.write_all(payload).await?; + stream.flush().await +} + +/// Reads a length-delimited payload with an unsigned varint length prefix. +/// +/// Wire format: `[uvarint length][payload bytes]` +/// +/// Returns an error if the payload exceeds `max_message_size`. 
+pub async fn read_length_delimited( + stream: &mut S, + max_message_size: usize, +) -> io::Result> { + let msg_len = read_usize(&mut *stream).await.map_err(|e| match e { + unsigned_varint::io::ReadError::Io(io_err) => io_err, + other => io::Error::new(io::ErrorKind::InvalidData, other), + })?; + + if msg_len > max_message_size { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("message too large: {msg_len} bytes (max: {max_message_size})"), + )); + } + + let mut buf = vec![0u8; msg_len]; + stream.read_exact(&mut buf).await?; + Ok(buf) +} + +/// Writes a protobuf message with unsigned varint length prefix to the stream. +/// +/// Wire format: `[uvarint length][protobuf bytes]` +pub async fn write_protobuf( + stream: &mut S, + msg: &M, +) -> io::Result<()> { + let mut buf = Vec::with_capacity(msg.encoded_len()); + msg.encode(&mut buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + write_length_delimited(stream, &buf).await +} + +/// Reads a protobuf message with unsigned varint length prefix from the stream. +/// +/// Wire format: `[uvarint length][protobuf bytes]` +/// +/// Returns an error if the message exceeds `MAX_MESSAGE_SIZE`. +pub async fn read_protobuf( + stream: &mut S, +) -> io::Result { + read_protobuf_with_max_size(stream, MAX_MESSAGE_SIZE).await +} + +/// Reads a protobuf message with unsigned varint length prefix from the stream. +/// +/// Wire format: `[uvarint length][protobuf bytes]` +/// +/// Returns an error if the message exceeds `max_message_size`. 
+pub async fn read_protobuf_with_max_size( + stream: &mut S, + max_message_size: usize, +) -> io::Result { + let buf = read_length_delimited(stream, max_message_size).await?; + + M::decode(&buf[..]).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) +} diff --git a/crates/p2p/src/relay.rs b/crates/p2p/src/relay.rs index e74c536f..946785e1 100644 --- a/crates/p2p/src/relay.rs +++ b/crates/p2p/src/relay.rs @@ -248,7 +248,9 @@ impl RelayRouter { /// Creates a new relay router. pub fn new(relays: Vec, p2p_context: P2PContext, local_peer_id: PeerId) -> Self { let mut interval = tokio::time::interval_at( - Instant::now() + RELAY_ROUTER_INITIAL_DELAY, + Instant::now() + .checked_add(RELAY_ROUTER_INITIAL_DELAY) + .expect("should not overflow"), RELAY_ROUTER_INTERVAL, ); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); diff --git a/crates/parsigex/Cargo.toml b/crates/parsigex/Cargo.toml new file mode 100644 index 00000000..c89d7fbe --- /dev/null +++ b/crates/parsigex/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "pluto-parsigex" +version.workspace = true +edition.workspace = true +repository.workspace = true +license.workspace = true +publish.workspace = true + +[dependencies] +either.workspace = true +futures.workspace = true +futures-timer.workspace = true +libp2p.workspace = true +prost.workspace = true +thiserror.workspace = true +tokio.workspace = true +tracing.workspace = true +unsigned-varint.workspace = true +pluto-core.workspace = true +pluto-p2p.workspace = true + +[dev-dependencies] +anyhow.workspace = true +clap.workspace = true +hex.workspace = true +pluto-cluster.workspace = true +pluto-tracing.workspace = true +tokio-util.workspace = true +serde_json.workspace = true + +[lints] +workspace = true diff --git a/crates/core/examples/parasigex.rs b/crates/parsigex/examples/parsigex.rs similarity index 77% rename from crates/core/examples/parasigex.rs rename to crates/parsigex/examples/parsigex.rs index 5341bf4e..af7d4287 100644 
--- a/crates/core/examples/parasigex.rs +++ b/crates/parsigex/examples/parsigex.rs @@ -1,6 +1,10 @@ #![allow(missing_docs)] -use std::{collections::HashSet, path::PathBuf, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, + time::Duration, +}; use anyhow::{Context, Result, anyhow}; use clap::Parser; @@ -12,7 +16,6 @@ use libp2p::{ }; use pluto_cluster::lock::Lock; use pluto_core::{ - parsigex::{self, DutyGater, Event, Handle, Verifier}, signeddata::SignedRandao, types::{Duty, DutyType, ParSignedDataSet, PubKey, SlotNumber}, }; @@ -25,6 +28,7 @@ use pluto_p2p::{ peer::peer_id_from_key, relay::{MutableRelayReservation, RelayRouter}, }; +use pluto_parsigex::{self as parsigex, DutyGater, Event, Handle, Verifier}; use pluto_tracing::TracingConfig; use tokio::fs; use tokio_util::sync::CancellationToken; @@ -36,7 +40,7 @@ struct CombinedBehaviour { relay: relay::client::Behaviour, relay_reservation: MutableRelayReservation, relay_router: RelayRouter, - parasigex: parsigex::Behaviour, + parsigex: parsigex::Behaviour, } #[derive(Debug)] @@ -64,7 +68,7 @@ impl From for CombinedBehaviourEvent { } #[derive(Debug, Parser)] -#[command(name = "parasigex-example")] +#[command(name = "parsigex-example")] #[command(about = "Demonstrates partial signature exchange over the bootnode/relay P2P path")] struct Args { /// Relay URLs or multiaddrs. 
@@ -171,10 +175,11 @@ async fn main() -> Result<()> { let known_peers = lock .peer_ids() .context("failed to derive peer IDs from lock")?; - let self_index = known_peers - .iter() - .position(|peer_id| *peer_id == local_peer_id) - .ok_or_else(|| anyhow!("local peer ID {local_peer_id} not found in cluster lock"))?; + if !known_peers.contains(&local_peer_id) { + return Err(anyhow!( + "local peer ID {local_peer_id} not found in cluster lock" + )); + } let conn_gater = gater::ConnGater::new( gater::Config::closed() .with_relays(relays.clone()) @@ -200,7 +205,7 @@ async fn main() -> Result<()> { .filter_map(|relay| relay.peer().ok().flatten().map(|peer| peer.id)) .collect(); - let mut parasigex_handle: Option = None; + let mut parsigex_handle: Option = None; let mut node: Node = Node::new( p2p_config, key, @@ -209,28 +214,21 @@ async fn main() -> Result<()> { known_peers.clone(), |builder, keypair, relay_client| { let p2p_context = builder.p2p_context(); - let broadcast_context = p2p_context.clone(); let local_peer_id = keypair.public().to_peer_id(); let config = parsigex::Config::new( - known_peers.clone(), - self_index, + local_peer_id, + p2p_context.clone(), verifier.clone(), duty_gater.clone(), - std::sync::Arc::new(move |peer| { - !broadcast_context - .peer_store_lock() - .connections_to_peer(peer) - .is_empty() - }), ) .with_timeout(Duration::from_secs(10)); - let (parasigex, handle) = parsigex::Behaviour::new(config); - parasigex_handle = Some(handle); + let (parsigex, handle) = parsigex::Behaviour::new(config, local_peer_id); + parsigex_handle = Some(handle); builder .with_gater(conn_gater) .with_inner(CombinedBehaviour { - parasigex, + parsigex, relay: relay_client, relay_reservation: MutableRelayReservation::new(relays.clone()), relay_router: RelayRouter::new(relays.clone(), p2p_context, local_peer_id), @@ -238,18 +236,19 @@ async fn main() -> Result<()> { }, )?; - let parasigex_handle = - parasigex_handle.ok_or_else(|| anyhow!("parasigex handle should be 
created"))?; + let parsigex_handle = + parsigex_handle.ok_or_else(|| anyhow!("parsigex handle should be created"))?; info!( peer_id = %node.local_peer_id(), data_dir = %args.data_dir.display(), known_peers = ?known_peers, relays = ?args.relays, - "parasigex example started" + "parsigex example started" ); let mut ticker = tokio::time::interval(Duration::from_secs(args.broadcast_every)); + let mut pending_broadcasts: HashMap = HashMap::new(); loop { tokio::select! { @@ -263,9 +262,15 @@ async fn main() -> Result<()> { let duty = Duty::new(SlotNumber::new(*slot), DutyType::Randao); let data_set = make_sample_set(*slot, args.share_idx); - match parasigex_handle.broadcast(duty.clone(), data_set.clone()).await { - Ok(()) => { - info!(duty = %duty, share_idx = args.share_idx, "broadcasted sample partial signature set"); + match parsigex_handle.broadcast(duty.clone(), data_set.clone()).await { + Ok(request_id) => { + pending_broadcasts.insert(request_id, (duty.clone(), args.share_idx)); + info!( + request_id, + duty = %duty, + share_idx = args.share_idx, + "queued sample partial signature set for broadcast" + ); *slot = slot.saturating_add(1); } Err(error) => { @@ -274,7 +279,6 @@ async fn main() -> Result<()> { } } event = node.select_next_some() => { - info!("received swarm event"); let peer_type = |peer_id: &libp2p::PeerId| { if relay_peer_ids.contains(peer_id) { "RELAY" @@ -433,7 +437,67 @@ async fn main() -> Result<()> { SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( CombinedBehaviourEvent::ParSigEx(Event::Error { peer, error, .. 
}), )) => { - warn!(peer = %peer, error = %error, "parasigex protocol error"); + warn!(peer = %peer, error = %error, "parsigex protocol error"); + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::ParSigEx(Event::BroadcastError { + request_id, + peer, + error, + }), + )) => { + match pending_broadcasts.get(&request_id) { + Some((duty, share_idx)) => { + warn!( + request_id, + duty = %duty, + share_idx, + peer = ?peer, + error = %error, + "sample partial signature broadcast failed" + ); + } + None => { + warn!( + request_id, + peer = ?peer, + error = %error, + "partial signature broadcast failed" + ); + } + } + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::ParSigEx(Event::BroadcastComplete { + request_id, + }), + )) => { + if let Some((duty, share_idx)) = pending_broadcasts.remove(&request_id) { + info!( + request_id, + duty = %duty, + share_idx, + "broadcasted sample partial signature set" + ); + } else { + info!(request_id, "partial signature broadcast completed"); + } + } + SwarmEvent::Behaviour(PlutoBehaviourEvent::Inner( + CombinedBehaviourEvent::ParSigEx(Event::BroadcastFinished { + request_id, + }), + )) => { + if let Some((duty, share_idx)) = pending_broadcasts.remove(&request_id) { + warn!( + request_id, + duty = %duty, + share_idx, + "sample partial signature broadcast finished with failures" + ); + } else { + warn!(request_id, "partial signature broadcast finished with failures"); + } } SwarmEvent::NewListenAddr { address, .. 
} => { info!(address = %address, "listening"); diff --git a/crates/core/src/parsigex/behaviour.rs b/crates/parsigex/src/behaviour.rs similarity index 70% rename from crates/core/src/parsigex/behaviour.rs rename to crates/parsigex/src/behaviour.rs index 3a8f7d1d..4f9d6c5e 100644 --- a/crates/core/src/parsigex/behaviour.rs +++ b/crates/parsigex/src/behaviour.rs @@ -12,21 +12,21 @@ use std::{ time::Duration, }; +use either::Either; use libp2p::{ Multiaddr, PeerId, swarm::{ ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + THandlerInEvent, THandlerOutEvent, ToSwarm, dummy, }, }; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::mpsc; -use crate::types::{Duty, ParSignedData, ParSignedDataSet, PubKey}; +use pluto_core::types::{Duty, ParSignedData, ParSignedDataSet, PubKey}; +use pluto_p2p::p2p_context::P2PContext; -use super::{ - Error as CodecError, Handler, encode_message, - handler::{Failure as HandlerFailure, FromHandler, ToHandler}, -}; +use super::{Error as CodecError, Handler, encode_message}; +use crate::handler::{Failure as HandlerFailure, FromHandler, ToHandler}; /// Future returned by verifier callbacks. pub type VerifyFuture = @@ -39,9 +39,6 @@ pub type Verifier = /// Duty gate callback type. pub type DutyGater = Arc bool + Send + Sync + 'static>; -/// Peer connection callback type. -pub type PeerConnectionChecker = Arc bool + Send + Sync + 'static>; - /// Error type for signature verification callbacks. #[derive(Debug, thiserror::Error)] pub enum VerifyError { @@ -92,7 +89,7 @@ pub enum Error { pub type Result = std::result::Result; /// Event emitted by the partial signature exchange behaviour. -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum Event { /// A verified partial signature set was received from a peer. Received { @@ -114,12 +111,31 @@ pub enum Event { /// Failure reason. error: HandlerFailure, }, + /// Broadcast failed. 
+ BroadcastError { + /// Request identifier. + request_id: u64, + /// Peer for which the broadcast failed. + peer: Option, + /// Failure reason. + error: HandlerFailure, + }, + /// Broadcast completed successfully for all targeted peers. + BroadcastComplete { + /// Request identifier. + request_id: u64, + }, + /// Broadcast finished after one or more peer failures. + BroadcastFinished { + /// Request identifier. + request_id: u64, + }, } #[derive(Debug)] struct PendingBroadcast { remaining: usize, - responder: oneshot::Sender>, + failed: bool, } #[derive(Debug)] @@ -128,7 +144,6 @@ enum Command { request_id: u64, duty: Duty, data_set: ParSignedDataSet, - responder: oneshot::Sender>, }, } @@ -141,48 +156,43 @@ pub struct Handle { impl Handle { /// Broadcasts a partial signature set to all peers except self. - pub async fn broadcast(&self, duty: Duty, data_set: ParSignedDataSet) -> Result<()> { + pub async fn broadcast(&self, duty: Duty, data_set: ParSignedDataSet) -> Result { let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); - let (tx, rx) = oneshot::channel(); self.tx .send(Command::Broadcast { request_id, duty, data_set, - responder: tx, }) .map_err(|_| Error::Closed)?; - Ok(()) + Ok(request_id) } } /// Configuration for the partial signature exchange behaviour. #[derive(Clone)] pub struct Config { - peers: Vec, - self_index: usize, + peer_id: PeerId, + p2p_context: P2PContext, verifier: Verifier, duty_gater: DutyGater, - is_peer_connected: PeerConnectionChecker, timeout: Duration, } impl Config { /// Creates a new configuration. 
pub fn new( - peers: Vec, - self_index: usize, + peer_id: PeerId, + p2p_context: P2PContext, verifier: Verifier, duty_gater: DutyGater, - is_peer_connected: PeerConnectionChecker, ) -> Self { Self { - peers, - self_index, + peer_id, + p2p_context, verifier, duty_gater, - is_peer_connected, timeout: Duration::from_secs(20), } } @@ -205,7 +215,8 @@ pub struct Behaviour { impl Behaviour { /// Creates a behaviour and a clonable broadcast handle. - pub fn new(config: Config) -> (Self, Handle) { + pub fn new(config: Config, peer_id: PeerId) -> (Self, Handle) { + debug_assert_eq!(config.peer_id, peer_id); let (tx, rx) = mpsc::unbounded_channel(); let handle = Handle { tx, @@ -230,40 +241,55 @@ impl Behaviour { request_id, duty, data_set, - responder, } => { let message = match encode_message(&duty, &data_set) { Ok(message) => message, Err(err) => { - let _ = responder.send(Err(Error::from(err))); + self.broadcast_error(request_id, None, HandlerFailure::Codec(err)); return; } }; + let peers: Vec<_> = self + .config + .p2p_context + .known_peers() + .iter() + .copied() + .collect(); let mut targeted = 0usize; - for (idx, peer) in self.config.peers.iter().enumerate() { - if idx == self.config.self_index { + for peer in peers { + if peer == self.config.peer_id { continue; } - if !(self.config.is_peer_connected)(peer) { - let _ = responder.send(Err(Error::PeerNotConnected(*peer))); - return; + if self + .config + .p2p_context + .peer_store_lock() + .connections_to_peer(&peer) + .is_empty() + { + self.broadcast_error( + request_id, + Some(peer), + HandlerFailure::Io(format!("peer {peer} is not connected")), + ); + continue; } self.pending_actions.push_back(ToSwarm::NotifyHandler { - peer_id: *peer, + peer_id: peer, handler: NotifyHandler::Any, - event: ToHandler::Send { + event: Either::Left(ToHandler::Send { request_id, payload: message.clone(), - }, + }), }); targeted = targeted.saturating_add(1); } if targeted == 0 { - let _ = responder.send(Ok(())); return; } @@ -271,38 
+297,47 @@ impl Behaviour { request_id, PendingBroadcast { remaining: targeted, - responder, + failed: false, }, ); } } } - fn finish_broadcast_success(&mut self, request_id: u64) { + fn finish_broadcast_result(&mut self, request_id: u64, failed: bool) { let Some(entry) = self.pending_broadcasts.get_mut(&request_id) else { return; }; + entry.failed |= failed; entry.remaining = entry.remaining.saturating_sub(1); if entry.remaining == 0 { - if let Some(entry) = self.pending_broadcasts.remove(&request_id) { - let _ = entry.responder.send(Ok(())); + let failed = self + .pending_broadcasts + .remove(&request_id) + .map(|entry| entry.failed) + .unwrap_or(failed); + if failed { + self.events + .push_back(Event::BroadcastFinished { request_id }); + } else { + self.events + .push_back(Event::BroadcastComplete { request_id }); } } } - fn finish_broadcast_error(&mut self, request_id: u64, peer: PeerId, error: HandlerFailure) { - if let Some(entry) = self.pending_broadcasts.remove(&request_id) { - let _ = entry.responder.send(Err(Error::BroadcastPeer { - peer, - source: error, - })); - } + fn broadcast_error(&mut self, request_id: u64, peer: Option, error: HandlerFailure) { + self.events.push_back(Event::BroadcastError { + request_id, + peer, + error, + }); } } impl NetworkBehaviour for Behaviour { - type ConnectionHandler = Handler; + type ConnectionHandler = Either; type ToSwarm = Event; fn handle_established_inbound_connection( @@ -312,13 +347,17 @@ impl NetworkBehaviour for Behaviour { _local_addr: &Multiaddr, _remote_addr: &Multiaddr, ) -> std::result::Result, ConnectionDenied> { + if !self.config.p2p_context.is_known_peer(&peer) { + return Ok(Either::Right(dummy::ConnectionHandler)); + } + tracing::trace!("establishing inbound connection to peer: {:?}", peer); - Ok(Handler::new( + Ok(Either::Left(Handler::new( self.config.timeout, self.config.verifier.clone(), self.config.duty_gater.clone(), peer, - )) + ))) } fn handle_established_outbound_connection( @@ -329,13 +368,17 
@@ impl NetworkBehaviour for Behaviour { _role_override: libp2p::core::Endpoint, _port_use: libp2p::core::transport::PortUse, ) -> std::result::Result, ConnectionDenied> { + if !self.config.p2p_context.is_known_peer(&peer) { + return Ok(Either::Right(dummy::ConnectionHandler)); + } + tracing::trace!("establishing outbound connection to peer: {:?}", peer); - Ok(Handler::new( + Ok(Either::Left(Handler::new( self.config.timeout, self.config.verifier.clone(), self.config.duty_gater.clone(), peer, - )) + ))) } fn on_swarm_event(&mut self, _event: FromSwarm) {} @@ -346,6 +389,11 @@ impl NetworkBehaviour for Behaviour { connection_id: ConnectionId, event: THandlerOutEvent, ) { + let event = match event { + Either::Left(event) => event, + Either::Right(value) => match value {}, + }; + tracing::trace!("received connection handler event: {:?}", event); match event { FromHandler::Received { duty, data_set } => { @@ -364,10 +412,11 @@ impl NetworkBehaviour for Behaviour { }); } FromHandler::OutboundSuccess { request_id } => { - self.finish_broadcast_success(request_id); + self.finish_broadcast_result(request_id, false); } FromHandler::OutboundError { request_id, error } => { - self.finish_broadcast_error(request_id, peer_id, error); + self.finish_broadcast_result(request_id, true); + self.broadcast_error(request_id, Some(peer_id), error); } } } diff --git a/crates/core/src/parsigex/handler.rs b/crates/parsigex/src/handler.rs similarity index 56% rename from crates/core/src/parsigex/handler.rs rename to crates/parsigex/src/handler.rs index b1cf0bb7..e1925aad 100644 --- a/crates/core/src/parsigex/handler.rs +++ b/crates/parsigex/src/handler.rs @@ -20,12 +20,13 @@ use libp2p::{ }, }; -use crate::types::{Duty, ParSignedDataSet}; +use pluto_core::types::{Duty, ParSignedDataSet}; use super::{DutyGater, PROTOCOL_NAME, Verifier, protocol}; +use crate::Error as CodecError; /// Failure type for the partial signature exchange handler. 
-#[derive(Debug, Clone, thiserror::Error)] +#[derive(Debug, thiserror::Error)] pub enum Failure { /// Stream negotiation timed out. #[error("parsigex protocol negotiation timed out")] @@ -42,6 +43,9 @@ pub enum Failure { /// I/O error. #[error("{0}")] Io(String), + /// Codec error. + #[error("codec error: {0}")] + Codec(CodecError), } impl Failure { @@ -51,7 +55,7 @@ impl Failure { } /// Command sent from the behaviour to a handler. -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum ToHandler { /// Send the encoded payload to the remote peer. Send { @@ -63,7 +67,7 @@ pub enum ToHandler { } /// Event sent from the handler back to the behaviour. -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum FromHandler { /// A verified message was received. Received { @@ -92,16 +96,91 @@ type SendFuture = BoxFuture<'static, Result<(), Failure>>; type RecvFuture = BoxFuture<'static, Result<(Duty, ParSignedDataSet), Failure>>; enum OutboundState { - OpenStream { request_id: u64, payload: Vec }, + IdleStream { stream: libp2p::swarm::Stream }, + RequestOpenStream { request_id: u64, payload: Vec }, Sending { request_id: u64, future: SendFuture }, } +impl std::fmt::Debug for OutboundState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + OutboundState::IdleStream { .. } => { + write!(f, "IdleStream {{ stream: }}") + } + OutboundState::RequestOpenStream { + request_id, + payload, + } => write!( + f, + "RequestOpenStream {{ request_id: {}, payload: {:?} }}", + request_id, payload + ), + OutboundState::Sending { request_id, .. 
} => write!( + f, + "Sending {{ request_id: {}, future: }}", + request_id + ), + } + } +} + +fn recv_message( + mut stream: libp2p::swarm::Stream, + verifier: Verifier, + duty_gater: DutyGater, + timeout: Duration, +) -> RecvFuture { + async move { + let recv = async { + let bytes = protocol::recv_message(&mut stream) + .await + .map_err(Failure::io)?; + let (duty, data_set) = + protocol::decode_message(&bytes).map_err(|_| Failure::InvalidPayload)?; + if !(duty_gater)(&duty) { + return Err(Failure::InvalidDuty); + } + + for (pub_key, par_sig) in data_set.inner() { + verifier(duty.clone(), *pub_key, par_sig.clone()) + .await + .map_err(|_| Failure::InvalidPartialSignature)?; + } + + Ok((duty, data_set)) + }; + + futures::pin_mut!(recv); + match futures::future::select(recv, Delay::new(timeout)).await { + futures::future::Either::Left((result, _)) => result, + futures::future::Either::Right(((), _)) => Err(Failure::Timeout), + } + } + .boxed() +} + +fn send_message( + mut stream: libp2p::swarm::Stream, + payload: Vec, + timeout: Duration, +) -> SendFuture { + async move { + let send = + protocol::send_message(&mut stream, &payload).map(|result| result.map_err(Failure::io)); + futures::pin_mut!(send); + match futures::future::select(send, Delay::new(timeout)).await { + futures::future::Either::Left((result, _)) => result, + futures::future::Either::Right(((), _)) => Err(Failure::Timeout), + } + } + .boxed() +} + /// Connection handler for parsigex. pub struct Handler { timeout: Duration, verifier: Verifier, duty_gater: DutyGater, - peer: PeerId, outbound_queue: VecDeque<(u64, Vec)>, outbound: Option, inbound: Option, @@ -110,12 +189,16 @@ pub struct Handler { impl Handler { /// Creates a new handler for one connection. 
- pub fn new(timeout: Duration, verifier: Verifier, duty_gater: DutyGater, peer: PeerId) -> Self { + pub fn new( + timeout: Duration, + verifier: Verifier, + duty_gater: DutyGater, + _peer: PeerId, + ) -> Self { Self { timeout, verifier, duty_gater, - peer, outbound_queue: VecDeque::new(), outbound: None, inbound: None, @@ -127,7 +210,7 @@ impl Handler { &mut self, error: DialUpgradeError<(), ::OutboundProtocol>, ) { - let Some(OutboundState::OpenStream { request_id, .. }) = self.outbound.take() else { + let Some(OutboundState::RequestOpenStream { request_id, .. }) = self.outbound.take() else { return; }; @@ -197,17 +280,28 @@ impl ConnectionHandler for Handler { if let Some(outbound) = self.outbound.take() { match outbound { - OutboundState::OpenStream { + OutboundState::IdleStream { stream } => { + if let Some((request_id, payload)) = self.outbound_queue.pop_front() { + self.outbound = Some(OutboundState::Sending { + request_id, + future: send_message(stream, payload, self.timeout), + }); + } else { + self.outbound = Some(OutboundState::IdleStream { stream }); + } + } + OutboundState::RequestOpenStream { request_id, payload, } => { - self.outbound = Some(OutboundState::OpenStream { + // Waiting for stream negotiation - put state back and return pending. + // The OutboundSubstreamRequest was already emitted when first entering this + // state. Returning it again would cause libp2p to panic + // with "cannot extract twice". 
+ self.outbound = Some(OutboundState::RequestOpenStream { request_id, payload, }); - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), - }); } OutboundState::Sending { request_id, @@ -217,11 +311,13 @@ impl ConnectionHandler for Handler { self.outbound = Some(OutboundState::Sending { request_id, future }); } Poll::Ready(Ok(())) => { + self.outbound = None; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( FromHandler::OutboundSuccess { request_id }, )); } Poll::Ready(Err(error)) => { + self.outbound = None; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( FromHandler::OutboundError { request_id, error }, )); @@ -230,8 +326,12 @@ impl ConnectionHandler for Handler { } } - if let Some((request_id, payload)) = self.outbound_queue.pop_front() { - self.outbound = Some(OutboundState::OpenStream { + // Only start a new outbound operation if none is in progress. + // This prevents overwriting RequestOpenStream or Sending states. + if self.outbound.is_none() + && let Some((request_id, payload)) = self.outbound_queue.pop_front() + { + self.outbound = Some(OutboundState::RequestOpenStream { request_id, payload, }); @@ -253,73 +353,48 @@ impl ConnectionHandler for Handler { .. 
}) => { stream.ignore_for_keep_alive(); - let verifier = self.verifier.clone(); - let duty_gater = self.duty_gater.clone(); - let timeout = self.timeout; - self.inbound = Some( - async move { - let recv = async { - let bytes = protocol::recv_message(&mut stream) - .await - .map_err(Failure::io)?; - let (duty, data_set) = protocol::decode_message(&bytes) - .map_err(|_| Failure::InvalidPayload)?; - if !(duty_gater)(&duty) { - return Err(Failure::InvalidDuty); - } - - for (pub_key, par_sig) in data_set.inner() { - verifier(duty.clone(), *pub_key, par_sig.clone()) - .await - .map_err(|_| Failure::InvalidPartialSignature)?; - } - - Ok((duty, data_set)) - }; - - futures::pin_mut!(recv); - match futures::future::select(recv, Delay::new(timeout)).await { - futures::future::Either::Left((result, _)) => result, - futures::future::Either::Right(((), _)) => Err(Failure::Timeout), - } - } - .boxed(), - ); + self.inbound = Some(recv_message( + stream, + self.verifier.clone(), + self.duty_gater.clone(), + self.timeout, + )); } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol: mut stream, .. 
}) => { stream.ignore_for_keep_alive(); - let Some(OutboundState::OpenStream { - request_id, - payload, - }) = self.outbound.take() - else { - self.pending_events.push_back(FromHandler::OutboundError { - request_id: 0, - error: Failure::io(format!( - "unexpected outbound stream state for peer {}", - self.peer - )), - }); - return; - }; - - let timeout = self.timeout; - self.outbound = Some(OutboundState::Sending { - request_id, - future: async move { - let send = protocol::send_message(&mut stream, &payload) - .map(|result| result.map_err(Failure::io)); - futures::pin_mut!(send); - match futures::future::select(send, Delay::new(timeout)).await { - futures::future::Either::Left((result, _)) => result, - futures::future::Either::Right(((), _)) => Err(Failure::Timeout), - } + match self.outbound.take() { + Some(OutboundState::RequestOpenStream { + request_id, + payload, + }) => { + self.outbound = Some(OutboundState::Sending { + request_id, + future: send_message(stream, payload, self.timeout), + }); } - .boxed(), - }); + Some(OutboundState::Sending { request_id, future }) => { + self.outbound = Some(OutboundState::Sending { request_id, future }); + tracing::debug!( + "dropping unexpected outbound parsigex stream while a send is already in progress" + ); + } + Some(OutboundState::IdleStream { + stream: idle_stream, + }) => { + self.outbound = Some(OutboundState::IdleStream { + stream: idle_stream, + }); + tracing::debug!( + "dropping unexpected outbound parsigex stream while an idle stream is already cached" + ); + } + None => { + self.outbound = Some(OutboundState::IdleStream { stream }); + } + } } ConnectionEvent::DialUpgradeError(error) => self.on_dial_upgrade_error(error), _ => {} diff --git a/crates/parsigex/src/lib.rs b/crates/parsigex/src/lib.rs new file mode 100644 index 00000000..ca967afc --- /dev/null +++ b/crates/parsigex/src/lib.rs @@ -0,0 +1,41 @@ +//! Partial signature exchange protocol. 
+ +pub mod behaviour; +mod handler; +mod protocol; + +pub use behaviour::{ + Behaviour, Config, DutyGater, Error as BehaviourError, Event, Handle, Verifier, VerifyError, +}; +pub use handler::Handler; +pub use protocol::{decode_message, encode_message}; + +use libp2p::PeerId; +use pluto_core::ParSigExCodecError; + +/// The protocol name for partial signature exchange (version 2.0.0). +pub const PROTOCOL_NAME: libp2p::swarm::StreamProtocol = + libp2p::swarm::StreamProtocol::new("/charon/parsigex/2.0.0"); + +/// Returns the supported protocols in precedence order. +pub fn protocols() -> Vec { + vec![PROTOCOL_NAME] +} + +/// Error type for proto and conversion operations. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Core codec error. + #[error(transparent)] + Codec(#[from] ParSigExCodecError), + + /// Broadcast failed for a peer. + #[error("broadcast to peer {peer} failed")] + BroadcastPeer { + /// Peer for which the broadcast failed. + peer: PeerId, + }, +} + +/// Result type for partial signature exchange operations. +pub type Result = std::result::Result; diff --git a/crates/core/src/parsigex/protocol.rs b/crates/parsigex/src/protocol.rs similarity index 61% rename from crates/core/src/parsigex/protocol.rs rename to crates/parsigex/src/protocol.rs index d3bb812a..bfaccaf9 100644 --- a/crates/core/src/parsigex/protocol.rs +++ b/crates/parsigex/src/protocol.rs @@ -2,15 +2,14 @@ use std::io; -use futures::prelude::*; use libp2p::swarm::Stream; use prost::Message; -use unsigned_varint::aio::read_usize; -use crate::{ +use pluto_core::{ corepb::v1::{core as pbcore, parsigex as pbparsigex}, types::{Duty, ParSignedDataSet}, }; +use pluto_p2p::proto; use super::{Error, Result as ParasigexResult}; @@ -45,10 +44,14 @@ pub fn encode_message(duty: &Duty, data_set: &ParSignedDataSet) -> ParasigexResu /// Decodes a partial signature exchange message. 
pub fn decode_message(bytes: &[u8]) -> ParasigexResult<(Duty, ParSignedDataSet)> { - let pb: pbparsigex::ParSigExMsg = - decode_protobuf(bytes).map_err(|_| Error::InvalidMessageFields)?; - let duty_pb = pb.duty.ok_or(Error::InvalidMessageFields)?; - let data_set_pb = pb.data_set.ok_or(Error::InvalidMessageFields)?; + let pb: pbparsigex::ParSigExMsg = decode_protobuf(bytes) + .map_err(|_| Error::from(pluto_core::ParSigExCodecError::InvalidMessageFields))?; + let duty_pb = pb + .duty + .ok_or(pluto_core::ParSigExCodecError::InvalidMessageFields)?; + let data_set_pb = pb + .data_set + .ok_or(pluto_core::ParSigExCodecError::InvalidMessageFields)?; let duty = Duty::try_from(&duty_pb)?; let data_set = ParSignedDataSet::try_from((&duty.duty_type, &data_set_pb))?; Ok((duty, data_set)) @@ -56,28 +59,10 @@ pub fn decode_message(bytes: &[u8]) -> ParasigexResult<(Duty, ParSignedDataSet)> /// Sends one protobuf message on the stream. pub async fn send_message(stream: &mut Stream, payload: &[u8]) -> io::Result<()> { - let mut len_buf = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(payload.len(), &mut len_buf); - stream.write_all(encoded_len).await?; - stream.write_all(payload).await?; - stream.flush().await + proto::write_length_delimited(stream, payload).await } /// Receives one protobuf payload from the stream. 
pub async fn recv_message(stream: &mut Stream) -> io::Result> { - let length = read_usize(&mut *stream).await.map_err(|err| match err { - unsigned_varint::io::ReadError::Io(err) => err, - other => io::Error::new(io::ErrorKind::InvalidData, other), - })?; - - if length > MAX_MESSAGE_SIZE { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("message too large: {length} bytes"), - )); - } - - let mut buf = vec![0_u8; length]; - stream.read_exact(&mut buf).await?; - Ok(buf) + proto::read_length_delimited(stream, MAX_MESSAGE_SIZE).await } diff --git a/crates/peerinfo/src/protocol.rs b/crates/peerinfo/src/protocol.rs index 2fe6ad70..5b284521 100644 --- a/crates/peerinfo/src/protocol.rs +++ b/crates/peerinfo/src/protocol.rs @@ -17,14 +17,12 @@ use std::{ }; use chrono::{DateTime, Utc}; -use futures::prelude::*; use libp2p::{PeerId, swarm::Stream}; use pluto_core::version::{self, SemVer, SemVerError}; -use prost::Message; +use pluto_p2p::proto; use regex::Regex; use tokio::sync::Mutex; use tracing::{info, warn}; -use unsigned_varint::aio::read_usize; use crate::{ LocalPeerInfo, @@ -32,9 +30,6 @@ use crate::{ peerinfopb::v1::peerinfo::PeerInfo, }; -/// Maximum message size (64KB should be plenty for peer info). -const MAX_MESSAGE_SIZE: usize = 64 * 1024; - static GIT_HASH_RE: LazyLock = LazyLock::new(|| Regex::new(r"^[0-9a-f]{7}$").expect("invalid regex")); @@ -51,57 +46,6 @@ pub struct ProtocolState { local_info: LocalPeerInfo, } -/// Writes a protobuf message with unsigned varint length prefix to the stream. 
-/// -/// Wire format: `[uvarint length][protobuf bytes]` -async fn write_protobuf( - stream: &mut S, - msg: &M, -) -> io::Result<()> { - // Encode message to protobuf bytes - let mut buf = Vec::with_capacity(msg.encoded_len()); - msg.encode(&mut buf) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; - - // Write unsigned varint length prefix - let mut len_buf = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(buf.len(), &mut len_buf); - stream.write_all(encoded_len).await?; - - // Write protobuf bytes - stream.write_all(&buf).await?; - stream.flush().await -} - -/// Reads a protobuf message with unsigned varint length prefix from the stream. -/// -/// Wire format: `[uvarint length][protobuf bytes]` -/// -/// Returns an error if the message exceeds `MAX_MESSAGE_SIZE`. -async fn read_protobuf( - stream: &mut S, -) -> io::Result { - // Read unsigned varint length prefix - let msg_len = read_usize(&mut *stream).await.map_err(|e| match e { - unsigned_varint::io::ReadError::Io(io_err) => io_err, - other => io::Error::new(io::ErrorKind::InvalidData, other), - })?; - - if msg_len > MAX_MESSAGE_SIZE { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("message too large: {msg_len} bytes (max: {MAX_MESSAGE_SIZE})"), - )); - } - - // Read exactly `msg_len` protobuf bytes - let mut buf = vec![0u8; msg_len]; - stream.read_exact(&mut buf).await?; - - // Unmarshal protobuf - M::decode(&buf[..]).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) -} - /// Errors that can occur during the protocol. 
#[derive(Debug, thiserror::Error)] pub enum ProtocolError { @@ -317,8 +261,8 @@ impl ProtocolState { request: &PeerInfo, ) -> io::Result<(Stream, PeerInfo)> { let start = Instant::now(); - write_protobuf(&mut stream, request).await?; - let response = read_protobuf(&mut stream).await?; + proto::write_protobuf(&mut stream, request).await?; + let response = proto::read_protobuf(&mut stream).await?; let rtt = start.elapsed(); self.validate_peer_info(&response, rtt).await; @@ -334,8 +278,8 @@ impl ProtocolState { mut stream: Stream, local_info: &PeerInfo, ) -> io::Result<(Stream, PeerInfo)> { - let request = read_protobuf(&mut stream).await?; - write_protobuf(&mut stream, local_info).await?; + let request = proto::read_protobuf(&mut stream).await?; + proto::write_protobuf(&mut stream, local_info).await?; Ok((stream, request)) } } @@ -344,6 +288,7 @@ impl ProtocolState { mod tests { use super::*; use hex_literal::hex; + use prost::Message; // Test case: minimal // CharonVersion: "v1.0.0" @@ -571,7 +516,7 @@ mod tests { // Write to a cursor let mut buf = Vec::new(); - write_protobuf(&mut buf, &original).await.unwrap(); + proto::write_protobuf(&mut buf, &original).await.unwrap(); // The wire format should be: [varint length][protobuf bytes] // Minimal message is 14 bytes, so length prefix is just 1 byte (14 < 128) @@ -580,7 +525,7 @@ mod tests { // Read it back let mut cursor = futures::io::Cursor::new(&buf[..]); - let decoded: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); + let decoded: PeerInfo = proto::read_protobuf(&mut cursor).await.unwrap(); assert_eq!(original, decoded); } @@ -589,11 +534,11 @@ mod tests { let original = make_full_peerinfo(); let mut buf = Vec::new(); - write_protobuf(&mut buf, &original).await.unwrap(); + proto::write_protobuf(&mut buf, &original).await.unwrap(); // Read it back let mut cursor = futures::io::Cursor::new(&buf[..]); - let decoded: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); + let decoded: PeerInfo = 
proto::read_protobuf(&mut cursor).await.unwrap(); assert_eq!(original, decoded); } @@ -609,10 +554,10 @@ mod tests { for original in variants { let mut buf = Vec::new(); - write_protobuf(&mut buf, &original).await.unwrap(); + proto::write_protobuf(&mut buf, &original).await.unwrap(); let mut cursor = futures::io::Cursor::new(&buf[..]); - let decoded: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); + let decoded: PeerInfo = proto::read_protobuf(&mut cursor).await.unwrap(); assert_eq!(original, decoded); } } @@ -621,13 +566,13 @@ mod tests { async fn test_read_protobuf_message_too_large() { // Create a buffer with a length prefix that exceeds MAX_MESSAGE_SIZE let mut buf = Vec::new(); - let large_len = MAX_MESSAGE_SIZE + 1; + let large_len = proto::MAX_MESSAGE_SIZE + 1; let mut len_buf = unsigned_varint::encode::usize_buffer(); let encoded_len = unsigned_varint::encode::usize(large_len, &mut len_buf); buf.extend_from_slice(encoded_len); let mut cursor = futures::io::Cursor::new(&buf[..]); - let result: io::Result = read_protobuf(&mut cursor).await; + let result: io::Result = proto::read_protobuf(&mut cursor).await; assert!(result.is_err()); let err = result.unwrap_err(); @@ -641,7 +586,7 @@ mod tests { let invalid_data = [0x05, 0xff, 0xff, 0xff, 0xff, 0xff]; // length 5, then garbage let mut cursor = futures::io::Cursor::new(&invalid_data[..]); - let result: io::Result = read_protobuf(&mut cursor).await; + let result: io::Result = proto::read_protobuf(&mut cursor).await; assert!(result.is_err()); assert_eq!(result.unwrap_err().kind(), io::ErrorKind::InvalidData); @@ -653,7 +598,7 @@ mod tests { let truncated = [0x10]; // claims 16 bytes but has none let mut cursor = futures::io::Cursor::new(&truncated[..]); - let result: io::Result = read_protobuf(&mut cursor).await; + let result: io::Result = proto::read_protobuf(&mut cursor).await; assert!(result.is_err()); assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); @@ -667,15 +612,15 @@ mod tests { 
// Write multiple messages to the same buffer let mut buf = Vec::new(); - write_protobuf(&mut buf, &msg1).await.unwrap(); - write_protobuf(&mut buf, &msg2).await.unwrap(); - write_protobuf(&mut buf, &msg3).await.unwrap(); + proto::write_protobuf(&mut buf, &msg1).await.unwrap(); + proto::write_protobuf(&mut buf, &msg2).await.unwrap(); + proto::write_protobuf(&mut buf, &msg3).await.unwrap(); // Read them back in order let mut cursor = futures::io::Cursor::new(&buf[..]); - let decoded1: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); - let decoded2: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); - let decoded3: PeerInfo = read_protobuf(&mut cursor).await.unwrap(); + let decoded1: PeerInfo = proto::read_protobuf(&mut cursor).await.unwrap(); + let decoded2: PeerInfo = proto::read_protobuf(&mut cursor).await.unwrap(); + let decoded3: PeerInfo = proto::read_protobuf(&mut cursor).await.unwrap(); assert_eq!(msg1, decoded1); assert_eq!(msg2, decoded2);