diff --git a/.gitignore b/.gitignore index f7827fb..240074a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ data *.json .DS_Store configs +*.sqlite* diff --git a/Cargo.lock b/Cargo.lock index b4da7a4..3d91cf3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -80,6 +80,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -94,6 +95,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -312,7 +319,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -334,7 +341,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -345,7 +352,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -359,6 +366,15 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atty" version = "0.2.14" @@ -636,6 +652,9 @@ name = "bitflags" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -932,7 +951,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1127,6 +1146,21 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.3.2" @@ -1207,6 +1241,16 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.16" @@ -1402,6 +1446,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] @@ -1524,6 +1569,12 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "dptree" version = "0.3.0" @@ -1570,6 +1621,9 @@ name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -1692,6 +1746,17 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys", +] + [[package]] name = "eth-keystore" version = "0.5.0" @@ -1828,7 +1893,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "syn 2.0.38", + "syn 2.0.39", "toml 0.7.8", "walkdir", ] @@ -1846,7 +1911,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1903,7 +1968,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.38", + "syn 2.0.39", "tempfile", "thiserror", "tiny-keccak", @@ -2053,6 +2118,12 @@ dependencies = [ "yansi", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "eyre" version = "0.6.8" @@ -2098,6 +2169,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -2126,6 +2203,17 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2214,6 +2302,17 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + [[package]] name = "futures-io" version = "0.3.29" @@ -2239,7 +2338,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2508,6 +2607,10 @@ name = "hashbrown" version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +dependencies = [ + "ahash 0.8.6", + "allocator-api2", +] [[package]] name = "hashers" @@ -2518,6 +2621,15 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.2", +] + [[package]] name = "headers" version = "0.3.9" @@ -2547,6 +2659,9 @@ name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + 
"unicode-segmentation", +] [[package]] name = "hermit-abi" @@ -2569,6 +2684,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -2863,9 +2987,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -2953,6 +3077,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "lazycell" @@ -2962,9 +3089,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libloading" @@ -2982,6 +3109,28 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.10" @@ -3115,7 +3264,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3322,6 +3471,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -3332,6 +3498,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.17" @@ -3370,7 +3547,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3427,9 +3604,9 @@ dependencies = [ [[package]] name = 
"openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -3448,7 +3625,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3459,9 +3636,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", @@ -3633,7 +3810,7 @@ checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall", "smallvec", "windows-targets", ] @@ -3655,6 +3832,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "paste" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + [[package]] name = "path-slash" version = "0.2.1" @@ -3698,6 +3881,15 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.0" @@ -3765,7 +3957,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3803,7 +3995,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3818,6 +4010,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.8", + "pkcs8 0.10.2", + "spki 0.7.2", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -3924,7 +4127,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4175,15 +4378,6 @@ dependencies = [ "erasable", ] -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -4195,12 +4389,12 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ 
"getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] @@ -4381,6 +4575,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rsa" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ef35bf3e7fe15a53c4ab08a998e42271eab13eb0db224126bc7bc4c4bad96d" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8 0.10.2", + "rand_core", + "signature 2.1.0", + "spki 0.7.2", + "subtle", + "zeroize", +] + [[package]] name = "rsb_derive" version = "0.5.1" @@ -4722,7 +4936,7 @@ checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5035,6 +5249,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -5056,6 +5273,211 @@ dependencies = [ "der 0.7.8", ] +[[package]] +name = "sqlformat" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" +dependencies = [ + "itertools 0.11.0", + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d" +dependencies = [ + "ahash 0.8.6", + "atoi", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap 2.1.0", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc" +dependencies = [ + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db" +dependencies = [ + "atoi", + "base64 0.21.5", + "bitflags 2.4.1", + "byteorder", + "bytes", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + 
"itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624" +dependencies = [ + "atoi", + "base64 0.21.5", + "bitflags 2.4.1", + "byteorder", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", +] + [[package]] name = "sscanf" version = "0.4.1" @@ -5079,7 +5501,7 @@ dependencies = [ "quote", "regex-syntax 0.6.29", "strsim", - "syn 2.0.38", + "syn 2.0.39", "unicode-width", ] @@ -5102,6 +5524,17 @@ dependencies = [ "precomputed-hash", ] +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.10.0" @@ -5149,7 +5582,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5192,6 +5625,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3", + "sqlx", "thiserror", "tokio", "tracing", @@ -5238,9 +5672,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -5368,7 +5802,7 @@ checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand", - "redox_syscall 0.4.1", + "redox_syscall", "rustix", "windows-sys", ] @@ -5413,7 +5847,9 @@ dependencies = [ "rand", "serde", "serde_json", + "sqlx", "subgraph-radio", + "tempfile", "test-utils", "tokio", "tower", @@ -5464,6 +5900,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "sqlx", "subgraph-radio", "tokio", "tower", @@ -5497,7 +5934,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5601,7 +6038,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5818,7 +6255,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6020,6 +6457,12 @@ version = 
"0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "universal-hash" version = "0.5.1" @@ -6194,9 +6637,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6204,24 +6647,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -6231,9 +6674,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6241,22 +6684,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "wasm-streams" @@ -6273,9 +6716,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", @@ -6299,6 +6742,12 @@ dependencies = [ "rustix", ] +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" + [[package]] name = "winapi" version = "0.3.9" 
@@ -6407,9 +6856,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176b6138793677221d420fd2f0aeeced263f197688b36484660da767bca2fa32" +checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" dependencies = [ "memchr", ] @@ -6460,22 +6909,22 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.21" +version = "0.7.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686b7e407015242119c33dab17b8f61ba6843534de936d94368856528eae4dcc" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.21" +version = "0.7.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020f3dfe25dfc38dfea49ce62d5d45ecdd7f0d8a724fa63eb36b6eba4ec76806" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] diff --git a/subgraph-radio/Cargo.toml b/subgraph-radio/Cargo.toml index f897cd0..d046795 100644 --- a/subgraph-radio/Cargo.toml +++ b/subgraph-radio/Cargo.toml @@ -53,8 +53,14 @@ async-trait = "0.1.71" metrics = "0.21.0" opentelemetry = { version = "0.19.0", features = ["rt-tokio", "trace"] } tracing-opentelemetry = "0.18.0" -clap = { version = "4.4", features = ["cargo", "unstable-doc", "derive", "env"] } +clap = { version = "4.4", features = [ + "cargo", + "unstable-doc", + "derive", + "env", +] } confy = "0.5.1" +sqlx = { version = "0.7.2", features = ["sqlite", "runtime-tokio"] } [dev-dependencies] criterion = { version = "0.4", features = ["async", "async_futures"] } diff --git a/subgraph-radio/benches/attestations.rs b/subgraph-radio/benches/attestations.rs index 7541a77..810dd46 100644 --- a/subgraph-radio/benches/attestations.rs +++ b/subgraph-radio/benches/attestations.rs @@ -5,48 +5,22 @@ extern crate criterion; mod attestation { use criterion::{black_box, criterion_group, Criterion}; use graphcast_sdk::graphcast_agent::message_typing::GraphcastMessage; + use sqlx::SqlitePool; use std::collections::HashMap; use subgraph_radio::{ + entities::{create_local_attestation, NewAttestation}, messages::poi::PublicPoiMessage, - operator::attestation::{ - compare_attestations, local_comparison_point, update_blocks, Attestation, - }, + operator::attestation::{compare_attestations, local_comparison_point, Attestation}, + setup_database, }; criterion_group!( benches, - update_block_bench, update_attestations_bench, compare_attestations_bench, comparison_point_bench ); - fn update_block_bench(c: &mut Criterion) { - let mut blocks: HashMap> = black_box(black_box(HashMap::new())); - black_box(blocks.insert( - 42, - vec![black_box(Attestation::new( - "default".to_string(), - 0.0, - Vec::new(), - Vec::new(), - ))], - )); - - c.bench_function("update_block", |b| { - b.iter(|| { - update_blocks( - 42, - &blocks, - "awesome-ppoi".to_string(), - 0.0, - "0xadd3".to_string(), - 1, - ) - }) - }); - } - fn update_attestations_bench(c: &mut Criterion) { let attestation = black_box(Attestation::new( "awesome-ppoi".to_string(), @@ -78,7 +52,7 @@ mod attestation { Vec::new(), vec![0], )); - black_box(local_blocks.insert(42, local)); + black_box(local_blocks.insert(42, local.clone())); let mut 
remote_attestations: HashMap>> = black_box(HashMap::new()); @@ -91,9 +65,9 @@ mod attestation { c.bench_function("compare_attestations", |b| { b.iter(|| { compare_attestations( + Some(local.clone()), 42, - black_box(remote_attestations.clone()), - black_box(&local_attestations), + black_box(&remote_attestations.clone()), "my-awesome-hash", ) }) @@ -101,42 +75,61 @@ mod attestation { } fn comparison_point_bench(c: &mut Criterion) { - let mut local_blocks: HashMap = black_box(HashMap::new()); - let attestation1 = black_box(Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![2], - )); + let runtime = tokio::runtime::Runtime::new().unwrap(); + runtime.block_on(async { + // Initialize the database connection pool + let db = SqlitePool::connect("sqlite::memory:").await.unwrap(); - let attestation2 = black_box(Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa2".to_string()], - vec![4], - )); + setup_database(&db).await; - let attestation3 = black_box(Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec!["0xa3".to_string()], - vec![6], - )); - - black_box(local_blocks.insert(42, attestation1)); - black_box(local_blocks.insert(43, attestation2)); - black_box(local_blocks.insert(44, attestation3)); - - let mut local_attestations: HashMap> = - black_box(HashMap::new()); - black_box(local_attestations.insert("hash".to_string(), local_blocks.clone())); - black_box(local_attestations.insert("hash2".to_string(), local_blocks)); - let local: HashMap> = black_box(local_attestations); - - c.bench_function("comparison_point", |b| { - b.iter(|| { - local_comparison_point(black_box(&local), &test_msg_vec(), "hash".to_string(), 120) - }) + let attestations = vec![ + NewAttestation::new( + "awesome-ppoi".to_string(), + 42, + "ppoi1".to_string(), + 0, + vec!["0xa1".to_string()], + "sender_group_hash_example".to_string(), + vec![2], + ), + NewAttestation::new( + "awesome-ppoi".to_string(), + 43, + "ppoi2".to_string(), + 0, + vec!["0xa2".to_string()], + "sender_group_hash_example".to_string(), + vec![4], + ), + NewAttestation::new( + "awesome-ppoi".to_string(), + 44, + "ppoi3".to_string(), + 1, + vec!["0xa3".to_string()], + "sender_group_hash_example".to_string(), + vec![6], + ), + ]; + + for attestation in attestations { + create_local_attestation(&db, attestation).await.unwrap(); + } + + // Run the benchmark + c.bench_function("comparison_point", |b| { + b.iter(|| { + let msg_vec = test_msg_vec(); + let db_clone = db.clone(); + runtime.block_on(async { + local_comparison_point(&msg_vec, "hash", 120, db_clone) + .await + .unwrap() + }) + }) + }); + + std::mem::drop(db); }); } diff --git a/subgraph-radio/migrations/20231109112858_persisted-state.down.sql b/subgraph-radio/migrations/20231109112858_persisted-state.down.sql new file mode 100644 index 0000000..87252f5 --- /dev/null +++ b/subgraph-radio/migrations/20231109112858_persisted-state.down.sql @@ -0,0 +1,5 @@ +DROP TABLE comparison_results; +DROP TABLE upgrade_intent_messages; +DROP TABLE remote_ppoi_messages; +DROP TABLE local_attestations; +DROP TABLE notifications; diff --git a/subgraph-radio/migrations/20231109112858_persisted-state.up.sql b/subgraph-radio/migrations/20231109112858_persisted-state.up.sql new file mode 100644 index 0000000..21e7760 --- /dev/null +++ b/subgraph-radio/migrations/20231109112858_persisted-state.up.sql @@ -0,0 +1,48 @@ +CREATE TABLE IF NOT EXISTS local_attestations ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + identifier VARCHAR(255) NOT NULL, + block_number BIGINT NOT 
NULL,
+    ppoi VARCHAR(255) NOT NULL,
+    stake_weight BIGINT NOT NULL,
+    senders TEXT NOT NULL,
+    sender_group_hash VARCHAR(255) NOT NULL,
+    timestamp TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS remote_ppoi_messages (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    identifier VARCHAR(255) NOT NULL,
+    nonce BIGINT NOT NULL,
+    graph_account VARCHAR(255) NOT NULL,
+    content VARCHAR(255) NOT NULL,
+    network VARCHAR(255) NOT NULL,
+    block_number BIGINT NOT NULL,
+    block_hash VARCHAR(255) NOT NULL,
+    signature VARCHAR(255) NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS upgrade_intent_messages (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    identifier VARCHAR(255) NOT NULL,
+    nonce BIGINT NOT NULL,
+    graph_account VARCHAR(255) NOT NULL,
+    subgraph_id VARCHAR(255) NOT NULL,
+    new_hash VARCHAR(255) NOT NULL,
+    signature VARCHAR(255) NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS comparison_results (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    identifier VARCHAR(255) NOT NULL,
+    deployment VARCHAR(255) NOT NULL,
+    block_number BIGINT NOT NULL,
+    result_type VARCHAR(255) NOT NULL,
+    local_attestation_json TEXT NOT NULL,
+    attestations_json TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS notifications (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    deployment VARCHAR(255) UNIQUE NOT NULL,
+    message TEXT NOT NULL
+);
diff --git a/subgraph-radio/src/config.rs b/subgraph-radio/src/config.rs
index 5aa5dbb..f0b1df6 100644
--- a/subgraph-radio/src/config.rs
+++ b/subgraph-radio/src/config.rs
@@ -14,10 +14,9 @@ use graphcast_sdk::{
};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
-use tracing::{debug, info, trace};
+use tracing::info;
use crate::operator::notifier::NotificationMode;
-use crate::state::{panic_hook, PersistedState};
use crate::{active_allocation_hashes, syncing_deployment_hashes};
#[derive(clap::ValueEnum, Clone, Debug, Serialize, Deserialize, Default)]
@@ -140,28 +139,6 @@ impl Config {
        Ok((my_address, my_stake))
    }
-    pub async fn init_radio_state(&self) -> PersistedState {
-        let file_path = &self.radio_setup().persistence_file_path.clone();
-
-        if let Some(path) = file_path {
-            //TODO: set up synchronous panic hook as part of PersistedState functions
-            // panic_hook(&path);
-            let state = PersistedState::load_cache(path);
-            trace!(
-                local_attestations = tracing::field::debug(&state.local_attestations()),
-                remote_ppoi_messages = tracing::field::debug(&state.remote_ppoi_messages()),
-                state = tracing::field::debug(&state),
-                "Loaded Persisted state cache"
-            );
-
-            panic_hook(path);
-            state
-        } else {
-            debug!("Created new state");
-            PersistedState::new(None, None, None, None, None)
-        }
-    }
-
    pub fn callbook(&self) -> CallBook {
        CallBook::new(
            self.graph_stack().registry_subgraph.clone(),
@@ -306,7 +283,7 @@ pub struct RadioSetup {
    #[clap(
        long,
        value_name = "GRAPHCAST_NETWORK",
-        default_value = "testnet",
+        default_value = "mainnet",
        env = "GRAPHCAST_NETWORK",
        help = "Supported Graphcast networks: mainnet, testnet"
    )]
@@ -426,13 +403,6 @@ pub struct RadioSetup {
        env = "SERVER_PORT"
    )]
    pub server_port: Option,
-    #[clap(
-        long,
-        value_name = "PERSISTENCE_FILE_PATH",
-        help = "If set, the Radio will periodically store states of the program to the file in json format",
-        env = "PERSISTENCE_FILE_PATH"
-    )]
-    pub persistence_file_path: Option,
    #[clap(
        long,
        value_name = "RADIO_NAME",
@@ -497,6 +467,13 @@
        default_value = "24"
    )]
    pub notification_interval: u64,
+    #[clap(
+        long,
+        value_name = "SQLITE_FILE_PATH",
+        help = "If set, the Radio will persist its state to a SQLite database at this file path",
+        env = "SQLITE_FILE_PATH"
+    )]
+    pub 
sqlite_file_path: Option, } #[derive(Clone, Debug, Args, Serialize, Deserialize, Default)] diff --git a/subgraph-radio/src/entities/mod.rs b/subgraph-radio/src/entities/mod.rs new file mode 100644 index 0000000..b95c72a --- /dev/null +++ b/subgraph-radio/src/entities/mod.rs @@ -0,0 +1,726 @@ +use graphcast_sdk::graphcast_agent::message_typing::GraphcastMessage; +use serde::{Deserialize, Serialize}; +use sqlx::FromRow; +use sqlx::{Error as SqlxError, SqlitePool}; + +use crate::messages::poi::PublicPoiMessage; +use crate::messages::upgrade::UpgradeIntentMessage; +use crate::operator::attestation::{Attestation, ComparisonResult, ComparisonResultType}; + +pub async fn create_local_attestation( + pool: &SqlitePool, + new_attestation: NewAttestation, +) -> Result { + let senders = new_attestation.senders.join(","); + + let timestamp = new_attestation + .timestamp + .iter() + .map(|&t| t.to_string()) + .collect::>() + .join(","); + + let inserted_record = sqlx::query_as!( + AttestationRecord, + r#" + INSERT INTO local_attestations (identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp) + VALUES (?, ?, ?, ?, ?, ?, ?) + RETURNING id, identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp + "#, + new_attestation.identifier, + new_attestation.block_number, + new_attestation.ppoi, + new_attestation.stake_weight, + senders, + new_attestation.sender_group_hash, + timestamp, + ) + .fetch_one(pool) + .await?; + + Ok(inserted_record) +} + +pub async fn get_local_attestation( + pool: &SqlitePool, + identifier: &str, + block_number: i64, +) -> Result, SqlxError> { + let attestation = sqlx::query_as!( + AttestationRecord, + r#" + SELECT id, identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp + FROM local_attestations + WHERE identifier = ? AND block_number = ? + "#, + identifier, + block_number + ) + .fetch_optional(pool) + .await?; + + Ok(attestation) +} + +pub async fn get_all_local_attestations_for_identifier( + pool: &SqlitePool, + identifier: &str, +) -> Result, SqlxError> { + let attestations = sqlx::query_as!( + AttestationRecord, + r#" + SELECT id, identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp + FROM local_attestations + WHERE identifier = ? + "#, + identifier + ) + .fetch_all(pool) + .await?; + + Ok(attestations) +} + +pub async fn get_all_local_attestations( + pool: &SqlitePool, +) -> Result, SqlxError> { + let attestations = sqlx::query_as!( + AttestationRecord, + r#" + SELECT id, identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp + FROM local_attestations + "# + ) + .fetch_all(pool) + .await?; + + Ok(attestations) +} + +pub async fn delete_outdated_local_attestations( + pool: &SqlitePool, + identifier: &str, + block_number: i64, +) -> Result { + let affected_rows = sqlx::query!( + r#" + DELETE FROM local_attestations + WHERE identifier = ? AND block_number <= ? + "#, + identifier, + block_number + ) + .execute(pool) + .await? + .rows_affected(); + + Ok(affected_rows as usize) +} + +pub async fn get_local_attestation_by_id( + pool: &SqlitePool, + attestation_id: i64, +) -> Result { + let record = sqlx::query_as!( + AttestationRecord, + r#" + SELECT id, identifier, block_number, ppoi, stake_weight, senders, sender_group_hash, timestamp + FROM local_attestations + WHERE id = ? 
+ "#, + attestation_id + ) + .fetch_one(pool) + .await?; + + // Assuming you have a method to convert AttestationRecord to Attestation + Ok(Attestation::from_record(record)) +} + +pub async fn insert_remote_ppoi_message( + pool: &SqlitePool, + new_message: &NewRemotePpoiMessage, +) -> Result { + let inserted_record = sqlx::query_as!( + RemotePpoiMessageRecord, + r#" + INSERT INTO remote_ppoi_messages (identifier, nonce, graph_account, content, network, block_number, block_hash, signature) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + RETURNING id, identifier, nonce, graph_account, content, network, block_number, block_hash, signature + "#, + new_message.identifier, + new_message.nonce, + new_message.graph_account, + new_message.content, + new_message.network, + new_message.block_number, + new_message.block_hash, + new_message.signature, + ) + .fetch_one(pool) + .await?; + + Ok(inserted_record) +} + +pub async fn count_remote_ppoi_messages(pool: &SqlitePool, identifier: &str) -> i32 { + let count: i32 = sqlx::query_scalar!( + r#" + SELECT COUNT(*) + FROM remote_ppoi_messages + WHERE identifier = ? + "#, + identifier + ) + .fetch_one(pool) + .await + .unwrap(); + + count +} + +pub async fn get_remote_ppoi_messages( + pool: &SqlitePool, +) -> Result>, SqlxError> { + let records = sqlx::query_as!( + RemotePpoiMessageRecord, + r#" + SELECT id, identifier, nonce, graph_account, content, network, block_number, block_hash, signature + FROM remote_ppoi_messages + "# + ) + .fetch_all(pool) + .await?; + + let graphcast_messages = records + .into_iter() + .map(|record| GraphcastMessage { + identifier: record.identifier.clone(), + nonce: record.nonce, + graph_account: record.graph_account.clone(), + signature: record.signature, + payload: PublicPoiMessage { + identifier: record.identifier, + content: record.content, + nonce: record.nonce, + network: record.network, + block_number: record.block_number as u64, + block_hash: record.block_hash, + graph_account: record.graph_account, + }, + }) + .collect(); + + Ok(graphcast_messages) +} + +pub async fn clean_remote_ppoi_messages( + pool: &SqlitePool, + identifier: &str, + block_number: i64, +) -> Result { + let affected_rows = sqlx::query!( + r#" + DELETE FROM remote_ppoi_messages + WHERE identifier = ? AND block_number <= ? + "#, + identifier, + block_number + ) + .execute(pool) + .await? + .rows_affected(); + + Ok(affected_rows as usize) +} + +pub async fn insert_upgrade_intent_message( + pool: &SqlitePool, + msg: GraphcastMessage, +) -> Result { + let new_message = NewUpgradeIntentMessage { + identifier: msg.identifier, + nonce: msg.nonce, + graph_account: msg.graph_account, + subgraph_id: msg.payload.subgraph_id, + new_hash: msg.payload.new_hash, + signature: msg.signature, + }; + + let inserted_record = sqlx::query_as!( + UpgradeIntentMessageRecord, + r#" + INSERT INTO upgrade_intent_messages (identifier, nonce, graph_account, subgraph_id, new_hash, signature) + VALUES (?, ?, ?, ?, ?, ?) 
+ RETURNING id, identifier, nonce, graph_account, subgraph_id, new_hash, signature + "#, + new_message.identifier, + new_message.nonce, + new_message.graph_account, + new_message.subgraph_id, + new_message.new_hash, + new_message.signature, + ) + .fetch_one(pool) + .await?; + + Ok(inserted_record) +} + +pub async fn get_upgrade_intent_message( + pool: &SqlitePool, + identifier: &str, + nonce: i64, +) -> Result, SqlxError> { + let message = sqlx::query_as!( + UpgradeIntentMessageRecord, + r#" + SELECT id, identifier, nonce, graph_account, subgraph_id, new_hash, signature + FROM upgrade_intent_messages + WHERE identifier = ? AND nonce = ? + "#, + identifier, + nonce + ) + .fetch_optional(pool) + .await?; + + Ok(message) +} + +pub async fn get_all_upgrade_intent_messages( + pool: &SqlitePool, +) -> Result, SqlxError> { + let messages = sqlx::query_as!( + UpgradeIntentMessageRecord, + r#" + SELECT id, identifier, nonce, graph_account, subgraph_id, new_hash, signature + FROM upgrade_intent_messages + "# + ) + .fetch_all(pool) + .await?; + + Ok(messages) +} + +pub async fn add_upgrade_intent_message( + pool: &SqlitePool, + msg: GraphcastMessage, +) -> Result<(), SqlxError> { + let key = msg.payload.subgraph_id.clone(); + let existing_message = get_upgrade_intent_message(pool, &key, msg.nonce).await?; + + match existing_message { + Some(_) => { + sqlx::query!( + r#" + UPDATE upgrade_intent_messages + SET identifier = ?, nonce = ?, graph_account = ?, new_hash = ?, signature = ? + WHERE subgraph_id = ? + "#, + msg.identifier, + msg.nonce, + msg.graph_account, + msg.payload.new_hash, + msg.signature, + key + ) + .execute(pool) + .await?; + } + None => { + // Insert the new message + insert_upgrade_intent_message(pool, msg).await?; + } + } + + Ok(()) +} + +pub async fn recent_upgrade( + pool: &SqlitePool, + msg: &UpgradeIntentMessage, + upgrade_threshold: i64, +) -> Result { + // Compute the nonce threshold and bind it to a local variable + let nonce_threshold = msg.nonce - upgrade_threshold; + + let record = sqlx::query!( + r#" + SELECT EXISTS ( + SELECT 1 FROM upgrade_intent_messages + WHERE subgraph_id = ? + AND nonce > ? + ) AS "exists_!" + "#, + msg.subgraph_id, + nonce_threshold // Use the local variable here + ) + .fetch_one(pool) + .await?; + + Ok(record.exists_ != 0) +} + +pub async fn create_notification( + pool: &SqlitePool, + new_notification: NewNotification, +) -> Result { + let inserted_record = sqlx::query_as!( + NotificationRecord, + r#" + INSERT INTO notifications (deployment, message) + VALUES (?, ?) + RETURNING id, deployment, message + "#, + new_notification.deployment, + new_notification.message, + ) + .fetch_one(pool) + .await?; + + Ok(inserted_record) +} + +pub async fn get_notification_by_deployment( + pool: &SqlitePool, + deployment: &str, +) -> Result { + let notification = sqlx::query_as!( + NotificationRecord, + r#" + SELECT id, deployment, message + FROM notifications + WHERE deployment = ? 
+ "#, + deployment + ) + .fetch_one(pool) + .await?; + + Ok(notification) +} + +pub async fn get_all_notifications( + pool: &SqlitePool, +) -> Result, SqlxError> { + let notifications = sqlx::query_as!( + NotificationRecord, + r#" + SELECT id, deployment, message + FROM notifications + "# + ) + .fetch_all(pool) + .await?; + + Ok(notifications) +} + +pub async fn clear_all_notifications(pool: &SqlitePool) -> Result<(), SqlxError> { + sqlx::query!( + r#" + DELETE FROM notifications + "# + ) + .execute(pool) + .await?; + + Ok(()) +} + +pub struct NewAttestation { + pub identifier: String, + pub block_number: i64, + pub ppoi: String, + pub stake_weight: i64, + pub senders: Vec, + pub sender_group_hash: String, + pub timestamp: Vec, +} + +impl NewAttestation { + pub fn new( + identifier: String, + block_number: i64, + ppoi: String, + stake_weight: i64, + senders: Vec, + sender_group_hash: String, + timestamp: Vec, + ) -> NewAttestation { + NewAttestation { + identifier, + block_number, + ppoi, + stake_weight, + senders, + sender_group_hash, + timestamp, + } + } +} + +#[derive(FromRow, Debug, Clone, Serialize, Deserialize)] +pub struct AttestationRecord { + pub id: i64, + pub identifier: String, + pub block_number: i64, + pub ppoi: String, + pub stake_weight: i64, + pub senders: String, + pub sender_group_hash: String, + pub timestamp: String, +} + +#[derive(sqlx::FromRow)] +#[allow(dead_code)] +pub struct RemotePpoiMessageRecord { + id: i64, + identifier: String, + nonce: i64, + graph_account: String, + content: String, + network: String, + block_number: i64, + block_hash: String, + signature: String, +} + +impl From for PublicPoiMessage { + fn from(record: RemotePpoiMessageRecord) -> Self { + PublicPoiMessage { + identifier: record.identifier, + content: record.content, + nonce: record.nonce, + network: record.network, + block_number: record.block_number as u64, + block_hash: record.block_hash, + graph_account: record.graph_account, + } + } +} + +pub async fn insert_comparison_result( + pool: &SqlitePool, + new_result: &NewComparisonResult, +) -> Result { + let inserted_record = sqlx::query_as!( + ComparisonResultRecord, + r#" + INSERT INTO comparison_results (identifier, deployment, block_number, result_type, local_attestation_json, attestations_json) + VALUES (?, ?, ?, ?, ?, ?) + RETURNING id, identifier, deployment, block_number, result_type, local_attestation_json, attestations_json + "#, + new_result.identifier, + new_result.deployment, + new_result.block_number, + new_result.result_type, + new_result.local_attestation_json, + new_result.attestations_json, + ) + .fetch_one(pool) + .await?; + + Ok(inserted_record) +} + +// This function fetches all comparison results from the database. +pub async fn get_comparison_results( + pool: &SqlitePool, +) -> Result, SqlxError> { + let results = sqlx::query_as!( + ComparisonResultRecord, + r#" + SELECT id, identifier, deployment, block_number, result_type, local_attestation_json, attestations_json + FROM comparison_results + "# + ) + .fetch_all(pool) + .await?; + + Ok(results) +} + +// This function fetches comparison results of a specific type from the database. +pub async fn get_comparison_results_by_type( + pool: &SqlitePool, + result_type: &str, +) -> Result, SqlxError> { + let results = sqlx::query_as!( + ComparisonResultRecord, + r#" + SELECT id, identifier, deployment, block_number, result_type, local_attestation_json, attestations_json + FROM comparison_results + WHERE result_type = ? 
+ "#, + result_type + ) + .fetch_all(pool) + .await?; + + Ok(results) +} + +// This function fetches comparison results by a specific deployment hash from the database. +pub async fn get_comparison_results_by_deployment( + pool: &SqlitePool, + deployment_hash: &str, +) -> Result, SqlxError> { + let results = sqlx::query_as!( + ComparisonResultRecord, + r#" + SELECT id, identifier, deployment, block_number, result_type, local_attestation_json, attestations_json + FROM comparison_results + WHERE deployment = ? + "#, + deployment_hash + ) + .fetch_all(pool) + .await?; + + Ok(results) +} +// This function now returns a sqlx::Query object which can be executed later. +pub async fn update_comparison_result( + pool: &SqlitePool, + comparison_result: &ComparisonResult, +) -> Result<(), SqlxError> { + let local_attestation_json = + serde_json::to_string(&comparison_result.local_attestation.as_ref()).unwrap(); + let attestations_json = serde_json::to_string(&comparison_result.attestations).unwrap(); + + let result_type = comparison_result.result_type.to_string(); + let block = comparison_result.block_number as i64; + let deployment = comparison_result.deployment_hash(); + + sqlx::query!( + "UPDATE comparison_results + SET result_type = ?, block_number = ?, local_attestation_json = ?, attestations_json = ? + WHERE deployment = ?", + result_type, + block, + local_attestation_json, + attestations_json, + deployment + ) + .execute(pool) + .await?; + + Ok(()) +} + +pub struct NewRemotePpoiMessage { + pub identifier: String, + pub nonce: i64, + pub graph_account: String, + pub content: String, + pub network: String, + pub block_number: i64, + pub block_hash: String, + pub signature: String, +} + +#[derive(sqlx::FromRow, Debug, Clone, Serialize, Deserialize)] +pub struct UpgradeIntentMessageRecord { + pub id: i64, + pub identifier: String, + pub nonce: i64, + pub graph_account: String, + pub subgraph_id: String, + pub new_hash: String, + pub signature: String, +} + +pub struct NewUpgradeIntentMessage { + pub identifier: String, + pub nonce: i64, + pub graph_account: String, + pub subgraph_id: String, + pub new_hash: String, + pub signature: String, +} + +pub struct NewComparisonResult { + pub identifier: String, + pub deployment: String, + pub block_number: i64, + pub result_type: String, + pub local_attestation_json: String, + pub attestations_json: String, +} + +impl From<&ComparisonResult> for NewComparisonResult { + fn from(comparison_result: &ComparisonResult) -> Self { + let local_attestation_json = match &comparison_result.local_attestation { + Some(attestation) => { + serde_json::to_string(attestation).unwrap_or_else(|_| "null".to_string()) + } + None => "null".to_string(), + }; + + let attestations_json = serde_json::to_string(&comparison_result.attestations) + .unwrap_or_else(|_| "[]".to_string()); + + NewComparisonResult { + identifier: comparison_result.deployment.clone(), + deployment: comparison_result.deployment.clone(), + block_number: comparison_result.block_number as i64, + result_type: comparison_result.result_type.to_string(), + local_attestation_json, + attestations_json, + } + } +} + +#[derive(sqlx::FromRow, Serialize, Deserialize, Debug)] +pub struct ComparisonResultRecord { + pub id: i64, + pub identifier: String, + pub deployment: String, + pub block_number: i64, + pub result_type: String, + pub local_attestation_json: String, + pub attestations_json: String, +} + +impl ComparisonResultRecord { + pub fn into_comparison_result(self) -> Result { + let result_type = match 
self.result_type.as_str() { + "NotFound" => ComparisonResultType::NotFound, + "Divergent" => ComparisonResultType::Divergent, + "Match" => ComparisonResultType::Match, + "BuildFailed" => ComparisonResultType::BuildFailed, + // TODO: don't panic + _ => panic!(), + }; + + // Deserialize the local attestation JSON string into an Attestation + let local_attestation: Attestation = serde_json::from_str(&self.local_attestation_json)?; + + // Deserialize the attestations JSON string into a Vec + let attestations: Vec = serde_json::from_str(&self.attestations_json)?; + + Ok(ComparisonResult { + deployment: self.deployment, + block_number: self.block_number as u64, + result_type, + local_attestation: Some(local_attestation), + attestations, + }) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct NewNotification { + pub deployment: String, + pub message: String, +} + +#[derive(FromRow, Debug, Serialize, Deserialize)] +pub struct NotificationRecord { + pub id: Option, + pub deployment: String, + pub message: String, +} diff --git a/subgraph-radio/src/lib.rs b/subgraph-radio/src/lib.rs index 6692aa5..bd80186 100644 --- a/subgraph-radio/src/lib.rs +++ b/subgraph-radio/src/lib.rs @@ -6,6 +6,7 @@ use derive_getters::Getters; use once_cell::sync::OnceCell; use std::{ collections::HashMap, + fmt, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -25,14 +26,15 @@ use graphcast_sdk::{ networks::NetworkName, waku_set_event_callback, BlockPointer, }; +use sqlx::{sqlite::SqliteError, Error as CoreSqlxError, SqlitePool}; pub mod config; +pub mod entities; pub mod graphql; pub mod messages; pub mod metrics; pub mod operator; pub mod server; -pub mod state; /// A global static (singleton) instance of GraphcastAgent. It is useful to ensure that we have only one GraphcastAgent /// per Radio instance, so that we can keep track of state and more easily test our Radio application. 
@@ -138,6 +140,33 @@ pub async fn shutdown(control: ControlFlow) { .graceful_shutdown(Some(Duration::from_secs(3))); } +#[derive(Debug)] +pub enum DatabaseError { + Sqlite(SqliteError), + CoreSqlx(CoreSqlxError), +} + +impl From for DatabaseError { + fn from(err: SqliteError) -> Self { + DatabaseError::Sqlite(err) + } +} + +impl From for DatabaseError { + fn from(err: CoreSqlxError) -> Self { + DatabaseError::CoreSqlx(err) + } +} + +impl fmt::Display for DatabaseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DatabaseError::Sqlite(err) => write!(f, "SQLite error: {}", err), + DatabaseError::CoreSqlx(err) => write!(f, "SQLx Core error: {}", err), + } + } +} + #[derive(Debug, thiserror::Error)] pub enum OperationError { #[error("Send message trigger isn't met: {0}")] @@ -152,6 +181,8 @@ pub enum OperationError { Query(QueryError), #[error("Attestation failure: {0}")] Attestation(AttestationError), + #[error("Database error: {0}")] + Database(DatabaseError), #[error("Others: {0}")] Others(String), } @@ -222,6 +253,92 @@ impl ControlFlow { } } +pub async fn setup_database(pool: &SqlitePool) { + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS local_attestations ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + identifier VARCHAR(255) NOT NULL, + block_number BIGINT NOT NULL, + ppoi VARCHAR(255) NOT NULL, + stake_weight BIGINT NOT NULL, + senders TEXT NOT NULL, + sender_group_hash VARCHAR(255) NOT NULL, + timestamp TEXT NOT NULL + ); + "#, + ) + .execute(pool) + .await + .unwrap(); + + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS remote_ppoi_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + identifier VARCHAR(255) NOT NULL, + nonce BIGINT NOT NULL, + graph_account VARCHAR(255) NOT NULL, + content VARCHAR(255) NOT NULL, + network VARCHAR(255) NOT NULL, + block_number BIGINT NOT NULL, + block_hash VARCHAR(255) NOT NULL, + signature VARCHAR(255) NOT NULL + ); + "#, + ) + .execute(pool) + .await + .unwrap(); + + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS upgrade_intent_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + identifier VARCHAR(255) NOT NULL, + nonce BIGINT NOT NULL, + graph_account VARCHAR(255) NOT NULL, + subgraph_id VARCHAR(255) NOT NULL, + new_hash VARCHAR(255) NOT NULL, + signature VARCHAR(255) NOT NULL + ); + "#, + ) + .execute(pool) + .await + .unwrap(); + + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS comparison_results ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + identifier VARCHAR(255) NOT NULL, + deployment VARCHAR(255) NOT NULL, + block_number BIGINT NOT NULL, + result_type VARCHAR(255) NOT NULL, + local_attestation_json TEXT NOT NULL, + attestations_json TEXT NOT NULL + ); + "#, + ) + .execute(pool) + .await + .unwrap(); + + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS notifications ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + deployment VARCHAR(255) UNIQUE NOT NULL, + message TEXT NOT NULL + ); + "#, + ) + .execute(pool) + .await + .unwrap(); +} + #[cfg(test)] mod tests { use crate::messages::poi::PublicPoiMessage; diff --git a/subgraph-radio/src/messages/poi.rs b/subgraph-radio/src/messages/poi.rs index a636b24..e90cab1 100644 --- a/subgraph-radio/src/messages/poi.rs +++ b/subgraph-radio/src/messages/poi.rs @@ -10,31 +10,34 @@ use graphcast_sdk::{ graphql::client_graph_node::query_graph_node_network_block_hash, networks::NetworkName, }; + use prost::Message; use serde::{Deserialize, Serialize}; -use std::cmp::max; -use std::collections::HashMap; -use std::sync::{Arc, Mutex as SyncMutex}; -use tracing::{debug, error, trace, 
warn}; +use sqlx::SqlitePool; +use tracing::{error, trace, warn}; use graphcast_sdk::{ graphcast_agent::{GraphcastAgent, GraphcastAgentError}, BlockPointer, }; +use crate::entities::{ + count_remote_ppoi_messages, create_local_attestation, get_local_attestation, + insert_remote_ppoi_message, NewAttestation, NewRemotePpoiMessage, +}; +use crate::operator::attestation::process_ppoi_message; +use crate::DatabaseError; use crate::{ metrics::CACHED_PPOI_MESSAGES, operator::{ attestation::{ - compare_attestations, local_comparison_point, save_local_attestation, Attestation, - ComparisonResult, + compare_attestations, local_comparison_point, Attestation, ComparisonResult, }, callbook::CallBookRadioExtensions, }, OperationError, }; -use crate::{operator::attestation::process_ppoi_message, state::PersistedState}; #[derive(Eip712, EthAbiType, Clone, Message, Serialize, Deserialize, PartialEq, SimpleObject)] #[eip712( @@ -182,8 +185,8 @@ pub async fn send_poi_message( message_block: u64, latest_block: BlockPointer, network_name: NetworkName, - local_attestations: Arc>>>, graphcast_agent: &GraphcastAgent, + db: SqlitePool, ) -> Result { trace!( message_block = message_block, @@ -193,7 +196,6 @@ pub async fn send_poi_message( // Deployment did not sync to message_block if latest_block.number < message_block { - //TODO: fill in variant in SDK let err_msg = format!( "Did not send message for deployment {}: latest_block ({}) syncing status must catch up to the message block ({})", id.clone(), @@ -201,16 +203,10 @@ pub async fn send_poi_message( ); trace!(err = err_msg, "Skip send",); return Err(OperationError::SendTrigger(err_msg)); - }; + } - // Message has already been sent - if local_attestations - .lock() - .unwrap() - .get(&id.clone()) - .and_then(|blocks| blocks.get(&message_block)) - .is_some() - { + //Message has already been sent + if let Ok(Some(_)) = get_local_attestation(&db, &id, message_block as i64).await { let err_msg = format!( "Repeated message for deployment {}, skip sending message for block: {}", id.clone(), @@ -220,8 +216,7 @@ pub async fn send_poi_message( return Err(OperationError::SkipDuplicate(err_msg)); } - let block_hash = match graphcast_agent - .callbook + let block_hash = match callbook .block_hash(&network_name.to_string(), message_block) .await { @@ -249,21 +244,33 @@ pub async fn send_poi_message( nonce, network_name, message_block, - block_hash, + block_hash.clone(), graphcast_agent.graphcast_identity.graph_account.clone(), ); + match graphcast_agent .send_message(&id, radio_message, nonce) .await { Ok(msg_id) => { - save_local_attestation( - local_attestations.clone(), - content.clone(), - id.clone(), - message_block, - ); - trace!("save local attestations: {:#?}", local_attestations); + // After successfully sending, save the attestation + let new_attestation = NewAttestation { + identifier: id.clone(), + block_number: message_block as i64, + ppoi: content.clone(), + stake_weight: 0, + timestamp: vec![Utc::now().timestamp()], + senders: vec![], + sender_group_hash: String::new(), + }; + if let Err(e) = create_local_attestation(&db, new_attestation).await { + error!( + err = tracing::field::debug(&e), + "Failed to save local attestation" + ); + } + + trace!("Saved local attestation for deployment: {}", id.clone()); Ok(msg_id) } Err(e) => { @@ -288,26 +295,34 @@ pub async fn send_poi_message( /// we should update PersistedState::remote_ppoi_message standalone /// from GraphcastMessage field such as nonce #[autometrics(track_concurrency)] -pub async fn 
process_valid_message( - msg: GraphcastMessage, - state: &PersistedState, -) { +pub async fn process_valid_message(msg: GraphcastMessage, pool: &SqlitePool) { let identifier = msg.identifier.clone(); - state.add_remote_ppoi_message(msg.clone()); - CACHED_PPOI_MESSAGES.with_label_values(&[&identifier]).set( - state - .remote_ppoi_messages() - .iter() - .filter(|m: &&GraphcastMessage| m.identifier == identifier) - .collect::>>() - .len() - .try_into() - .unwrap(), - ); + // Create a NewRemotePpoiMessage from GraphcastMessage + let new_message = NewRemotePpoiMessage { + identifier: msg.identifier, + nonce: msg.payload.nonce, // assuming these fields are directly accessible + graph_account: msg.payload.graph_account, + content: msg.payload.content, + network: msg.payload.network, + block_number: msg.payload.block_number as i64, + block_hash: msg.payload.block_hash, + signature: msg.signature, + }; + + // Insert the message into the database + if let Err(e) = insert_remote_ppoi_message(pool, &new_message).await { + error!("Error adding remote ppoi message to database: {:?}", e); + } else { + let message_count = count_remote_ppoi_messages(pool, &identifier).await; + + // Update the metrics + CACHED_PPOI_MESSAGES + .with_label_values(&[&identifier]) + .set(message_count as i64); + } } -/// Compare validated messages #[allow(clippy::too_many_arguments)] #[autometrics(track_concurrency)] pub async fn poi_message_comparison( @@ -315,66 +330,62 @@ pub async fn poi_message_comparison( collect_window_duration: i64, callbook: CallBook, messages: Vec>, - local_attestations: HashMap>, + db: SqlitePool, ) -> Result { let time = Utc::now().timestamp(); - let (compare_block, collect_window_end) = match local_comparison_point( - &local_attestations, - &messages, - id.clone(), - collect_window_duration, - ) { - Some((block, window)) if time >= window => (block, window), - Some((compare_block, window)) => { - let err_msg = format!("Deployment {} comparison not triggered: collecting messages until time {}; currently {time}", id.clone(), window); - debug!(err = err_msg, "Collecting messages",); - return Err(OperationError::CompareTrigger( - id.clone(), - compare_block, - err_msg, - )); - } - _ => { - let err_msg = format!( - "Deployment {} comparison not triggered: no local attestation to compare", - id.clone() - ); - debug!(err = err_msg, "No local attestations for comparison",); - return Err(OperationError::CompareTrigger(id.clone(), 0, err_msg)); - } - }; + // Determine the comparison point + let (compare_block, collect_window_end) = + match local_comparison_point(&messages, &id, collect_window_duration, db.clone()).await { + Ok(Some((block, window))) if time >= window => (block, window), + Ok(Some((block, _window))) => { + // Construct error for early comparison attempt + return Err(OperationError::CompareTrigger( + id, + block, + "Comparison window has not yet ended".to_string(), + )); + } + Ok(None) => { + // Construct error for lack of local attestations + return Err(OperationError::CompareTrigger( + id, + 0, + "No local attestations to compare".to_string(), + )); + } + Err(e) => { + return Err(OperationError::Database(DatabaseError::CoreSqlx(e))); + } + }; + + // Filter messages for the current comparison + let filtered_messages = messages + .into_iter() + .filter(|m| m.payload.block_number == compare_block && m.nonce <= collect_window_end) + .collect::>(); + + // Process the filtered POI messages to get remote attestations + let remote_attestations = process_ppoi_message(filtered_messages, &callbook) + 
.await + .map_err(OperationError::Attestation)?; - let filter_msg: Vec> = messages - .iter() - .filter(|&m| m.payload.block_number == compare_block && m.nonce <= collect_window_end) - .cloned() - .collect(); - debug!( - deployment_hash = id, - time, - comparison_time = collect_window_end, + let local_attestation_record = get_local_attestation(&db, &id, compare_block as i64) + .await + .map_err(|e| OperationError::Database(DatabaseError::CoreSqlx(e)))? + .ok_or(OperationError::Others( + "Local attestation record not found".to_string(), + ))?; // Handling the None case + + let local_attestation = Attestation::from_record(local_attestation_record); + + // Perform the comparison + let comparison_result = compare_attestations( + Some(local_attestation), compare_block, - comparison_countdown_seconds = max(0, time - collect_window_end), - number_of_messages_matched_to_compare = filter_msg.len(), - "Comparison state", + &remote_attestations, + &id, ); - let remote_attestations_result = process_ppoi_message(filter_msg, &callbook).await; - let remote_attestations = match remote_attestations_result { - Ok(remote) => { - debug!(unique_remote_pPOIs = remote.len(), "Processed messages",); - remote - } - Err(err) => { - trace!( - err = tracing::field::debug(&err), - "An error occured while processing the messages", - ); - return Err(OperationError::Attestation(err)); - } - }; - let comparison_result = - compare_attestations(compare_block, remote_attestations, &local_attestations, &id); Ok(comparison_result) } diff --git a/subgraph-radio/src/messages/upgrade.rs b/subgraph-radio/src/messages/upgrade.rs index 9cc2f20..a09479a 100644 --- a/subgraph-radio/src/messages/upgrade.rs +++ b/subgraph-radio/src/messages/upgrade.rs @@ -5,6 +5,7 @@ use ethers_derive_eip712::*; use prost::Message; use serde::{Deserialize, Serialize}; +use sqlx::SqlitePool; use tracing::{debug, info}; use graphcast_sdk::{ @@ -12,8 +13,8 @@ use graphcast_sdk::{ graphql::client_graph_account::{owned_subgraphs, subgraph_hash_by_id}, }; -use crate::operator::notifier::Notifier; -use crate::{config::Config, state::PersistedState}; +use crate::{config::Config, DatabaseError}; +use crate::{entities::recent_upgrade, operator::notifier::Notifier}; use crate::{ operator::indexer_management::{check_decision_basis, offchain_sync_indexing_rules}, OperationError, @@ -100,11 +101,16 @@ impl UpgradeIntentMessage { &self, config: &Config, notifier: &Notifier, - state: &PersistedState, + db: &SqlitePool, ) -> Result<&Self, OperationError> { // ratelimit upgrades: return early if there was a recent upgrade - if state.recent_upgrade(self, config.radio_setup.auto_upgrade_ratelimit) { - info!(subgraph = &self.subgraph_id, "Received an Upgrade Intent Message for a recently upgraded subgraph, skiping notification and auto deployment"); + let recent_upgrade_result = + recent_upgrade(db, self, config.radio_setup.auto_upgrade_ratelimit) + .await + .map_err(|e| OperationError::Database(DatabaseError::CoreSqlx(e)))?; // Convert SqlxError to DatabaseError and then to OperationError + + if recent_upgrade_result { + info!(subgraph = &self.subgraph_id, "Received an Upgrade Intent Message for a recently upgraded subgraph, skipping notification and auto deployment"); return Ok(self); } // send notifications diff --git a/subgraph-radio/src/operator/attestation.rs b/subgraph-radio/src/operator/attestation.rs index 30872e2..6897d9f 100644 --- a/subgraph-radio/src/operator/attestation.rs +++ b/subgraph-radio/src/operator/attestation.rs @@ -1,13 +1,21 @@ -use 
async_graphql::{Enum, Error, ErrorExtensions, SimpleObject}; +use crate::entities::{ + clear_all_notifications, create_notification, get_all_notifications, + get_comparison_results_by_deployment, insert_comparison_result, update_comparison_result, + NewComparisonResult, NewNotification, +}; +use crate::operator::notifier::NotificationMode; +use async_graphql::{Enum, Error as AsyncGraphqlError, ErrorExtensions, SimpleObject}; use autometrics::autometrics; use chrono::Utc; -use num_traits::Zero; use serde_derive::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; +use sqlx::{Error as SqlxError, SqlitePool}; +use std::error::Error; +use std::fmt; +use std::str::FromStr; use std::{ - collections::HashMap, - fmt::{self, Display}, - sync::{Arc, Mutex as SyncMutex}, + collections::{HashMap, HashSet}, + fmt::Display, }; use tracing::{debug, error, info, trace, warn}; @@ -17,11 +25,14 @@ use graphcast_sdk::{ graphcast_agent::message_typing::{get_indexer_stake, GraphcastMessage, MessageError}, }; -use crate::operator::notifier::NotificationMode; use crate::{ - messages::poi::PublicPoiMessage, metrics::ACTIVE_INDEXERS, state::PersistedState, - OperationError, + entities::{ + create_local_attestation, get_all_local_attestations_for_identifier, AttestationRecord, + NewAttestation, + }, + RADIO_OPERATOR, }; +use crate::{messages::poi::PublicPoiMessage, metrics::ACTIVE_INDEXERS, OperationError}; use super::Notifier; @@ -70,6 +81,27 @@ impl Attestation { )) } } + + pub fn from_record(record: AttestationRecord) -> Self { + let timestamp = record + .timestamp + .split(',') + .filter_map(|s| s.parse::().ok()) + .collect(); + + Attestation { + ppoi: record.ppoi, + stake_weight: record.stake_weight, + senders: record + .senders + .split(',') + .map(str::trim) + .map(|s| s.to_string()) + .collect(), + sender_group_hash: record.sender_group_hash, + timestamp, + } + } } impl fmt::Display for Attestation { @@ -92,17 +124,26 @@ pub struct AttestationEntry { pub attestation: Attestation, } -pub fn attestations_to_vec(attestations: &LocalAttestationsMap) -> Vec { - attestations - .iter() - .flat_map(|(ppoi, inner_map)| { - inner_map.iter().map(move |(blk, att)| AttestationEntry { - deployment: ppoi.clone(), - block_number: *blk, - attestation: att.clone(), - }) - }) - .collect() +impl From for AttestationEntry { + fn from(record: AttestationRecord) -> Self { + let timestamp = record + .timestamp + .split(',') + .filter_map(|s| s.parse::().ok()) + .collect(); + + AttestationEntry { + deployment: record.identifier, + block_number: record.block_number as u64, + attestation: Attestation { + ppoi: record.ppoi, + stake_weight: record.stake_weight, + senders: record.senders.split(',').map(String::from).collect(), + sender_group_hash: record.sender_group_hash, + timestamp, + }, + } + } } #[autometrics] @@ -128,7 +169,6 @@ pub async fn process_ppoi_message( .await .map_err(|e| AttestationError::BuildError(MessageError::FieldDerivations(e)))?; - //TODO: update this to utilize update_blocks? 
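`Attestation::from_record` above reverses a comma-separated encoding of the `senders` and `timestamp` TEXT columns. The writing side (`create_local_attestation`) is not shown in this diff, so the following is only a sketch of the joining it is assumed to perform before inserting a `NewAttestation`:

```rust
// Assumed inverse of the split(',') parsing in Attestation::from_record:
// Vec fields are comma-joined into the TEXT columns of local_attestations.
fn join_senders(senders: &[String]) -> String {
    senders.join(",")
}

fn join_timestamps(timestamps: &[i64]) -> String {
    timestamps
        .iter()
        .map(|t| t.to_string())
        .collect::<Vec<_>>()
        .join(",")
}

// e.g. join_senders(&["0xa1".to_string(), "0xa2".to_string()]) == "0xa1,0xa2"
//      join_timestamps(&[1, 2]) == "1,2"
```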
let blocks = remote_attestations .entry(msg.identifier.to_string()) .or_default(); @@ -185,94 +225,55 @@ pub fn combine_senders(attestations: &[Attestation]) -> Vec { /// Determine the comparison pointer on both block and time based on the local attestations /// If they don't exist, then return default value that shall never be validated to trigger -pub fn local_comparison_point( - local_attestations: &LocalAttestationsMap, +pub async fn local_comparison_point( remote_ppoi_messages: &[GraphcastMessage], - id: String, + id: &str, collect_window_duration: i64, -) -> Option<(u64, i64)> { - if let Some(blocks_map) = local_attestations.get(&id) { - // Find the attestaion by the smallest block - let remote_blocks = remote_ppoi_messages - .iter() - .filter(|m| m.identifier == id.clone()) - .map(|m| m.payload.block_number) - .collect::>(); - blocks_map - .iter() - .filter(|(&block, _)| remote_blocks.contains(&block)) - .min_by_key(|(&min_block, attestation)| { - // unwrap is okay because we add timestamp at local creation of attestation - (min_block, *attestation.timestamp.first().unwrap()) - }) - .map(|(&block, a)| { + db: SqlitePool, +) -> Result, SqlxError> { + // Fetch all local attestations for the identifier from the database + let local_attestations = get_all_local_attestations_for_identifier(&db, id).await?; + + let remote_blocks: HashSet = remote_ppoi_messages + .iter() + .filter(|m| m.identifier == id) + .map(|m| m.payload.block_number) + .collect(); + + let comparison_point = local_attestations + .into_iter() + .filter(|attestation| remote_blocks.contains(&(attestation.block_number as u64))) + .filter_map(|attestation| { + attestation.timestamp.parse::().ok().map(|timestamp| { ( - block, - *a.timestamp.first().unwrap() + collect_window_duration, + attestation.block_number as u64, + timestamp + collect_window_duration, ) }) - } else { - None - } -} + }) + .min_by_key(|&(block_number, timestamp)| (block_number, timestamp)); -/// Updates the `blocks` HashMap to include the new attestation. 
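For a concrete sense of the comparison-point selection above: in the test that accompanied the removed in-memory version, local attestations existed for blocks 42, 43 and 44 with timestamps 2, 4 and 6, the remote messages only referenced block 42, and the collect window was 120 seconds, so the chosen point was block 42 with a window end of 122. A minimal restatement of that arithmetic:

```rust
// Only block 42 survives the remote_blocks filter, so the minimum is trivially
// (42, 2), and the window end is the attestation timestamp plus the window:
let (block, timestamp, collect_window_duration) = (42u64, 2i64, 120i64);
let comparison_point = Some((block, timestamp + collect_window_duration));
assert_eq!(comparison_point, Some((42, 122)));
```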
-pub fn update_blocks( - block_number: u64, - blocks: &HashMap>, - ppoi: String, - stake: f32, - address: String, - timestamp: i64, -) -> HashMap> { - let mut blocks_clone: HashMap> = HashMap::new(); - blocks_clone.extend(blocks.clone()); - blocks_clone.insert( - block_number, - vec![Attestation::new( - ppoi, - stake, - vec![address], - vec![timestamp], - )], - ); - blocks_clone + Ok(comparison_point) } /// Saves PPOIs that we've generated locally, in order to compare them with remote ones later -pub fn save_local_attestation( - local_attestations: Arc>, +pub async fn save_local_attestation( + pool: &SqlitePool, content: String, ipfs_hash: String, block_number: u64, -) { - let attestation = Attestation::new(content, Zero::zero(), vec![], vec![Utc::now().timestamp()]); - - let mut local_attestations = local_attestations.lock().unwrap(); - - local_attestations - .entry(ipfs_hash.clone()) - .or_default() - .entry(block_number) - .and_modify(|existing_attestation| *existing_attestation = attestation.clone()) - .or_insert(attestation); -} - -/// Clear the expired local attestations after comparing with remote results -pub fn clear_local_attestation( - local_attestations: Arc>>>, - ipfs_hash: String, - block_number: u64, -) { - let mut local_attestations = local_attestations.lock().unwrap(); - let blocks = local_attestations.get(&ipfs_hash); - - if let Some(blocks) = blocks { - let mut blocks_clone: HashMap = HashMap::new(); - blocks_clone.extend(blocks.clone()); - blocks_clone.remove(&block_number); - local_attestations.insert(ipfs_hash, blocks_clone); +) -> Result { + let new_attestation = NewAttestation { + identifier: ipfs_hash, + block_number: block_number as i64, + ppoi: content, + stake_weight: 0, + sender_group_hash: String::new(), + senders: vec![], + timestamp: vec![Utc::now().timestamp()], }; + + create_local_attestation(pool, new_attestation).await } /// Tracks results indexed by deployment hash and block number @@ -305,6 +306,86 @@ impl ComparisonResult { } } +pub async fn handle_comparison_result( + pool: &SqlitePool, + new_comparison_result: &ComparisonResult, +) -> Result { + let deployment_hash = new_comparison_result.deployment_hash(); + let mut should_notify = false; + + // Fetch the existing result for the deployment + let existing_result = get_comparison_results_by_deployment(pool, &deployment_hash) + .await? 
+ .into_iter() + .next(); + + let result_type = match existing_result { + Some(current_result) => { + if current_result.result_type != new_comparison_result.result_type.to_string() + && new_comparison_result.result_type != ComparisonResultType::NotFound + { + // Only notify if there is a state change, excluding NotFound -> Match + if new_comparison_result.result_type != ComparisonResultType::Match { + should_notify = true; + } + // Update the result as the state has changed + update_comparison_result(pool, new_comparison_result).await?; + new_comparison_result.result_type + } else { + // If the new result is NotFound or the same, we don't update + serde_json::from_str(¤t_result.result_type) + .unwrap_or(ComparisonResultType::NotFound) + } + } + None => { + // If there is no existing result, insert the new one + insert_comparison_result(pool, &NewComparisonResult::from(new_comparison_result)) + .await?; + new_comparison_result.result_type + } + }; + + // Notify if needed + if should_notify { + let new_notification = NewNotification { + deployment: new_comparison_result.deployment.clone(), + message: new_comparison_result.to_string(), // Ensure this method exists + }; + // Note: If create_notification should be part of a transaction, you'll need to adjust this + create_notification(pool, new_notification).await?; + } + + Ok(result_type) +} + +#[derive(Debug, Clone)] +pub struct ParseComparisonResultTypeError; + +impl fmt::Display for ParseComparisonResultTypeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "provided string did not match any ComparisonResultType variants" + ) + } +} + +impl Error for ParseComparisonResultTypeError {} + +impl FromStr for ComparisonResultType { + type Err = ParseComparisonResultTypeError; + + fn from_str(s: &str) -> Result { + match s { + "NotFound" => Ok(ComparisonResultType::NotFound), + "Divergent" => Ok(ComparisonResultType::Divergent), + "Match" => Ok(ComparisonResultType::Match), // Accepting both for compatibility + "BuildFailed" => Ok(ComparisonResultType::BuildFailed), + _ => Err(ParseComparisonResultTypeError), + } + } +} + impl Display for ComparisonResultType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -315,7 +396,7 @@ impl Display for ComparisonResultType { write!(f, "Divergent") } ComparisonResultType::Match => { - write!(f, "Matched") + write!(f, "Match") } ComparisonResultType::BuildFailed => write!(f, "Failed to build message"), } @@ -384,124 +465,54 @@ impl Clone for ComparisonResult { } } } - -/// Compares local attestations against remote ones using the attestation stores we populated while processing saved GraphcastMessage messages. -/// It takes our attestation (PPOI) for a given subgraph on a given block and compares it to the top-attested one from the remote attestations. -/// The top remote attestation is found by grouping attestations together and increasing their total stake-weight every time we see a new message -/// with the same PPOI from an Indexer (NOTE: one Indexer can only send 1 attestation per subgraph per block). The attestations are then sorted -/// and we take the one with the highest total stake-weight. +/// Compares a local attestation against remote ones. +/// - `local_attestation`: The local attestation data for a given block, if it exists. +/// - `attestation_block`: The specific block number we are comparing. +/// - `remote`: A map with a similar structure to what `local` was, but contains remote attestations. 
+/// - `ipfs_hash`: The identifier for the deployment whose attestations we are comparing. pub fn compare_attestations( + local_attestation: Option, attestation_block: u64, - remote: RemoteAttestationsMap, - local: &LocalAttestationsMap, + remote: &RemoteAttestationsMap, ipfs_hash: &str, ) -> ComparisonResult { - trace!( - local = tracing::field::debug(&local), - remote = tracing::field::debug(&remote), - "Comparing attestations", - ); - - // Filtering local and remote attestations - let blocks = match local.get(ipfs_hash) { - Some(blocks) => blocks, - None => { - debug!(ipfs_hash, local = tracing::field::debug(&local), - remote = tracing::field::debug(&remote), - "No local attestation stored for any blocks (Should not get here as attestation_block is determined by local_attestations)",); - return ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::NotFound, - local_attestation: None, - attestations: vec![], - }; - } - }; - let local_attestation = match blocks.get(&attestation_block) { - Some(attestations) => attestations, - None => { - debug!(ipfs_hash, attestation_block, local = tracing::field::debug(&local), - remote = tracing::field::debug(&remote), - "No local attestation stored for the block (Should not get here as attestation_block is determined by local_attestations)",); - return ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::NotFound, - local_attestation: None, - attestations: vec![], - }; - } - }; + // Attempt to retrieve remote attestations for the given IPFS hash and block number + let remote_attestations = remote + .get(ipfs_hash) + .and_then(|blocks| blocks.get(&attestation_block)) + .cloned() + .unwrap_or_default(); + + // Sort remote attestations by stake weight in descending order + let mut sorted_remote_attestations = remote_attestations; + sorted_remote_attestations.sort_by(|a, b| { + b.stake_weight + .partial_cmp(&a.stake_weight) + .unwrap_or(std::cmp::Ordering::Equal) + }); - let remote_blocks = match remote.get(ipfs_hash) { - Some(blocks) => blocks, - None => { - debug!(ipfs_hash, "No remote attestation stored for any block"); - return ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::NotFound, - local_attestation: Some(local_attestation.clone()), - attestations: vec![], - }; - } - }; - let remote_attestations = match remote_blocks.get(&attestation_block) { - Some(attestations) if !attestations.is_empty() => attestations, - _ => { - debug!(ipfs_hash, attestation_block, "No remote attestation stored",); - return ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::NotFound, - local_attestation: Some(local_attestation.clone()), - attestations: vec![], - }; + // Determine the comparison result based on the top attested remote PPOI + let result_type = if let Some(local_att) = &local_attestation { + if let Some(most_attested) = sorted_remote_attestations.last() { + if most_attested.ppoi == local_att.ppoi { + ComparisonResultType::Match + } else { + ComparisonResultType::Divergent + } + } else { + ComparisonResultType::NotFound } + } else { + ComparisonResultType::NotFound }; - let mut remote_attestations = remote_attestations.clone(); - remote_attestations.sort_by(|a, b| a.stake_weight.partial_cmp(&b.stake_weight).unwrap()); - - if remote_attestations.len() > 1 { - 
warn!( - ipfs_hash, - attestation_block, - sorted_attestations = tracing::field::debug(&remote_attestations), - "More than 1 pPOI found", - ); - } - - let most_attested_ppoi = &remote_attestations.last().unwrap().ppoi; - if most_attested_ppoi == &local_attestation.ppoi { - trace!( - ipfs_hash, - attestation_block, - num_unique_ppois = remote_attestations.len(), - "pPOI matched", - ); - ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::Match, - local_attestation: Some(local_attestation.clone()), - attestations: remote_attestations, - } - } else { - debug!( - attestation_block, - remote_attestations = tracing::field::debug(&remote_attestations), - local_attestation = tracing::field::debug(&local_attestation), - "Number of pPOI submitted", - ); - ComparisonResult { - deployment: ipfs_hash.to_string(), - block_number: attestation_block, - result_type: ComparisonResultType::Divergent, - local_attestation: Some(local_attestation.clone()), - attestations: remote_attestations, - } + // Construct the comparison result + ComparisonResult { + deployment: ipfs_hash.to_string(), + block_number: attestation_block, + result_type, + local_attestation, + attestations: sorted_remote_attestations, } } @@ -611,7 +622,7 @@ pub async fn process_comparison_results( num_topics: usize, result_strings: Vec>, notifier: Notifier, - persisted_state: PersistedState, + db: SqlitePool, ) { // Generate attestation summary let mut match_strings = vec![]; @@ -624,9 +635,9 @@ pub async fn process_comparison_results( for result in result_strings { match result { Ok(comparison_result) => { - let result_type = persisted_state - .handle_comparison_result(comparison_result.clone()) - .await; + let result_type = handle_comparison_result(&db, &comparison_result.clone()) + .await + .unwrap(); match result_type { ComparisonResultType::Match => { @@ -647,10 +658,18 @@ pub async fn process_comparison_results( } } - let notifications = persisted_state.notifications(); + let notifications = get_all_notifications(&RADIO_OPERATOR.get().unwrap().db) + .await + .unwrap(); if notifier.notification_mode == NotificationMode::Live && !notifications.is_empty() { - notifier.notify(notifications.join("\n")).await; - persisted_state.clear_notifications(); + let messages: Vec = notifications + .iter() + .map(|notification| notification.message.clone()) + .collect(); + notifier.notify(messages.join("\n")).await; + clear_all_notifications(&RADIO_OPERATOR.get().unwrap().db) + .await + .unwrap(); } info!( @@ -676,444 +695,7 @@ pub enum AttestationError { } impl ErrorExtensions for AttestationError { - fn extend(&self) -> Error { - Error::new(format!("{}", self)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // TODO: add setup and teardown functions - - #[test] - fn test_update_blocks() { - let mut blocks: HashMap> = HashMap::new(); - blocks.insert( - 42, - vec![Attestation::new( - "default".to_string(), - 0.0, - Vec::new(), - Vec::new(), - )], - ); - let block_clone = update_blocks( - 42, - &blocks, - "awesome-ppoi".to_string(), - 0.0, - "0xadd3".to_string(), - 1, - ); - - assert_eq!( - block_clone.get(&42).unwrap().first().unwrap().ppoi, - "awesome-ppoi".to_string() - ); - } - - #[test] - fn test_sort_sender_addresses_unique() { - let attestation = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec!["0xaac5349585cbbf924026d25a520ffa9e8b51a39b".to_string()], - vec![1], - ); - let attestation2 = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - 
vec!["0xbbc5349585cbbf924026d25a520ffa9e8b51a39b".to_string()], - vec![1], - ); - assert_ne!( - attestation2.sender_group_hash, - attestation.sender_group_hash - ); - } - - #[test] - fn test_sort_sender_addresses() { - let attestation = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec![ - "0xaac5349585cbbf924026d25a520ffa9e8b51a39b".to_string(), - "0xbbc5349585cbbf924026d25a520ffa9e8b51a39b".to_string(), - ], - vec![1, 2], - ); - let attestation2 = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec![ - "0xbbc5349585cbbf924026d25a520ffa9e8b51a39b".to_string(), - "0xaac5349585cbbf924026d25a520ffa9e8b51a39b".to_string(), - ], - vec![1, 2], - ); - assert_eq!( - attestation2.sender_group_hash, - attestation.sender_group_hash - ); - } - - #[test] - fn test_attestation_sorting() { - let attestation1 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![0], - ); - - let attestation2 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa2".to_string()], - vec![1], - ); - - let attestation3 = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec!["0xa3".to_string()], - vec![2], - ); - - let mut attestations = vec![attestation1, attestation2, attestation3]; - - attestations.sort_by(|a, b| a.stake_weight.partial_cmp(&b.stake_weight).unwrap()); - - assert_eq!(attestations.last().unwrap().stake_weight, 1); - assert_eq!( - attestations.last().unwrap().senders.first().unwrap(), - &"0xa3".to_string() - ); - assert_eq!(attestations.last().unwrap().timestamp, vec![2]); - } - - #[test] - fn test_attestation_update_success() { - let attestation = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![2], - ); - - let updated_attestation = Attestation::update(&attestation, "0xa2".to_string(), 1.0, 1); - - assert!(updated_attestation.is_ok()); - assert_eq!(updated_attestation.as_ref().unwrap().stake_weight, 1); - assert_eq!(updated_attestation.unwrap().timestamp, [2, 1]); - } - - #[test] - fn test_attestation_update_fail() { - let attestation = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![0], - ); - - let updated_attestation = Attestation::update(&attestation, "0xa1".to_string(), 0.0, 0); - - assert!(updated_attestation.is_err()); - assert_eq!( - updated_attestation.unwrap_err().to_string(), - "Failed to update attestation: There is already an attestation from this address. 
Skipping...".to_string() - ); - } - - #[tokio::test] - async fn test_compare_attestations_generic_fail() { - let res = compare_attestations( - 42, - HashMap::new(), - &HashMap::new(), - "non-existent-ipfs-hash", - ); - - assert_eq!( - res.to_string(), - "NotFound for local attestation: deployment non-existent-ipfs-hash at block 42" - .to_string() - ); - } - - #[tokio::test] - async fn test_compare_attestations_remote_not_found_fail() { - let mut remote_blocks: HashMap> = HashMap::new(); - let mut local_blocks: HashMap = HashMap::new(); - - remote_blocks.insert( - 42, - vec![Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![1], - )], - ); - - local_blocks.insert( - 42, - Attestation::new("awesome-ppoi".to_string(), 0.0, Vec::new(), vec![0]), - ); - - let mut remote_attestations: HashMap>> = - HashMap::new(); - let mut local_attestations: HashMap> = HashMap::new(); - - remote_attestations.insert("my-awesome-hash".to_string(), remote_blocks); - local_attestations.insert("different-awesome-hash".to_string(), local_blocks); - - let res = compare_attestations( - 42, - remote_attestations, - &local_attestations, - "different-awesome-hash", - ); - - assert_eq!( - res.to_string(), - "NotFound for remote attestations: deployment different-awesome-hash at block 42" - .to_string() - ); - } - - #[tokio::test] - async fn test_compare_attestations_local_not_found_fail() { - let remote_blocks: HashMap> = HashMap::new(); - let local_blocks: HashMap = HashMap::new(); - - let mut remote_attestations: HashMap>> = - HashMap::new(); - let mut local_attestations: HashMap> = HashMap::new(); - - remote_attestations.insert("my-awesome-hash".to_string(), remote_blocks); - local_attestations.insert("my-awesome-hash".to_string(), local_blocks); - - let res = compare_attestations( - 42, - remote_attestations, - &local_attestations, - "my-awesome-hash", - ); - - assert_eq!( - res.to_string(), - "NotFound for local attestation: deployment my-awesome-hash at block 42".to_string() - ); - } - - #[tokio::test] - async fn test_compare_attestations_success() { - let mut remote_blocks: HashMap> = HashMap::new(); - let mut local_blocks: HashMap = HashMap::new(); - - let remote = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![0], - ); - remote_blocks.insert(42, vec![remote.clone()]); - - let local = Attestation::new("awesome-ppoi".to_string(), 0.0, Vec::new(), vec![0]); - local_blocks.insert(42, local.clone()); - - let mut remote_attestations: HashMap>> = - HashMap::new(); - let mut local_attestations: HashMap> = HashMap::new(); - - remote_attestations.insert("my-awesome-hash".to_string(), remote_blocks); - local_attestations.insert("my-awesome-hash".to_string(), local_blocks); - - let res = compare_attestations( - 42, - remote_attestations, - &local_attestations, - "my-awesome-hash", - ); - - assert_eq!( - res, - ComparisonResult { - deployment: "my-awesome-hash".to_string(), - block_number: 42, - result_type: ComparisonResultType::Match, - local_attestation: Some(local), - attestations: vec![remote], - } - ); - } - - #[tokio::test] - async fn clear_local_attestation_success() { - let mut local_blocks: HashMap = HashMap::new(); - let attestation1 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![0], - ); - - let attestation2 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa2".to_string()], - vec![1], - ); - - let attestation3 = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - 
vec!["0xa3".to_string()], - vec![2], - ); - - local_blocks.insert(42, attestation1); - local_blocks.insert(43, attestation2); - local_blocks.insert(44, attestation3); - - let mut local_attestations: HashMap> = HashMap::new(); - local_attestations.insert("hash".to_string(), local_blocks.clone()); - local_attestations.insert("hash2".to_string(), local_blocks); - let local = Arc::new(SyncMutex::new(local_attestations)); - - clear_local_attestation(Arc::clone(&local), "hash".to_string(), 43); - - assert_eq!(local.lock().unwrap().get("hash").unwrap().len(), 2); - assert!(local - .lock() - .unwrap() - .get("hash") - .unwrap() - .get(&43) - .is_none()); - assert_eq!(local.lock().unwrap().get("hash2").unwrap().len(), 3); - } - - pub fn test_msg_vec() -> Vec> { - vec![GraphcastMessage { - identifier: String::from("hash"), - nonce: 2, - graph_account: String::from("0x7e6528e4ce3055e829a32b5dc4450072bac28bc6"), - payload: PublicPoiMessage { - identifier: String::from("hash"), - content: String::from("awesome-ppoi"), - nonce: 2, - network: String::from("goerli"), - block_number: 42, - block_hash: String::from("4dbba1ba9fb18b0034965712598be1368edcf91ae2c551d59462aab578dab9c5"), - graph_account: String::from("0xa1"), - }, - signature: String::from("03b197380ab9ee3a9fcaea1301224ad1ff02e9e414275fd79d6ee463b21eb6957af7670a26b0a7f8a6316d95dba8497f2bd67b32b39be07073cf81beff0b37961b"), - }] - } - - #[tokio::test] - async fn local_attestation_pointer_success() { - let mut local_blocks: HashMap = HashMap::new(); - let attestation1 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa1".to_string()], - vec![2], - ); - - let attestation2 = Attestation::new( - "awesome-ppoi".to_string(), - 0.0, - vec!["0xa2".to_string()], - vec![4], - ); - - let attestation3 = Attestation::new( - "awesome-ppoi".to_string(), - 1.0, - vec!["0xa3".to_string()], - vec![6], - ); - - local_blocks.insert(42, attestation1); - local_blocks.insert(43, attestation2); - local_blocks.insert(44, attestation3); - - let mut local_attestations: HashMap> = HashMap::new(); - local_attestations.insert("hash".to_string(), local_blocks.clone()); - local_attestations.insert("hash2".to_string(), local_blocks); - let (block_num, collect_window_end) = local_comparison_point( - &local_attestations, - &test_msg_vec(), - "hash".to_string(), - 120, - ) - .unwrap(); - - assert_eq!(block_num, 42); - assert_eq!(collect_window_end, 122); - } - - #[tokio::test] - async fn test_save_local_attestation() { - let local_attestations = Arc::new(SyncMutex::new(HashMap::new())); - save_local_attestation( - local_attestations.clone(), - "ppoi-x".to_string(), - "0xa1".to_string(), - 0, - ); - - save_local_attestation( - local_attestations.clone(), - "ppoi-y".to_string(), - "0xa1".to_string(), - 1, - ); - - save_local_attestation( - local_attestations.clone(), - "ppoi-z".to_string(), - "0xa2".to_string(), - 2, - ); - - assert!(!local_attestations.lock().unwrap().is_empty()); - assert_eq!(local_attestations.lock().unwrap().len(), 2); - assert_eq!( - local_attestations - .lock() - .unwrap() - .get("0xa1") - .unwrap() - .len(), - 2 - ); - assert_eq!( - local_attestations - .lock() - .unwrap() - .get("0xa2") - .unwrap() - .len(), - 1 - ); - assert_eq!( - local_attestations - .lock() - .unwrap() - .get("0xa1") - .unwrap() - .get(&0) - .unwrap() - .ppoi, - *"ppoi-x" - ); + fn extend(&self) -> AsyncGraphqlError { + AsyncGraphqlError::new(format!("{}", self)) } } diff --git a/subgraph-radio/src/operator/mod.rs b/subgraph-radio/src/operator/mod.rs index e756077..2968eb3 
100644 --- a/subgraph-radio/src/operator/mod.rs +++ b/subgraph-radio/src/operator/mod.rs @@ -1,3 +1,4 @@ +use std::path::Path; use std::sync::{atomic::Ordering, mpsc::Receiver, Arc}; use std::time::Duration; @@ -12,17 +13,20 @@ use graphcast_sdk::{ WakuMessage, }; +use sqlx::SqlitePool; use tokio::time::{interval, sleep, timeout}; use tracing::{debug, error, info, trace, warn}; +use crate::entities::{ + clear_all_notifications, get_all_notifications, get_comparison_results, + get_comparison_results_by_type, insert_upgrade_intent_message, +}; use crate::messages::upgrade::UpgradeIntentMessage; use crate::metrics::handle_serve_metrics; use crate::operator::attestation::log_gossip_summary; use crate::operator::attestation::process_comparison_results; use crate::operator::notifier::NotificationMode; use crate::server::run_server; -use crate::state::PersistedState; -use crate::GRAPHCAST_AGENT; use crate::{ chainhead_block_str, messages::poi::{process_valid_message, PublicPoiMessage}, @@ -32,6 +36,7 @@ use crate::{ operator::{attestation::ComparisonResultType, indexer_management::health_query}, }; use crate::{config::Config, shutdown, ControlFlow}; +use crate::{GRAPHCAST_AGENT, RADIO_OPERATOR}; use self::notifier::Notifier; @@ -45,19 +50,37 @@ pub mod operation; #[allow(unused)] pub struct RadioOperator { config: Config, - persisted_state: PersistedState, graphcast_agent: Arc, notifier: Notifier, control_flow: ControlFlow, + pub db: SqlitePool, } impl RadioOperator { /// Create a radio operator with radio configurations, persisted data, /// graphcast agent, and control flow pub async fn new(config: &Config, agent: GraphcastAgent) -> RadioOperator { - debug!("Initializing program state"); - // Initialize program state - let persisted_state: PersistedState = config.init_radio_state().await; + debug!("Connecting to database"); + + let db_path = config.radio_setup().sqlite_file_path.clone().unwrap(); + debug!("Database path: {}", db_path); + + // Check if the database file exists + if !Path::new(&db_path).exists() { + panic!("Database file does not exist at path: {}", db_path); + } + + let db_url = format!("sqlite:///{}", db_path); + + let db = SqlitePool::connect(&db_url) + .await + .expect("Could not connect to the SQLite database"); + + debug!("Check for database migration"); + sqlx::migrate!() + .run(&db) + .await + .expect("Could not run migration"); debug!("Initializing Graphcast Agent"); let graphcast_agent = Arc::new(agent); @@ -99,10 +122,10 @@ impl RadioOperator { RadioOperator { config: config.clone(), - persisted_state, graphcast_agent, notifier, control_flow, + db, } } @@ -110,11 +133,6 @@ impl RadioOperator { &self.graphcast_agent } - /// Read persisted state at the time of access - pub fn state(&self) -> PersistedState { - self.persisted_state.clone() - } - /// Radio operations pub async fn run(&'static self) { // Control flow @@ -144,11 +162,10 @@ impl RadioOperator { // Initialize Http server with graceful shutdown if configured if self.config.radio_setup().server_port.is_some() { - let state_ref = &self.persisted_state; let config_cloned = self.config.clone(); tokio::spawn(run_server( config_cloned, - state_ref, + &self.db, self.graphcast_agent(), self.control_flow.server_handle.clone(), )); @@ -189,14 +206,11 @@ impl RadioOperator { CONNECTED_PEERS.set(connected_peers); GOSSIP_PEERS.set(gossip_peers); - let diverged_num = self.persisted_state.comparison_result_typed(ComparisonResultType::Divergent).len(); + + let diverged_num = 
get_comparison_results_by_type(&RADIO_OPERATOR.get().unwrap().db, "Divergent").await.unwrap().len(); DIVERGING_SUBGRAPHS.set(diverged_num.try_into().unwrap()); info!(connected_peers, gossip_peers, diverged_num, "State update summary"); - // Save cache if path provided - let _ = &self.config.radio_setup().persistence_file_path.as_ref().map(|path| { - self.persisted_state.update_cache(path); - }); }, _ = gossip_poi_interval.tick() => { if self.control_flow.skip_iteration.load(Ordering::SeqCst) { @@ -286,11 +300,6 @@ impl RadioOperator { let identifiers = self.graphcast_agent().content_identifiers(); let blocks_str = chainhead_block_str(&network_chainhead_blocks); - trace!( - state = tracing::field::debug(&self.state()), - "current state", - ); - let comparison_res = self.compare_poi( identifiers.clone(), ) @@ -301,7 +310,7 @@ impl RadioOperator { identifiers.len(), comparison_res, self.notifier.clone(), - self.persisted_state.clone(), + self.db.clone() ) }).await; @@ -314,22 +323,29 @@ impl RadioOperator { _ = notification_interval.tick() => { match self.config.radio_setup.notification_mode { NotificationMode::PeriodicReport => { - let comparison_results = self.persisted_state.comparison_results(); + let comparison_results = get_comparison_results(&self.db.clone()).await.unwrap(); if !comparison_results.is_empty() { let lines = { let (mut matching, mut divergent) = (0, 0); let mut lines = Vec::new(); let total = comparison_results.len(); - let divergent_lines: Vec = comparison_results.iter().filter_map(|(identifier, res)| { - match res.result_type { + let divergent_lines: Vec = comparison_results.iter().filter_map(|res| { + // Convert the string to ComparisonResultType + let result_type = match res.result_type.parse::() { + Ok(rt) => rt, + Err(_) => return None, // Handle the error appropriately + }; + + match result_type { ComparisonResultType::Match => { matching += 1; None }, ComparisonResultType::Divergent => { divergent += 1; - Some(format!("{} - {}", identifier, res.block_number)) + // Assuming that `res` has an `identifier` field or a method to get it + Some(format!("{} - {}", res.identifier, res.block_number)) }, _ => None, } @@ -346,11 +362,18 @@ impl RadioOperator { self.notifier.notify(lines.join("\n")).await; } }, - NotificationMode::PeriodicUpdate=> { - let notifications = self.persisted_state.notifications(); + NotificationMode::PeriodicUpdate => { + let notifications = get_all_notifications(&self.db).await.unwrap(); + if !notifications.is_empty() { - self.notifier.notify(notifications.join("\n")).await; - self.persisted_state.clear_notifications(); + let notification_messages: Vec = notifications + .iter() + .map(|n| format!("{}: {}", n.deployment, n.message)) + .collect(); + + self.notifier.notify(notification_messages.join("\n")).await; + + clear_all_notifications(&self.db).await.unwrap(); } }, _ => {} @@ -365,20 +388,21 @@ impl RadioOperator { } } - /// Process messages pub async fn message_processor(&self, receiver: Receiver) { - let state = self.persisted_state.clone(); let notifier = self.notifier.clone(); let config = self.config.clone(); + let db = self.db.clone(); tokio::spawn(async move { for msg in receiver { let timeout_duration = Duration::from_secs(10); + let process_res = timeout( timeout_duration, - process_message(state.clone(), notifier.clone(), config.clone(), msg), + process_message(notifier.clone(), config.clone(), msg, &db), ) .await; + match process_res { Ok(_) => trace!("New message processed"), Err(e) => debug!(error = e.to_string(), "Message processor 
timed out"), @@ -390,10 +414,10 @@ impl RadioOperator { /// Decode message into persistence, notifications, and other handlers pub async fn process_message( - state: PersistedState, notifier: Notifier, config: Config, msg: WakuMessage, + db: &SqlitePool, ) { trace!("Decoding waku message into Graphcast Message with Radio specified payload"); RECEIVED_MESSAGES.inc(); @@ -423,6 +447,8 @@ pub async fn process_message( .map_err(|e| WakuHandlingError::InvalidMessage(e.to_string())) { Ok(msg) => { + //panic!("are we ever even getting here"); + if msg .payload .validity_check(&msg, &config.graph_stack.graph_node_status_endpoint) @@ -432,7 +458,7 @@ pub async fn process_message( VALIDATED_MESSAGES .with_label_values(&[&msg.identifier, "public_poi_message"]) .inc(); - process_valid_message(msg.clone(), &state).await; + process_valid_message(msg.clone(), db).await; } } Err(e) => { @@ -478,11 +504,11 @@ pub async fn process_message( .with_label_values(&[&msg.identifier, "upgrade_intent_message"]) .inc(); if radio_msg - .process_valid_message(&config, ¬ifier, &state) + .process_valid_message(&config, ¬ifier, db) .await .is_ok() { - state.add_upgrade_intent_message(msg.clone()); + let _ = insert_upgrade_intent_message(db, msg.clone()).await; }; }; } diff --git a/subgraph-radio/src/operator/operation.rs b/subgraph-radio/src/operator/operation.rs index b950679..c232db0 100644 --- a/subgraph-radio/src/operator/operation.rs +++ b/subgraph-radio/src/operator/operation.rs @@ -1,16 +1,22 @@ use autometrics::autometrics; +use sqlx::SqlitePool; use std::cmp::max; use std::collections::HashMap; -use std::sync::Arc; -use tracing::{debug, trace, warn}; +use tracing::{debug, warn}; use graphcast_sdk::{ determine_message_block, graphcast_agent::message_typing::MessageError, networks::NetworkName, BlockPointer, NetworkBlockError, NetworkPointer, }; +use crate::entities::{ + clean_remote_ppoi_messages, delete_outdated_local_attestations, get_remote_ppoi_messages, +}; + +// use crate::entities::get_local_attestation; use crate::messages::poi::{poi_message_comparison, send_poi_message}; +// use crate::RADIO_OPERATOR; use crate::{ operator::{attestation::ComparisonResult, RadioOperator}, @@ -86,9 +92,9 @@ impl RadioOperator { /* Send message */ let id_cloned = id.clone(); - let callbook = self.config.callbook(); - let local_attestations = self.persisted_state.local_attestations.clone(); + let db = self.db.clone(); + let send_handle = tokio::spawn(async move { send_poi_message( id_cloned, @@ -96,8 +102,8 @@ impl RadioOperator { message_block, latest_block, network_name, - Arc::clone(&local_attestations), GRAPHCAST_AGENT.get().unwrap(), + db, ) .await }); @@ -119,59 +125,65 @@ impl RadioOperator { identifiers: Vec, ) -> Vec> { let mut compare_handles = vec![]; - let remote_ppoi_messages = self.state().remote_ppoi_messages(); - for id in identifiers.clone() { - /* Set up */ - let collect_duration: i64 = self.config.radio_setup.collect_message_duration.to_owned(); - let id_cloned = id.clone(); - let callbook = self.config.callbook(); - let local_attestations = self.state().local_attestations(); - let filtered_msg = remote_ppoi_messages + // Get remote_ppoi_messages directly from the database + let remote_ppoi_messages = get_remote_ppoi_messages(&self.db).await.unwrap(); + + for id in identifiers { + let collect_duration = self.config.radio_setup.collect_message_duration; + let callbook = self.config.callbook().clone(); + + let filtered_msg: Vec<_> = remote_ppoi_messages .iter() - .filter(|&m| m.identifier == id.clone()) + 
.filter(|m| m.identifier == *id) .cloned() .collect(); + let db = self.db.clone(); + let compare_handle = tokio::spawn(async move { - poi_message_comparison( - id_cloned, - collect_duration, - callbook.clone(), - filtered_msg, - local_attestations, - ) - .await + poi_message_comparison(id.clone(), collect_duration, callbook, filtered_msg, db) + .await }); compare_handles.push(compare_handle); } let mut compare_ops = vec![]; for handle in compare_handles { - let res = handle.await; - if let Ok(s) = res { - // Skip clean up for comparisonResult for Error and buildFailed - match s { + if let Ok(result) = handle.await { + match result { Ok(r) => { compare_ops.push(Ok(r.clone())); - - /* Clean up cache */ - // Only clear the ones matching identifier and block number equal or less - // Retain the msgs with a different identifier, or if their block number is greater - // clear_local_attestation(&mut local_attestations, r.deployment_hash(), r.block()); - self.persisted_state - .clean_local_attestations(r.block(), r.deployment_hash()); - self.persisted_state - .clean_remote_ppoi_messages(r.block(), r.deployment_hash()); + if let Err(_e) = Self::cleanup_after_comparison(&self.db.clone(), &r).await + { + // Log error + } } Err(e) => { - trace!(err = tracing::field::debug(&e), "Compare handles"); - - compare_ops.push(Err(e.clone_with_inner())); + // Log error + compare_ops.push(Err(e)); } } } } + compare_ops } + + async fn cleanup_after_comparison( + db_pool: &SqlitePool, + result: &ComparisonResult, + ) -> Result<(), OperationError> { + let block_number: i64 = result.block().try_into().unwrap(); + + delete_outdated_local_attestations(db_pool, &result.deployment_hash(), block_number) + .await + .unwrap(); + + clean_remote_ppoi_messages(db_pool, &result.deployment_hash(), block_number) + .await + .unwrap(); + + Ok(()) + } } diff --git a/subgraph-radio/src/server/mod.rs b/subgraph-radio/src/server/mod.rs index 0df9c38..1a6f89e 100644 --- a/subgraph-radio/src/server/mod.rs +++ b/subgraph-radio/src/server/mod.rs @@ -1,6 +1,7 @@ use axum::{extract::Extension, routing::get, Router}; use axum_server::Handle; use graphcast_sdk::graphcast_agent::GraphcastAgent; +use sqlx::SqlitePool; use std::net::SocketAddr; use std::str::FromStr; @@ -13,7 +14,6 @@ use crate::{ model::{build_schema, SubgraphRadioContext}, routes::{graphql_handler, graphql_playground, health}, }, - state::PersistedState, }; pub mod model; @@ -25,7 +25,7 @@ pub mod routes; /// This function starts a API server at the configured server_host and server_port pub async fn run_server( config: Config, - persisted_state: &'static PersistedState, + db: &SqlitePool, graphcast_agent: &'static GraphcastAgent, handle: Handle, ) { @@ -35,7 +35,7 @@ pub async fn run_server( let port = config.radio_setup().server_port.unwrap(); let context = Arc::new(SubgraphRadioContext::init( config.clone(), - persisted_state, + db, graphcast_agent, )); diff --git a/subgraph-radio/src/server/model/mod.rs b/subgraph-radio/src/server/model/mod.rs index 3016d53..28a1829 100644 --- a/subgraph-radio/src/server/model/mod.rs +++ b/subgraph-radio/src/server/model/mod.rs @@ -1,18 +1,25 @@ use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema, SimpleObject}; use chrono::Utc; +use sqlx::{Error as SqlxError, SqlitePool}; +use crate::entities::{ + get_comparison_results, get_comparison_results_by_deployment, UpgradeIntentMessageRecord, +}; use std::{collections::HashMap, sync::Arc}; use thiserror::Error; use crate::{ config::Config, + entities::{ + 
get_all_local_attestations, get_all_local_attestations_for_identifier, + get_local_attestation, get_remote_ppoi_messages, AttestationRecord, + }, messages::{poi::PublicPoiMessage, upgrade::UpgradeIntentMessage}, operator::attestation::{ - self, attestations_to_vec, compare_attestation, process_ppoi_message, Attestation, - AttestationEntry, AttestationError, ComparisonResult, ComparisonResultType, - LocalAttestationsMap, + self, compare_attestation, process_ppoi_message, Attestation, AttestationEntry, + AttestationError, ComparisonResult, ComparisonResultType, }, - state::PersistedState, + RADIO_OPERATOR, }; use graphcast_sdk::{ graphcast_agent::{message_typing::GraphcastMessage, GraphcastAgent, PeerData}, @@ -35,7 +42,8 @@ impl QueryRoot { ) -> Result>, HttpServiceError> { let msgs = ctx .data_unchecked::>() - .remote_ppoi_messages_filtered(&identifier, &block); + .remote_ppoi_messages_filtered(&identifier, &block) + .await; Ok(msgs) } @@ -47,6 +55,7 @@ impl QueryRoot { let msgs = ctx .data_unchecked::>() .upgrade_intent_messages_filtered(&subgraph_id) + .await .into_iter() .map(|m| m.payload) .collect(); @@ -55,16 +64,14 @@ impl QueryRoot { async fn local_attestations( &self, - ctx: &Context<'_>, + _ctx: &Context<'_>, identifier: Option, block: Option, ) -> Result, HttpServiceError> { - let attestations = ctx - .data_unchecked::>() - .local_attestations(identifier, block); - let filtered = attestations_to_vec(&attestations); + let pool = &RADIO_OPERATOR.get().unwrap().db; + let attestations = SubgraphRadioContext::local_attestations(pool, identifier, block).await; - Ok(filtered) + Ok(attestations) } /// Function that optionally takes in identifier and block filters. @@ -86,12 +93,13 @@ impl QueryRoot { /// Function to grab the latest relevant comparison result of a deployment async fn comparison_result( &self, - ctx: &Context<'_>, identifier: String, ) -> Result, HttpServiceError> { - let res = &ctx - .data_unchecked::>() - .comparison_result(identifier); + let res = &SubgraphRadioContext::comparison_result( + &RADIO_OPERATOR.get().unwrap().db, + &identifier, + ) + .await; Ok(res.clone()) } @@ -201,72 +209,68 @@ pub fn calc_ratios( pub async fn build_schema(ctx: Arc) -> SubgraphRadioSchema { Schema::build(QueryRoot, EmptyMutation, EmptySubscription) - .data(ctx.persisted_state) + .data(ctx.db.clone()) // Pass the database connection pool .finish() } pub struct SubgraphRadioContext { pub radio_config: Config, - pub persisted_state: &'static PersistedState, + pub db: SqlitePool, pub graphcast_agent: &'static GraphcastAgent, } impl SubgraphRadioContext { pub fn init( radio_config: Config, - persisted_state: &'static PersistedState, + db: &SqlitePool, graphcast_agent: &'static GraphcastAgent, ) -> Self { Self { radio_config, - persisted_state, + db: db.clone(), graphcast_agent, } } - pub fn local_attestations( - &self, + pub async fn local_attestations( + pool: &SqlitePool, identifier: Option, block: Option, - ) -> LocalAttestationsMap { - let attestations = self.persisted_state.local_attestations(); - let mut empty_attestations: LocalAttestationsMap = HashMap::new(); - - if let Some(deployment) = identifier { - if let Some(deployment_attestations) = attestations.get(&deployment) { - if let Some(block) = block { - if let Some(attestation) = deployment_attestations.get(&block) { - let single_entry = (block, attestation.clone()); - let inner_map = vec![single_entry].into_iter().collect(); - - vec![(deployment, inner_map)].into_iter().collect() - } else { - // Return empty hashmap if no entry 
satisfy the supplied identifier and block - empty_attestations - } - } else { - // Return all blocks since no block was specified - empty_attestations.insert(deployment, deployment_attestations.clone()); - empty_attestations - } - } else { - empty_attestations + ) -> Vec { + let db_records: Vec = match (identifier, block) { + (Some(deployment_id), Some(block_num)) => { + get_local_attestation(pool, &deployment_id, block_num as i64) + .await + .unwrap_or_default() // Replace with proper error handling + .into_iter() + .collect() } - } else { - attestations - } + (Some(deployment_id), None) => { + get_all_local_attestations_for_identifier(pool, &deployment_id) + .await + .unwrap_or_default() // Replace with proper error handling + } + (None, None) => { + get_all_local_attestations(pool).await.unwrap_or_default() // Replace with proper error handling + } + (None, Some(_)) => Vec::new(), + }; + + db_records.into_iter().map(AttestationEntry::from).collect() } - pub fn remote_ppoi_messages(&self) -> Vec> { - self.persisted_state.remote_ppoi_messages() + pub async fn remote_ppoi_messages(&self) -> Vec> { + get_remote_ppoi_messages(&RADIO_OPERATOR.get().unwrap().db) + .await + .unwrap() } - pub fn remote_ppoi_messages_filtered( + pub async fn remote_ppoi_messages_filtered( &self, identifier: &Option, block: &Option, ) -> Vec> { - let msgs = self.remote_ppoi_messages(); + let msgs = self.remote_ppoi_messages().await; let filtered = msgs .iter() .filter(|&message| filter_remote_ppoi_messages(message, identifier, block)) @@ -275,25 +279,69 @@ impl SubgraphRadioContext { filtered } - pub fn upgrade_intent_messages( - &self, - ) -> HashMap> { - self.persisted_state.upgrade_intent_messages() + // TODO: maybe move this to db file + pub async fn upgrade_intent_messages( + pool: &SqlitePool, + ) -> Result>, SqlxError> { + let records = sqlx::query_as!( + UpgradeIntentMessageRecord, + r#" + SELECT id, identifier, nonce, graph_account, subgraph_id, new_hash, signature + FROM upgrade_intent_messages + "# + ) + .fetch_all(pool) + .await?; + + let mut messages = HashMap::new(); + for record in records { + let msg = GraphcastMessage { + identifier: record.identifier, + nonce: record.nonce, + graph_account: record.graph_account.clone(), + signature: record.signature, + payload: UpgradeIntentMessage { + subgraph_id: record.subgraph_id, + new_hash: record.new_hash, + nonce: record.nonce, + graph_account: record.graph_account, + }, + }; + messages.insert(msg.payload.subgraph_id.clone(), msg); + } + + Ok(messages) } - pub fn upgrade_intent_messages_filtered( + pub async fn upgrade_intent_messages_filtered( &self, subgraph_id: &Option, ) -> Vec> { + let msgs = SubgraphRadioContext::upgrade_intent_messages(&RADIO_OPERATOR.get().unwrap().db) + .await + .unwrap(); + subgraph_id .as_ref() - .and_then(|id| self.upgrade_intent_messages().get(id).cloned()) + .and_then(|id| msgs.get(id).cloned()) .map_or(vec![], |m| vec![m]) } - pub fn comparison_result(&self, identifier: String) -> Option { - let cmp_results = self.persisted_state.comparison_results(); - cmp_results.get(&identifier).cloned() + pub async fn comparison_result( + pool: &SqlitePool, + deployment_hash: &str, + ) -> Option { + // Fetch comparison results by deployment hash + let records = get_comparison_results_by_deployment(pool, deployment_hash) + .await + .ok()?; // Convert Result to Option, return None on error + + // Assuming the deployment hash uniquely identifies a single comparison result, + // we take the first one. Otherwise, this needs to be adjusted. 
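`get_comparison_results_by_deployment`, used here and in `handle_comparison_result`, is another entities helper whose body is outside this diff. Assuming `ComparisonResultRecord` derives `FromRow` over the `comparison_results` table created in `setup_database`, a plausible shape is:

```rust
use sqlx::SqlitePool;

// Sketch with an assumed signature; the real helper lives in entities.rs.
pub async fn get_comparison_results_by_deployment(
    pool: &SqlitePool,
    deployment: &str,
) -> Result<Vec<ComparisonResultRecord>, sqlx::Error> {
    sqlx::query_as::<_, ComparisonResultRecord>(
        r#"
        SELECT id, identifier, deployment, block_number, result_type,
               local_attestation_json, attestations_json
        FROM comparison_results
        WHERE deployment = ?
        "#,
    )
    .bind(deployment)
    .fetch_all(pool)
    .await
}
```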
+ let record = records.into_iter().next()?; + + // Convert the record to a ComparisonResult + record.into_comparison_result().ok() } pub async fn comparison_results( @@ -302,33 +350,28 @@ impl SubgraphRadioContext { block: Option, result_type: Option, ) -> Vec { - // Simply take from persisted state if block is not specified - if block.is_none() { - let cmp_results = self.persisted_state.comparison_results(); - - cmp_results - .iter() - .filter(|&(deployment, cmp_res)| { - (identifier.is_none() | (Some(deployment.clone()) == identifier)) - && (result_type.is_none() | (Some(cmp_res.result_type) == result_type)) - }) - .map(|(_, cmp_res)| cmp_res.clone()) - .collect::>() - } else { - // Calculate for the block if specified - let locals = attestations_to_vec(&self.local_attestations(identifier.clone(), block)); + let db_pool = RADIO_OPERATOR.get().unwrap().db.clone(); // Clone the db pool + + if let Some(block) = block { + let locals = + SubgraphRadioContext::local_attestations(&db_pool, identifier.clone(), Some(block)) + .await; // Handle this Result properly let config = self.radio_config(); - let mut res = vec![]; + let mut res = Vec::new(); for entry in locals { - let deployment_identifier = entry.deployment.clone(); - let msgs = self.remote_ppoi_messages_filtered(&identifier, &block); + // Here we would assume locals is a Vec + let msgs = self + .remote_ppoi_messages_filtered(&identifier, &Some(block)) + .await; // Handle this Result properly + + // You might need to adjust this part to correctly process the messages let remote_attestations = process_ppoi_message(msgs, &config.callbook()) .await - .ok() + .ok() // Handle this Result properly .and_then(|r| { - r.get(&deployment_identifier) + r.get(&entry.deployment) .and_then(|deployment_attestations| { deployment_attestations.get(&entry.block_number).cloned() }) @@ -336,12 +379,34 @@ impl SubgraphRadioContext { .unwrap_or_default(); let r = compare_attestation(entry, remote_attestations); - if result_type.is_none() | (result_type.unwrap() == r.result_type) { + if result_type.is_none() || (Some(r.result_type) == result_type) { res.push(r); } } res + } else { + let comparison_results = match get_comparison_results(&db_pool).await { + Ok(results) => results, + Err(_) => return Vec::new(), // Handle error appropriately, here we return an empty Vec for simplicity + }; + + // Then filter them based on identifier and result_type and convert them to ComparisonResult + comparison_results + .into_iter() + .filter(|cmp_res_record| { + (identifier.is_none() + || Some(&cmp_res_record.identifier) == identifier.as_ref()) + && (result_type.is_none() + || Some(cmp_res_record.result_type.as_str()) + == result_type.map(|rt| rt.to_string().clone()).as_deref()) + }) + .filter_map(|cmp_res_record| { + // Convert ComparisonResultRecord to ComparisonResult + // Handle this asynchronously if into_comparison_result is an async function + cmp_res_record.into_comparison_result().ok() + }) + .collect() } } diff --git a/subgraph-radio/src/state.rs b/subgraph-radio/src/state.rs deleted file mode 100644 index 981d25c..0000000 --- a/subgraph-radio/src/state.rs +++ /dev/null @@ -1,859 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use std::panic::PanicInfo; -use std::path::Path; - -use std::str::FromStr; -use std::sync::{Arc, Mutex as SyncMutex}; -use std::{ - collections::HashMap, - fs::{remove_file, File}, - io::{BufReader, Write}, -}; -use std::{fs, panic}; -use tracing::{debug, info, trace, warn}; - -use 
graphcast_sdk::graphcast_agent::message_typing::GraphcastMessage; - -use crate::messages::upgrade::UpgradeIntentMessage; -use crate::metrics::CACHED_PPOI_MESSAGES; -use crate::{ - messages::poi::PublicPoiMessage, - operator::attestation::{ - clear_local_attestation, Attestation, ComparisonResult, ComparisonResultType, - }, - RADIO_OPERATOR, -}; - -type Local = Arc>>>; -type Remote = Arc>>>; -type UpgradeMessages = Arc>>>; -type ComparisonResults = Arc>>; -type Notifications = Arc>>; - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct PersistedState { - pub local_attestations: Local, - pub remote_ppoi_messages: Remote, - pub upgrade_intent_messages: UpgradeMessages, - pub comparison_results: ComparisonResults, - pub notifications: Notifications, -} - -impl PersistedState { - pub fn new( - local: Option, - remote: Option, - upgrade_intent_messages: Option, - comparison_results: Option, - notifications: Option, - ) -> PersistedState { - let local_attestations = local.unwrap_or(Arc::new(SyncMutex::new(HashMap::new()))); - let remote_ppoi_messages = remote.unwrap_or(Arc::new(SyncMutex::new(vec![]))); - let upgrade_intent_messages = - upgrade_intent_messages.unwrap_or(Arc::new(SyncMutex::new(HashMap::new()))); - let comparison_results = - comparison_results.unwrap_or(Arc::new(SyncMutex::new(HashMap::new()))); - let notifications = notifications.unwrap_or(Arc::new(SyncMutex::new(HashMap::new()))); - PersistedState { - local_attestations, - remote_ppoi_messages, - upgrade_intent_messages, - comparison_results, - notifications, - } - } - - /// Optional updates for either local_attestations, remote_ppoi_messages or comparison_results without requiring either to be in-scope - pub async fn update( - &mut self, - local_attestations: Option, - remote_ppoi_messages: Option, - upgrade_intent_messages: Option, - comparison_results: Option, - notifications: Option, - ) -> PersistedState { - let local_attestations = match local_attestations { - None => self.local_attestations.clone(), - Some(l) => l, - }; - let remote_ppoi_messages = match remote_ppoi_messages { - None => self.remote_ppoi_messages.clone(), - Some(r) => r, - }; - let upgrade_intent_messages = match upgrade_intent_messages { - None => self.upgrade_intent_messages.clone(), - Some(r) => r, - }; - let comparison_results = match comparison_results { - None => self.comparison_results.clone(), - Some(r) => r, - }; - let notifications = match notifications { - None => self.notifications.clone(), - Some(n) => n, - }; - PersistedState { - local_attestations, - remote_ppoi_messages, - upgrade_intent_messages, - comparison_results, - notifications, - } - } - - /// Getter for local_attestations - pub fn local_attestations(&self) -> HashMap> { - self.local_attestations.lock().unwrap().clone() - } - - /// Getter for one local_attestation - pub fn local_attestation(&self, deployment: String, block_number: u64) -> Option { - match self.local_attestations.lock().unwrap().get(&deployment) { - None => None, - Some(blocks_map) => blocks_map.get(&block_number).cloned(), - } - } - - /// Getter for Public POI messages - pub fn remote_ppoi_messages(&self) -> Vec> { - self.remote_ppoi_messages.lock().unwrap().clone() - } - - /// Getter for upgrade intent messages - pub fn upgrade_intent_messages( - &self, - ) -> HashMap> { - self.upgrade_intent_messages.lock().unwrap().clone() - } - - /// Getter for upgrade intent messages for subgraph - pub fn upgrade_intent_message( - &self, - subgraph_id: &str, - ) -> Option> { - self.upgrade_intent_messages - .lock() - 
.unwrap() - .get(subgraph_id) - .cloned() - } - - /// Getter for comparison_results - pub fn comparison_results(&self) -> HashMap { - self.comparison_results.lock().unwrap().clone() - } - - /// Getter for comparison result - pub fn comparison_result(&self, deployment: String) -> Option { - self.comparison_results - .lock() - .unwrap() - .get(&deployment) - .cloned() - } - - /// Getter for comparison results with a certain result type - pub fn comparison_result_typed( - &self, - result_type: ComparisonResultType, - ) -> Vec { - let mut matched_type = vec![]; - for (_key, value) in self.comparison_results() { - if value.result_type == result_type { - matched_type.push(value.clone()); - } - } - matched_type - } - - /// Getter for notifications, return only the values - pub fn notifications(&self) -> Vec { - self.notifications - .lock() - .unwrap() - .values() - .cloned() - .collect() - } - - pub fn clear_notifications(&self) { - self.notifications.lock().unwrap().clear(); - } - - /// Update local_attestations - pub async fn update_local(&mut self, local_attestations: Local) { - self.local_attestations = local_attestations; - } - - /// Update remote_ppoi_messages - pub async fn update_remote( - &mut self, - remote_ppoi_messages: Vec>, - ) -> Vec> { - self.remote_ppoi_messages = Arc::new(SyncMutex::new(remote_ppoi_messages)); - self.remote_ppoi_messages() - } - - /// Add message to remote_ppoi_messages - /// Generalize PublicPoiMessage - pub fn add_remote_ppoi_message(&self, msg: GraphcastMessage) { - trace!( - msg = tracing::field::debug(&msg), - "Adding remote ppoi message" - ); - self.remote_ppoi_messages.lock().unwrap().push(msg) - } - - /// Add message to remote_ppoi_messages - pub fn add_upgrade_intent_message(&self, msg: GraphcastMessage) { - let key = msg.payload.subgraph_id.clone(); - if let Some(_existing) = self.upgrade_intent_message(&key) { - // replace the existing "outdated" record of ratelimit - debug!( - msg = tracing::field::debug(&msg), - "Replace the outdated upgrade message with new message" - ); - let mut msgs = self.upgrade_intent_messages.lock().unwrap(); - msgs.insert(key, msg); - } else { - trace!( - msg = tracing::field::debug(&msg), - "Adding upgrade intent message" - ); - self.upgrade_intent_messages - .lock() - .unwrap() - .entry(key.clone()) - .or_insert(msg); - } - } - - /// Check if there is a recent upgrade message for the subgraph - pub fn recent_upgrade(&self, msg: &UpgradeIntentMessage, upgrade_threshold: i64) -> bool { - self.upgrade_intent_messages() - .iter() - .any(|(matching_id, existing)| { - // there is a upgrade msg of the same subgraph id within the upgrade threshold - matching_id == &msg.subgraph_id && existing.nonce > msg.nonce - upgrade_threshold - }) - } - - /// Add entry to comparison_results - pub fn add_comparison_result(&self, comparison_result: ComparisonResult) { - let deployment = comparison_result.clone().deployment; - - self.comparison_results - .lock() - .unwrap() - .insert(deployment, comparison_result); - } - - pub fn add_notification(&self, deployment: String, notification: String) { - self.notifications - .lock() - .unwrap() - .insert(deployment, notification); - } - - pub async fn valid_ppoi_messages( - &mut self, - graph_node_endpoint: &str, - ) -> Vec> { - let remote_ppoi_messages = self.remote_ppoi_messages(); - let mut valid_messages = vec![]; - - for message in remote_ppoi_messages { - let is_valid = message - .payload - .validity_check(&message, graph_node_endpoint) - .await; - - if is_valid.is_ok() { - 
valid_messages.push(message); - } - } - - self.update_remote(valid_messages).await - } - - pub async fn handle_comparison_result( - &self, - new_comparison_result: ComparisonResult, - ) -> ComparisonResultType { - let (should_notify, updated_comparison_result, result_type) = { - let mut results = self.comparison_results.lock().unwrap(); - let deployment = &new_comparison_result.deployment; - - // Only notify users if there is a switch between match<->diverged, and if notFound become diverged - let mut should_notify = false; - // let current_result = results.get(deployment).cloned(); - let result_type = match results.get(deployment).cloned() { - // If there's no existing result, simply update and return new result - None => { - results.insert(deployment.clone(), new_comparison_result.clone()); - new_comparison_result.result_type - } - - // If previous type and current type switch - // update and return result type - Some(current_result) - if current_result.result_type != new_comparison_result.result_type - && new_comparison_result.result_type != ComparisonResultType::NotFound => - { - results.insert(deployment.clone(), new_comparison_result.clone()); - // Skip notification if notFound becomes match, otherwise notify for - // diverged<->match, notFound->diverged - if new_comparison_result.result_type != ComparisonResultType::Match { - should_notify = true; - } - new_comparison_result.result_type - } - // New result is not found or same as previous result - Some(current_result) => { - // Do not update result if the type is divergence so we keep track of the first diverged block - if let ComparisonResultType::Match | ComparisonResultType::NotFound = - new_comparison_result.result_type - { - results.insert(deployment.clone(), new_comparison_result.clone()); - } - current_result.result_type - } - }; - - (should_notify, new_comparison_result.clone(), result_type) - }; - - if should_notify { - self.add_notification( - updated_comparison_result.deployment.clone(), - updated_comparison_result.to_string(), - ); - } - - result_type - } - - /// Clean remote_ppoi_messages - pub fn clean_remote_ppoi_messages(&self, block_number: u64, deployment: String) { - trace!( - msgs = tracing::field::debug(&self.remote_ppoi_messages.lock().unwrap()), - "cleaning these messages" - ); - self.remote_ppoi_messages - .lock() - .unwrap() - .retain(|msg| msg.payload.block_number >= block_number || msg.identifier != deployment); - - CACHED_PPOI_MESSAGES.with_label_values(&[&deployment]).set( - self.remote_ppoi_messages - .lock() - .unwrap() - .iter() - .filter(|m: &&GraphcastMessage| m.identifier == deployment) - .collect::>>() - .len() - .try_into() - .unwrap(), - ); - } - - /// Clean local_attestations - // TODO: Refactor with attestations operations - pub fn clean_local_attestations(&self, block_number: u64, ipfs_hash: String) { - clear_local_attestation(self.local_attestations.clone(), ipfs_hash, block_number) - } - - /// Update file cache - pub fn update_cache(&self, path: &str) { - // Attempt to serialize state to JSON - let state_json = serde_json::to_string(&self.clone()) - .unwrap_or_else(|_| "Could not serialize state to JSON".to_owned()); - - let path = Path::new(path); - - if let Some(parent) = path.parent() { - fs::create_dir_all(parent).unwrap(); - } - // Write state to file - let mut file = File::create(path).unwrap(); - file.write_all(state_json.as_bytes()).unwrap(); - } - - /// Load cache into persisted state - pub fn load_cache(path: &str) -> PersistedState { - info!(path, "load cache from path"); - let 
file = match File::open(path) { - Ok(f) => f, - Err(e) => { - warn!( - err = tracing::field::debug(&e), - "No persisted state file provided, create an empty state" - ); - // No state persisted, create new - let state = PersistedState::new(None, None, None, None, None); - state.update_cache(path); - return state; - } - }; - - let reader: BufReader = BufReader::new(file); - let state: PersistedState = match serde_json::from_reader(reader) { - Ok(s) => s, - Err(e) => { - // Persisted state can't be parsed, create a new one - warn!( - err = e.to_string(), - "Could not parse persisted state file, created an empty state", - ); - PersistedState::new(None, None, None, None, None) - } - }; - state - } - - /// Clean up - pub fn delete_cache(path: &str) { - _ = remove_file(path); - } -} - -// TODO: panic hook for updating the cache file before exiting the program -/// Set up panic hook to store persisted state -pub fn panic_hook(file_path: &str) { - let path = String::from_str(file_path).expect("Invalid file path provided"); - panic::set_hook(Box::new(move |panic_info| panic_cache(panic_info, &path))); -} - -pub fn panic_cache(panic_info: &PanicInfo<'_>, file_path: &str) { - RADIO_OPERATOR - .get() - .unwrap() - .state() - .update_cache(file_path); - // Log panic information and program state - eprintln!("Panic occurred! Panic info: {:?}", panic_info); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::operator::attestation::{save_local_attestation, ComparisonResultType}; - use graphcast_sdk::networks::NetworkName; - - /// Tests for load, update, and store cache - #[tokio::test] - async fn test_state_cache() { - let path = "test-state.json"; - PersistedState::delete_cache(path); - - let mut state = PersistedState::load_cache(path); - assert!(state.local_attestations().is_empty()); - assert!(state.remote_ppoi_messages().is_empty()); - assert!(state.comparison_results().is_empty()); - - let local_attestations = Arc::new(SyncMutex::new(HashMap::new())); - let ppoi_messages = Arc::new(SyncMutex::new(Vec::new())); - let comparison_results = Arc::new(SyncMutex::new(HashMap::new())); - - save_local_attestation( - local_attestations.clone(), - "ppoi-x".to_string(), - "0xa1".to_string(), - 0, - ); - - save_local_attestation( - local_attestations.clone(), - "ppoi-y".to_string(), - "0xa1".to_string(), - 1, - ); - - save_local_attestation( - local_attestations.clone(), - "ppoi-z".to_string(), - "0xa2".to_string(), - 2, - ); - - let test_comparison_result = ComparisonResult { - deployment: "test_deployment".to_string(), - block_number: 42, - result_type: ComparisonResultType::Match, - local_attestation: None, - attestations: vec![], - }; - comparison_results - .lock() - .unwrap() - .insert("test_deployment".to_string(), test_comparison_result); - - let hash: String = "QmWECgZdP2YMcV9RtKU41GxcdW8EGYqMNoG98ubu5RGN6U".to_string(); - let content: String = - "0xa6008cea5905b8b7811a68132feea7959b623188e2d6ee3c87ead7ae56dd0eae".to_string(); - let nonce: i64 = 123321; - let block_number: u64 = 0; - let block_hash: String = "0xblahh".to_string(); - let ppoi_msg = PublicPoiMessage::build( - hash.clone(), - content, - nonce, - NetworkName::Goerli, - block_number, - block_hash, - String::from("0xe9a1cabd57700b17945fd81feefba82340d9568f"), - ); - let sig: String = "4be6a6b7f27c4086f22e8be364cbdaeddc19c1992a42b08cbe506196b0aafb0a68c8c48a730b0e3155f4388d7cc84a24b193d091c4a6a4e8cd6f1b305870fae61b".to_string(); - let msg = GraphcastMessage::new( - hash.clone(), - nonce, - 
String::from("0xe9a1cabd57700b17945fd81feefba82340d9568f"), - ppoi_msg, - sig, - ) - .expect("Shouldn't get here since the message is purposefully constructed for testing"); - ppoi_messages.lock().unwrap().push(msg); - - state = state - .update( - Some(local_attestations.clone()), - Some(ppoi_messages.clone()), - None, - Some(comparison_results.clone()), - None, - ) - .await; - - let ui_msg = UpgradeIntentMessage { - subgraph_id: String::from("CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"), - new_hash: String::from("QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWAA"), - nonce, - graph_account: String::from("0xe9a1cabd57700b17945fd81feefba82340d9568f"), - }; - let sig: String = "4be6a6b7f27c4086f22e8be364cbdaeddc19c1992a42b08cbe506196b0aafb0a68c8c48a730b0e3155f4388d7cc84a24b193d091c4a6a4e8cd6f1b305870fae61b".to_string(); - let msg = GraphcastMessage::new( - "QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB".to_string(), - nonce, - String::from("0xe9a1cabd57700b17945fd81feefba82340d9568f"), - ui_msg, - sig, - ) - .expect("Shouldn't get here since the message is purposefully constructed for testing"); - state.add_upgrade_intent_message(msg); - state.update_cache(path); - - let state = PersistedState::load_cache(path); - assert_eq!(state.remote_ppoi_messages.lock().unwrap().len(), 1); - assert_eq!(state.upgrade_intent_messages.lock().unwrap().len(), 1); - assert!(!state.local_attestations.lock().unwrap().is_empty()); - assert_eq!(state.local_attestations.lock().unwrap().len(), 2); - assert_eq!( - state - .local_attestations - .lock() - .unwrap() - .get("0xa1") - .unwrap() - .len(), - 2 - ); - assert_eq!( - state - .local_attestations - .lock() - .unwrap() - .get("0xa2") - .unwrap() - .len(), - 1 - ); - assert_eq!( - state - .local_attestations - .lock() - .unwrap() - .get("0xa1") - .unwrap() - .get(&0) - .unwrap() - .ppoi, - *"ppoi-x" - ); - - assert_eq!(state.comparison_results.lock().unwrap().len(), 1); - assert_eq!( - state - .comparison_results - .lock() - .unwrap() - .get("test_deployment") - .unwrap() - .block_number, - 42 - ); - assert_eq!( - state - .comparison_results - .lock() - .unwrap() - .get("test_deployment") - .unwrap() - .result_type, - ComparisonResultType::Match - ); - - PersistedState::delete_cache(path); - } - - #[tokio::test] - async fn handle_comparison_result_new_deployment() { - let local_attestations = Arc::new(SyncMutex::new(HashMap::new())); - let remote_ppoi_messages = Arc::new(SyncMutex::new(Vec::new())); - let upgrade_intent_messages = Arc::new(SyncMutex::new(HashMap::new())); - let comparison_results = Arc::new(SyncMutex::new(HashMap::new())); - let notifications = Arc::new(SyncMutex::new(HashMap::new())); - - let state = PersistedState { - local_attestations, - remote_ppoi_messages, - upgrade_intent_messages, - comparison_results, - notifications, - }; - - let new_result = ComparisonResult { - deployment: String::from("new_deployment"), - block_number: 1, - result_type: ComparisonResultType::Match, - local_attestation: None, - attestations: Vec::new(), - }; - - state.handle_comparison_result(new_result).await; - - let comparison_results = state.comparison_results.lock().unwrap(); - assert!(comparison_results.contains_key(&String::from("new_deployment"))); - } - - #[tokio::test] - async fn handle_comparison_result_change_result_type() { - let local_attestations = Arc::new(SyncMutex::new(HashMap::new())); - let remote_ppoi_messages = Arc::new(SyncMutex::new(Vec::new())); - let upgrade_intent_messages = Arc::new(SyncMutex::new(HashMap::new())); - let comparison_results 
= Arc::new(SyncMutex::new(HashMap::new())); - let notifications = Arc::new(SyncMutex::new(HashMap::new())); - - let state = PersistedState { - local_attestations, - remote_ppoi_messages, - upgrade_intent_messages, - comparison_results, - notifications, - }; - - let old_result = ComparisonResult { - deployment: String::from("existing_deployment"), - block_number: 1, - result_type: ComparisonResultType::Match, - local_attestation: None, - attestations: Vec::new(), - }; - - let new_result = ComparisonResult { - deployment: String::from("existing_deployment"), - block_number: 1, - result_type: ComparisonResultType::Divergent, - local_attestation: None, - attestations: Vec::new(), - }; - - state - .comparison_results - .lock() - .unwrap() - .insert(String::from("existing_deployment"), old_result.clone()); - state.handle_comparison_result(new_result).await; - - let comparison_results = state.comparison_results.lock().unwrap(); - let result = comparison_results - .get(&String::from("existing_deployment")) - .unwrap(); - assert_eq!(result.result_type, ComparisonResultType::Divergent); - } - - #[tokio::test] - async fn upgrade_ratelimiting() { - let upgrade_threshold = 86400; - let local_attestations = Arc::new(SyncMutex::new(HashMap::new())); - let remote_ppoi_messages = Arc::new(SyncMutex::new(Vec::new())); - let upgrade_intent_messages = Arc::new(SyncMutex::new(HashMap::new())); - let comparison_results = Arc::new(SyncMutex::new(HashMap::new())); - let notifications = Arc::new(SyncMutex::new(HashMap::new())); - - let test_id = "AAAMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3".to_string(); - let state = PersistedState { - local_attestations, - remote_ppoi_messages, - upgrade_intent_messages, - comparison_results, - notifications, - }; - - // Make 2 msgs - let msg0 = UpgradeIntentMessage { - subgraph_id: test_id.clone(), - new_hash: "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x".to_string(), - nonce: 1692307513, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - }; - let gc_msg0 = GraphcastMessage { - identifier: "A0".to_string(), - nonce: 1692307513, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - payload: msg0, - signature: "0xA".to_string(), - }; - let msg1 = UpgradeIntentMessage { - subgraph_id: "BBBMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3".to_string(), - new_hash: "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x".to_string(), - nonce: 1691307513, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - }; - let gc_msg1 = GraphcastMessage { - identifier: "B".to_string(), - nonce: 1691307513, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - payload: msg1, - signature: "0xB".to_string(), - }; - - state.add_upgrade_intent_message(gc_msg0); - state.add_upgrade_intent_message(gc_msg1); - - assert_eq!(state.upgrade_intent_messages().len(), 2); - - // Ratelimited by nonce - let msg0 = UpgradeIntentMessage { - subgraph_id: test_id.clone(), - new_hash: "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x".to_string(), - nonce: 1692307600, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - }; - - assert!(state.recent_upgrade(&msg0, upgrade_threshold)); - - // Update to new upgrade message - let msg0 = UpgradeIntentMessage { - subgraph_id: test_id.clone(), - new_hash: "QmAAfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x".to_string(), - nonce: 1692407600, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - }; - assert!(!state.recent_upgrade(&msg0, 
upgrade_threshold)); - - let gc_msg0 = GraphcastMessage { - identifier: "A2".to_string(), - nonce: 1692407600, - graph_account: "0xe9a1cabd57700b17945fd81feefba82340d9568f".to_string(), - payload: msg0, - signature: "0xA".to_string(), - }; - state.add_upgrade_intent_message(gc_msg0); - - assert_eq!( - state.upgrade_intent_message(&test_id).unwrap().nonce, - 1692407600 - ); - assert_eq!( - state - .upgrade_intent_message(&test_id) - .unwrap() - .payload - .new_hash, - "QmAAfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x".to_string() - ); - } - - #[test] - fn test_comparison_result_typed_not_found() { - let mut comparison_results = HashMap::new(); - comparison_results.insert( - "a".to_string(), - ComparisonResult { - result_type: ComparisonResultType::NotFound, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - comparison_results.insert( - "b".to_string(), - ComparisonResult { - result_type: ComparisonResultType::Match, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - comparison_results.insert( - "c".to_string(), - ComparisonResult { - result_type: ComparisonResultType::Match, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - comparison_results.insert( - "d".to_string(), - ComparisonResult { - result_type: ComparisonResultType::Match, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - comparison_results.insert( - "e".to_string(), - ComparisonResult { - result_type: ComparisonResultType::Divergent, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - comparison_results.insert( - "f".to_string(), - ComparisonResult { - result_type: ComparisonResultType::NotFound, - deployment: String::from("Qmhash"), - block_number: 100, - local_attestation: None, - attestations: vec![], - }, - ); - - let state = PersistedState { - comparison_results: Arc::new(SyncMutex::new(comparison_results)), - local_attestations: Arc::new(SyncMutex::new(HashMap::new())), - remote_ppoi_messages: Arc::new(SyncMutex::new(Vec::new())), - upgrade_intent_messages: Arc::new(SyncMutex::new(HashMap::new())), - notifications: Arc::new(SyncMutex::new(HashMap::new())), - }; - - let results = state.comparison_result_typed(ComparisonResultType::Match); - assert_eq!(results.len(), 3); - let results = state.comparison_result_typed(ComparisonResultType::NotFound); - assert_eq!(results.len(), 2); - let results = state.comparison_result_typed(ComparisonResultType::Divergent); - assert_eq!(results.len(), 1); - let results = state.comparison_result_typed(ComparisonResultType::BuildFailed); - assert_eq!(results.len(), 0); - } -} diff --git a/template.toml b/template.toml index 1ec0933..941fd95 100644 --- a/template.toml +++ b/template.toml @@ -10,7 +10,7 @@ indexer_management_server_endpoint = 'http://127.0.0.1:18000' boot_node_addresses = [] [radio_setup] -graphcast_network = 'Testnet' +graphcast_network = 'Mainnet' topics = ['QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB'] gossip_topic_coverage = 'Comprehensive' auto_upgrade_coverage = 'Comprehensive' diff --git a/test-runner/Cargo.toml b/test-runner/Cargo.toml index 42ec35b..463382b 100644 --- a/test-runner/Cargo.toml +++ b/test-runner/Cargo.toml @@ -35,3 +35,5 @@ tower-http = { version = "0.4.0", features = ["trace", "cors"] } tower = "0.4.13" 
serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +sqlx = { version = "0.7.2", features = ["sqlite", "runtime-tokio"] } +tempfile = "3.2.0" diff --git a/test-runner/src/invalid_block_hash.rs b/test-runner/src/invalid_block_hash.rs index 9624db0..3b8ad42 100644 --- a/test-runner/src/invalid_block_hash.rs +++ b/test-runner/src/invalid_block_hash.rs @@ -1,14 +1,14 @@ -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_remote_ppoi_messages, setup_database}; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn invalid_block_hash_test() { let test_file_name = "invalid_block_hash"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + let db_path = format!("./test-runner/test_dbs/{}.db", test_file_name); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -16,7 +16,7 @@ pub async fn invalid_block_hash_test() { vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); let mut test_sender_config = TestSenderConfig { @@ -31,16 +31,18 @@ pub async fn invalid_block_hash_test() { poi: None, }; + let pool = SqlitePool::connect("sqlite::memory:") + .await + .expect("Failed to connect to the in-memory database"); + setup_database(&pool).await; + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(89)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - teardown(process_manager, &store_path); + teardown(process_manager, &db_path); - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); assert!( remote_ppoi_messages.is_empty(), "Remote messages should be empty" diff --git a/test-runner/src/invalid_nonce.rs b/test-runner/src/invalid_nonce.rs index e1b6135..7196506 100644 --- a/test-runner/src/invalid_nonce.rs +++ b/test-runner/src/invalid_nonce.rs @@ -1,14 +1,14 @@ -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_remote_ppoi_messages, setup_database}; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn invalid_nonce_test() { let test_file_name = "invalid_nonce"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + let db_path = format!("./test-runner/test_dbs/{}.db", test_file_name); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -16,7 +16,7 @@ pub async fn invalid_nonce_test() { vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); let mut test_sender_config = TestSenderConfig { @@ -29,16 +29,18 @@ pub async fn invalid_nonce_test() { poi: None, }; + let pool = SqlitePool::connect("sqlite::memory:") + .await + .expect("Failed to connect to the in-memory database"); + 
setup_database(&pool).await; + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(89)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - teardown(process_manager, &store_path); + teardown(process_manager, &db_path); - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); assert!( remote_ppoi_messages.is_empty(), "Remote messages should be empty" diff --git a/test-runner/src/invalid_payload.rs b/test-runner/src/invalid_payload.rs index d095001..c37ada2 100644 --- a/test-runner/src/invalid_payload.rs +++ b/test-runner/src/invalid_payload.rs @@ -1,15 +1,15 @@ -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_remote_ppoi_messages, setup_database}; use test_utils::{ config::{test_config, TestSenderConfig}, dummy_msg::DummyMsg, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn invalid_payload_test() { let test_file_name = "invalid_payload"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + let db_path = format!("./test-runner/test_dbs/{}.db", test_file_name); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -17,7 +17,7 @@ pub async fn invalid_payload_test() { vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); let dummy_radio_payload = DummyMsg::new("hello".to_string(), 42); @@ -34,16 +34,17 @@ pub async fn invalid_payload_test() { poi: None, }; + let pool = SqlitePool::connect("sqlite::memory:") + .await + .expect("Failed to connect to the in-memory database"); + setup_database(&pool).await; let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(89)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); + teardown(process_manager, &db_path); - teardown(process_manager, &store_path); - - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); assert!( remote_ppoi_messages.is_empty(), "Remote messages should be empty" diff --git a/test-runner/src/invalid_sender.rs b/test-runner/src/invalid_sender.rs index 0666238..0cce548 100644 --- a/test-runner/src/invalid_sender.rs +++ b/test-runner/src/invalid_sender.rs @@ -1,15 +1,15 @@ use graphcast_sdk::graphcast_agent::message_typing::IdentityValidation; -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_remote_ppoi_messages, setup_database}; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn invalid_sender_test() { let test_file_name = "invalid_sender"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + let db_path = format!("./test-runner/test_dbs/{}.db", test_file_name); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -17,7 +17,7 @@ pub async fn invalid_sender_test() { 
vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); config.radio_setup.id_validation = IdentityValidation::RegisteredIndexer; @@ -31,16 +31,18 @@ pub async fn invalid_sender_test() { poi: None, }; + let pool = SqlitePool::connect("sqlite::memory:") + .await + .expect("Failed to connect to the in-memory database"); + setup_database(&pool).await; + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(89)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - teardown(process_manager, &store_path); + teardown(process_manager, &db_path); - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); assert!( remote_ppoi_messages.is_empty(), "Remote messages should be empty" diff --git a/test-runner/src/main.rs b/test-runner/src/main.rs index 6c60558..22be13f 100644 --- a/test-runner/src/main.rs +++ b/test-runner/src/main.rs @@ -33,48 +33,41 @@ async fn run_tests( (tests_passed, test_results) } + #[tokio::main] pub async fn main() { let config = test_config(); std::env::set_var( - "RUST_LOG", - "off,hyper=off,graphcast_sdk=trace,subgraph_radio=trace,test_runner=trace,test_sender=trace,test_utils=trace", - ); + "RUST_LOG", + "off,hyper=off,graphcast_sdk=trace,subgraph_radio=trace,test_runner=trace,test_sender=trace,test_utils=trace", + ); init_tracing(config.radio_setup.log_format.to_string()).expect("Could not set up global default subscriber for logger, check environmental variable `RUST_LOG` or the CLI input `log-level"); let start_time = Instant::now(); - let mut retry_count = 5; - let mut initial_tests_passed = false; - let mut initial_test_results: HashMap = HashMap::new(); + // Run send_and_receive_test separately + let send_and_receive_tests = vec![( + "send_and_receive_test", + tokio::spawn(send_and_receive_test()), + )]; + let (send_and_receive_tests_passed, send_and_receive_test_results) = + run_tests(send_and_receive_tests).await; - while retry_count > 0 && !initial_tests_passed { - let initial_tests = vec![ - ( - "send_and_receive_test", - tokio::spawn(send_and_receive_test()), - ), - ("topics_test", tokio::spawn(topics_test())), - ]; + // Run topics_test separately + let topics_tests = vec![("topics_test", tokio::spawn(topics_test()))]; + let (topics_tests_passed, topics_test_results) = run_tests(topics_tests).await; - let (tests_passed, test_results) = run_tests(initial_tests).await; - initial_test_results = test_results; + // Run poi_divergent_test separately + let poi_divergent_tests = vec![("poi_divergent_test", tokio::spawn(poi_divergent_test()))]; + let (poi_divergent_tests_passed, poi_divergent_test_results) = + run_tests(poi_divergent_tests).await; - if tests_passed { - initial_tests_passed = true; - } else { - retry_count -= 1; - } - } - - let poi_tests = vec![ - ("poi_divergent_test", tokio::spawn(poi_divergent_test())), - ("poi_match_test", tokio::spawn(poi_match_test())), - ]; - - let (poi_tests_passed, poi_test_results) = run_tests(poi_tests).await; + // Run poi_match_test separately + let poi_match_tests = vec![("poi_match_test", tokio::spawn(poi_match_test()))]; + let (poi_match_tests_passed, poi_match_test_results) = 
run_tests(poi_match_tests).await; + // Run other validity tests let validity_tests_group_1 = vec![ ( "invalid_block_hash_test", @@ -82,7 +75,6 @@ pub async fn main() { ), ("invalid_sender_test", tokio::spawn(invalid_sender_test())), ]; - let (validity_tests_group_1_passed, validity_test_results_group_1) = run_tests(validity_tests_group_1).await; @@ -90,26 +82,33 @@ pub async fn main() { ("invalid_nonce_test", tokio::spawn(invalid_nonce_test())), ("invalid_payload_test", tokio::spawn(invalid_payload_test())), ]; - let (validity_tests_group_2_passed, validity_test_results_group_2) = run_tests(validity_tests_group_2).await; + // Print test summary print_test_summary( - initial_test_results, - poi_test_results, + send_and_receive_test_results, + topics_test_results, + poi_divergent_test_results, + poi_match_test_results, validity_test_results_group_1, validity_test_results_group_2, - initial_tests_passed - && poi_tests_passed + send_and_receive_tests_passed + && topics_tests_passed + && poi_divergent_tests_passed + && poi_match_tests_passed && validity_tests_group_1_passed && validity_tests_group_2_passed, start_time, ); } +#[allow(clippy::too_many_arguments)] fn print_test_summary( - initial_test_results: HashMap, - poi_test_results: HashMap, + send_and_receive_test_results: HashMap, + topics_test_results: HashMap, + poi_divergent_test_results: HashMap, + poi_match_test_results: HashMap, validity_test_results_group_1: HashMap, validity_test_results_group_2: HashMap, tests_passed: bool, @@ -118,9 +117,11 @@ fn print_test_summary( let elapsed_time = start_time.elapsed(); // Print summary of tests println!("\nTest Summary:\n"); - for (test_name, passed) in initial_test_results + for (test_name, passed) in send_and_receive_test_results .iter() - .chain(&poi_test_results) + .chain(&topics_test_results) + .chain(&poi_divergent_test_results) + .chain(&poi_match_test_results) .chain(&validity_test_results_group_1) .chain(&validity_test_results_group_2) { diff --git a/test-runner/src/message_handling.rs b/test-runner/src/message_handling.rs index 4fd2538..d7a5f60 100644 --- a/test-runner/src/message_handling.rs +++ b/test-runner/src/message_handling.rs @@ -1,27 +1,32 @@ -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{ + entities::{get_all_local_attestations, get_remote_ppoi_messages}, + setup_database, +}; +use tempfile::NamedTempFile; use test_utils::{ config::{test_config, TestSenderConfig}, - messages_are_equal, payloads_are_equal, setup, teardown, + setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::{debug, trace}; pub async fn send_and_receive_test() { let test_file_name = "message_handling"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); - let radio_topics = vec![ - "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string(), - "Qmdefault2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN".to_string(), - ]; + // Create a new temporary file for the database + let temp_file = + NamedTempFile::new().expect("Failed to create a temporary file for the database."); + let db_path = temp_file.path().to_str().unwrap().to_string(); + + let radio_topics = vec!["Qm11default1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let test_sender_topics = - vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; + vec!["Qm11default1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + 
config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); - config.radio_setup.topic_update_interval = 10; + config.radio_setup.topic_update_interval = 90; let mut test_sender_config = TestSenderConfig { topics: test_sender_topics, @@ -33,67 +38,40 @@ pub async fn send_and_receive_test() { poi: None, }; - let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; - - sleep(Duration::from_secs(85)).await; + // Connection string for the SQLite database using the temporary file + let connection_string = format!("sqlite:{}", db_path); + let pool = SqlitePool::connect(&connection_string) + .await + .expect("Failed to connect to the SQLite database file"); + setup_database(&pool).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; - teardown(process_manager, &store_path); + sleep(Duration::from_secs(100)).await; - let local_attestations = persisted_state.local_attestations(); - debug!( - "local tattestations {:#?}, \nchecking result: {:#?}", - local_attestations, - !local_attestations.is_empty() - ); - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + teardown(process_manager, &db_path); + let local_attestations = get_all_local_attestations(&pool).await.unwrap(); assert!( !local_attestations.is_empty(), "There should be at least one element in local_attestations" ); - for test_hash in radio_topics { - assert!( - local_attestations.contains_key(&test_hash), - "No attestation found with ipfs hash {}", - test_hash - ); - } - - let test_hashes_remote = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq"]; - - for target_id in test_hashes_remote { - let has_target_id = remote_ppoi_messages + for test_hash in &radio_topics { + let has_attestation_for_topic = local_attestations .iter() - .any(|msg| msg.identifier == *target_id); + .any(|attestation| &attestation.identifier == test_hash); assert!( - has_target_id, - "No remote message found with identifier {}", - target_id + has_attestation_for_topic, + "No attestation found for ipfs hash {}", + test_hash ); } - trace!("Num of remote messages {}", remote_ppoi_messages.len()); - + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); assert!( remote_ppoi_messages.len() >= 5, - "The number of remote messages should at least 5. Actual: {}", + "The number of remote messages should be at least 5. 
Actual: {}", remote_ppoi_messages.len() ); - - for (index, message1) in remote_ppoi_messages.iter().enumerate() { - for message2 in remote_ppoi_messages.iter().skip(index + 1) { - if messages_are_equal(message1, message2) - && payloads_are_equal(&message1.payload, &message2.payload) - { - panic!( - "Duplicate remote message found with identifier {}", - message1.identifier - ); - } - } - } } diff --git a/test-runner/src/poi_divergent.rs b/test-runner/src/poi_divergent.rs index 33f9cd1..b406855 100644 --- a/test-runner/src/poi_divergent.rs +++ b/test-runner/src/poi_divergent.rs @@ -1,14 +1,19 @@ -use subgraph_radio::{operator::attestation::ComparisonResultType, state::PersistedState}; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_comparison_results, setup_database}; +use tempfile::NamedTempFile; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn poi_divergent_test() { let test_file_name = "poi_divergent"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + + // Create a new temporary file for the database + let temp_file = + NamedTempFile::new().expect("Failed to create a temporary file for the database."); + let db_path = temp_file.path().to_str().unwrap().to_string(); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -16,7 +21,7 @@ pub async fn poi_divergent_test() { vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); let mut test_sender_config = TestSenderConfig { @@ -29,23 +34,26 @@ pub async fn poi_divergent_test() { poi: Some("0x8a937e93f72bf4396214fd519e3ded51a7f3b4316ada7b87d246b4626f7e9e8d".to_string()), }; + // Connection string for the SQLite database using the temporary file + let connection_string = format!("sqlite:{}", db_path); + let pool = SqlitePool::connect(&connection_string) + .await + .expect("Failed to connect to the SQLite database file"); + setup_database(&pool).await; + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(550)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - let comparison_results = persisted_state.comparison_results(); - + let comparison_results = get_comparison_results(&pool).await.unwrap(); assert!( !comparison_results.is_empty(), "The comparison results should not be empty" ); let has_divergent_result = comparison_results.iter().any(|result| { - result.1.deployment == "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq" - && result.1.result_type == ComparisonResultType::Divergent + result.deployment == "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq" + && result.result_type == "Divergent" }); assert!( @@ -53,5 +61,5 @@ pub async fn poi_divergent_test() { "No comparison result found with deployment 'Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq' and result type 'Divergent'" ); - teardown(process_manager, &store_path); + teardown(process_manager, &db_path); } diff --git a/test-runner/src/poi_match.rs b/test-runner/src/poi_match.rs index a08967d..240182f 100644 --- a/test-runner/src/poi_match.rs +++ b/test-runner/src/poi_match.rs @@ -1,14 +1,19 @@ -use 
subgraph_radio::{operator::attestation::ComparisonResultType, state::PersistedState}; +use sqlx::SqlitePool; +use subgraph_radio::{entities::get_comparison_results, setup_database}; +use tempfile::NamedTempFile; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn poi_match_test() { let test_file_name = "poi_match"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + + // Create a new temporary file for the database + let temp_file = + NamedTempFile::new().expect("Failed to create a temporary file for the database."); + let db_path = temp_file.path().to_str().unwrap().to_string(); let radio_topics = vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; @@ -16,7 +21,7 @@ pub async fn poi_match_test() { vec!["Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string()]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); let mut test_sender_config = TestSenderConfig { @@ -29,14 +34,18 @@ pub async fn poi_match_test() { poi: None, }; + // Connection string for the SQLite database using the temporary file + let connection_string = format!("sqlite:{}", db_path); + let pool = SqlitePool::connect(&connection_string) + .await + .expect("Failed to connect to the SQLite database file"); + setup_database(&pool).await; + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; sleep(Duration::from_secs(550)).await; - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - let comparison_results = persisted_state.comparison_results(); + let comparison_results = get_comparison_results(&pool).await.unwrap(); assert!( !comparison_results.is_empty(), @@ -44,8 +53,8 @@ pub async fn poi_match_test() { ); let has_match_result = comparison_results.iter().any(|result| { - result.1.deployment == "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq" - && result.1.result_type == ComparisonResultType::Match + result.deployment == "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq" + && result.result_type == "Match" }); assert!( @@ -53,5 +62,5 @@ pub async fn poi_match_test() { "No comparison result found with deployment 'Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq' and result type 'Match'" ); - teardown(process_manager, &store_path); + teardown(process_manager, &db_path); } diff --git a/test-runner/src/topics.rs b/test-runner/src/topics.rs index 2fe7fcf..e30171a 100644 --- a/test-runner/src/topics.rs +++ b/test-runner/src/topics.rs @@ -1,29 +1,37 @@ -use subgraph_radio::state::PersistedState; +use sqlx::SqlitePool; +use subgraph_radio::{ + entities::{get_all_local_attestations, get_remote_ppoi_messages}, + setup_database, +}; +use tempfile::NamedTempFile; use test_utils::{ config::{test_config, TestSenderConfig}, setup, teardown, }; use tokio::time::{sleep, Duration}; -use tracing::debug; pub async fn topics_test() { let test_file_name = "topics"; - let store_path = format!("./test-runner/state/{}.json", test_file_name); + + // Create a new temporary file for the database + let temp_file = + NamedTempFile::new().expect("Failed to create a temporary file for the database."); + let db_path = temp_file.path().to_str().unwrap().to_string(); let radio_topics = vec![ - 
"Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string(), - "Qmdefault2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN".to_string(), - "QmonlyinradioXyZABCdeFgHIjklMNOpqrstuvWXYZabcdefGHIJKL".to_string(), + "Qm22default1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string(), + "Qm22default2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN".to_string(), + "Qm22onlyinradioXyZABCdeFgHIjklMNOpqrstuvWXYZabcdefGHIJKL".to_string(), ]; let test_sender_topics = vec![ - "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string(), - "Qmdefault2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN".to_string(), - "QmonlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG".to_string(), + "Qm22default1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq".to_string(), + "Qm22default2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN".to_string(), + "Qmo22nlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG".to_string(), ]; let mut config = test_config(); - config.radio_setup.persistence_file_path = Some(store_path.clone()); + config.radio_setup.sqlite_file_path = Some(db_path.clone()); config.radio_setup.topics = radio_topics.clone(); config.radio_setup.topic_update_interval = 90; @@ -37,93 +45,71 @@ pub async fn topics_test() { poi: None, }; - let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; - - sleep(Duration::from_secs(89)).await; + // Connection string for the SQLite database using the temporary file + let connection_string = format!("sqlite:{}", db_path); + let pool = SqlitePool::connect(&connection_string) + .await + .expect("Failed to connect to the SQLite database file"); + setup_database(&pool).await; - // can be sure that file path is set to Some (after test_config() - let persisted_state = - PersistedState::load_cache(&config.radio_setup.persistence_file_path.unwrap()); - debug!( - local_attestations = tracing::field::debug(&persisted_state.local_attestations()), - remote_ppoi_messages = tracing::field::debug(&persisted_state.remote_ppoi_messages()), - persisted_state = tracing::field::debug(&persisted_state), - "loaded persisted state" - ); + let process_manager = setup(&config, test_file_name, &mut test_sender_config).await; - let local_attestations = persisted_state.local_attestations(); - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); - - debug!( - local_attestations = tracing::field::debug(&local_attestations), - remote_ppoi_messages = tracing::field::debug(&remote_ppoi_messages), - "Starting topics_test" - ); + sleep(Duration::from_secs(100)).await; + // Retrieve local attestations and verify them against radio topics + let local_attestations = get_all_local_attestations(&pool).await.unwrap(); assert!( !local_attestations.is_empty(), "There should be at least one element in local_attestations" ); - for test_hash in radio_topics { - assert!( - local_attestations.contains_key(&test_hash), - "No attestation found with ipfs hash {}", - test_hash - ); - } - - let test_hashes_remote = vec![ - "Qmdefault1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq", - "Qmdefault2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN", - ]; - - for target_id in test_hashes_remote { - let has_target_id = remote_ppoi_messages + for test_hash in &radio_topics { + let has_attestation_for_topic = local_attestations .iter() - .any(|msg| msg.identifier == *target_id); + .any(|attestation| &attestation.identifier == test_hash); assert!( - has_target_id, - "No remote message found with identifier {}", - target_id + has_attestation_for_topic, + "No attestation found for ipfs hash {}", + test_hash 
); } - let non_existent_test_hash = "QmonlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG"; - - let has_non_existent_test_hash = remote_ppoi_messages - .iter() - .any(|msg| msg.identifier == non_existent_test_hash); + // Retrieve remote PPOI messages and verify expected presence and count + let remote_ppoi_messages = get_remote_ppoi_messages(&pool).await.unwrap(); + // Ensure we have received some remote PPOI messages assert!( - !has_non_existent_test_hash, - "Unexpected remote message found with identifier {}", - non_existent_test_hash + !remote_ppoi_messages.is_empty(), + "There should be at least one remote PPOI message" ); - let new_subgraphs = vec!["QmonlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG".to_string()]; // change this to your new subgraphs - process_manager - .server_state - .update_subgraphs(new_subgraphs) - .await; - - tokio::time::sleep(Duration::from_secs(50)).await; - - let persisted_state = PersistedState::load_cache(&store_path); - debug!("persisted state {:?}", persisted_state); - - let remote_ppoi_messages = persisted_state.remote_ppoi_messages(); + // Assert that the remote PPOI messages contain expected identifiers + let expected_remote_identifiers = vec![ + "Qm22default1AbcDEFghijKLmnoPQRstUVwxYzABCDEFghijklmnopq", + "Qm22default2XyZABCdefGHIjklMNOpqrstuvWXYZabcdefGHIJKLMN", + // Add more expected identifiers if necessary + ]; - let test_hash = "QmonlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG"; - let has_test_hash = remote_ppoi_messages - .iter() - .any(|msg| msg.identifier == test_hash); + for expected_id in &expected_remote_identifiers { + assert!( + remote_ppoi_messages + .iter() + .any(|msg| &msg.identifier == expected_id), + "Expected identifier {} not found in remote PPOI messages", + expected_id + ); + } + // Assert that we do not have remote PPOI messages with identifiers that should not be present + let unexpected_identifier = "Qm22onlyintestsenderXyZABCdeFgHIjklMNOpqrstuvWXYZabcdEFG"; assert!( - has_test_hash, - "Expected remote message not found with identifier {}", - test_hash + !remote_ppoi_messages + .iter() + .any(|msg| msg.identifier == unexpected_identifier), + "Unexpected identifier {} found in remote PPOI messages", + unexpected_identifier ); - teardown(process_manager, &store_path); + // Cleanup after test + teardown(process_manager, &db_path); } diff --git a/test-sender/src/main.rs b/test-sender/src/main.rs index 91b37ce..3fac4a6 100644 --- a/test-sender/src/main.rs +++ b/test-sender/src/main.rs @@ -29,30 +29,33 @@ async fn start_sender(config: TestSenderConfig) { ..Default::default() }; - let pubsub_topic = WakuPubSubTopic::from_str("/waku/2/graphcast-v0-testnet/proto").unwrap(); + let pubsub_topic = WakuPubSubTopic::from_str("/waku/2/graphcast-v0-mainnet/proto").unwrap(); - let discv5_nodes: Vec = get_dns_nodes(&pubsub_topic) + let mut discv5_nodes: Vec = get_dns_nodes(&pubsub_topic) .into_iter() .filter(|d| d.enr.is_some()) .map(|d| d.enr.unwrap().to_string()) .collect::>(); let port = find_random_udp_port(); + let discv_port = find_random_udp_port(); info!("Starting test sender instance on port {}", port); + discv5_nodes.push("enr:-P-4QJI8tS1WTdIQxq_yIrD05oIIW1Xg-tm_qfP0CHfJGnp9dfr6ttQJmHwTNxGEl4Le8Q7YHcmi-kXTtphxFysS11oBgmlkgnY0gmlwhLymh5GKbXVsdGlhZGRyc7hgAC02KG5vZGUtMDEuZG8tYW1zMy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGdl8ALzYobm9kZS0wMS5kby1hbXMzLndha3V2Mi5wcm9kLnN0YXR1c2ltLm5ldAYfQN4DiXNlY3AyNTZrMaEDbl1X_zJIw3EAJGtmHMVn4Z2xhpSoUaP5ElsHKCv7hlWDdGNwgnZfg3VkcIIjKIV3YWt1Mg8".to_string()); + let node_config = WakuNodeConfig { host: 
         host: IpAddr::from_str("127.0.0.1").ok(),
         port: Some(port.into()),
         advertise_addr: None, // Fill this for boot nodes
         node_key: None,
         keep_alive_interval: None,
-        relay: Some(false), // Default true - will receive all msg on relay
+        relay: Some(true), // Default true - will receive all msg on relay
         min_peers_to_publish: Some(0), // Default 0
-        filter: Some(true), // Default false
+        filter: Some(false), // Default false
         log_level: None,
         relay_topics: [].to_vec(),
-        discv5: Some(false),
+        discv5: Some(true),
         discv5_bootstrap_nodes: discv5_nodes,
-        discv5_udp_port: None,
+        discv5_udp_port: Some(discv_port),
         store: None,
         database_url: None,
         store_retention_max_messages: None,
@@ -67,7 +70,7 @@ async fn start_sender(config: TestSenderConfig) {
     let wallet =
         build_wallet("baf5c93f0c8aee3b945f33b9192014e83d50cec25f727a13460f6ef1eb6a5844").unwrap();

-    let pubsub_topic_str = "/waku/2/graphcast-v0-testnet/proto";
+    let pubsub_topic_str = "/waku/2/graphcast-v0-mainnet/proto";
     let pubsub_topic = WakuPubSubTopic::from_str(pubsub_topic_str).unwrap();
     loop {
         for topic in config.topics.clone() {
@@ -90,7 +93,7 @@ async fn start_sender(config: TestSenderConfig) {
                 topic.clone(),
                 config.poi.clone().unwrap(),
                 nonce.unwrap_or(timestamp),
-                NetworkName::Goerli,
+                NetworkName::Mainnet,
                 block_number.try_into().unwrap(),
                 config.block_hash.clone().unwrap(),
                 "0x7e6528e4ce3055e829a32b5dc4450072bac28bc6".to_string(),
@@ -113,7 +116,7 @@ async fn start_sender(config: TestSenderConfig) {

                 match graphcast_message.send_to_waku(
                     &node_handle,
-                    WakuPubSubTopic::from_str("/waku/2/graphcast-v0-testnet/proto").unwrap(),
+                    WakuPubSubTopic::from_str("/waku/2/graphcast-v0-mainnet/proto").unwrap(),
                     content_topic,
                 ) {
                     Ok(id) => {
@@ -138,7 +141,7 @@ async fn start_sender(config: TestSenderConfig) {

                 match graphcast_message.send_to_waku(
                     &node_handle,
-                    WakuPubSubTopic::from_str("/waku/2/graphcast-v0-testnet/proto").unwrap(),
+                    WakuPubSubTopic::from_str("/waku/2/graphcast-v0-mainnet/proto").unwrap(),
                     content_topic,
                 ) {
                     Ok(id) => {
diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml
index ad73f1b..cb2f918 100644
--- a/test-utils/Cargo.toml
+++ b/test-utils/Cargo.toml
@@ -43,3 +43,4 @@ ethers-contract = "2.0.4"
 ethers-core = "2.0.4"
 ethers-derive-eip712 = "1.0.2"
 async-graphql = "4.0.16"
+sqlx = { version = "0.7.2", features = ["sqlite", "runtime-tokio"] }
diff --git a/test-utils/src/config.rs b/test-utils/src/config.rs
index d2fbae5..99c9aef 100644
--- a/test-utils/src/config.rs
+++ b/test-utils/src/config.rs
@@ -6,6 +6,8 @@ use serde::{Deserialize, Serialize};
 use subgraph_radio::config::{Config, CoverageLevel, GraphStack, RadioSetup, Waku};
 use subgraph_radio::operator::notifier::NotificationMode;

+use crate::find_random_udp_port;
+
 #[derive(Clone, Debug, Parser, Serialize, Deserialize)]
 #[clap(name = "test-sender", about = "Mock message sender")]
 pub struct TestSenderConfig {
@@ -49,14 +51,14 @@ pub fn test_config() -> Config {
                 waku_addr: None,
                 boot_node_addresses: vec![],
                 waku_log_level: "fatal".to_string(),
-                discv5_enrs: None,
-                discv5_port: None,
+                discv5_enrs: Some(vec!["enr:-P-4QJI8tS1WTdIQxq_yIrD05oIIW1Xg-tm_qfP0CHfJGnp9dfr6ttQJmHwTNxGEl4Le8Q7YHcmi-kXTtphxFysS11oBgmlkgnY0gmlwhLymh5GKbXVsdGlhZGRyc7hgAC02KG5vZGUtMDEuZG8tYW1zMy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGdl8ALzYobm9kZS0wMS5kby1hbXMzLndha3V2Mi5wcm9kLnN0YXR1c2ltLm5ldAYfQN4DiXNlY3AyNTZrMaEDbl1X_zJIw3EAJGtmHMVn4Z2xhpSoUaP5ElsHKCv7hlWDdGNwgnZfg3VkcIIjKIV3YWt1Mg8".to_string()]),
+                discv5_port: Some(find_random_udp_port()),
                 filter_protocol: None,
             }
         },
         radio_setup: {
             RadioSetup {
-                graphcast_network: GraphcastNetworkName::Testnet,
+                graphcast_network: GraphcastNetworkName::Mainnet,
                 topics: vec![],
                 gossip_topic_coverage: CoverageLevel::OnChain,
                 auto_upgrade_ratelimit: 60000,
@@ -71,7 +73,6 @@ pub fn test_config() -> Config {
                 metrics_port: None,
                 server_host: String::new(),
                 server_port: None,
-                persistence_file_path: None,
                 log_format: LogFormat::Pretty,
                 radio_name: String::new(),
                 telegram_chat_id: None,
@@ -81,6 +82,7 @@ pub fn test_config() -> Config {
                 auto_upgrade_coverage: CoverageLevel::OnChain,
                 notification_mode: NotificationMode::Live,
                 notification_interval: 24,
+                sqlite_file_path: None,
             }
         },
         config_file: None,
diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs
index 961b746..7c84b2d 100644
--- a/test-utils/src/lib.rs
+++ b/test-utils/src/lib.rs
@@ -70,12 +70,12 @@ pub async fn setup(
     test_file_name: &str,
     test_sender_config: &mut TestSenderConfig,
 ) -> ProcessManager {
-    if let Some(file_path) = &config.radio_setup().persistence_file_path {
-        let path = Path::new(file_path);
-        if path.exists() {
-            fs::remove_file(path).expect("Failed to remove file");
-        }
-    }
+    // if let Some(file_path) = &config.radio_setup().sqlite_file_path {
+    //     let path = Path::new(file_path);
+    //     if path.exists() {
+    //         fs::remove_file(path).expect("Failed to remove db file");
+    //     }
+    // }

     let id = uuid::Uuid::new_v4().to_string();
     let radio_name = format!("{}-{}", test_file_name, id);
@@ -148,15 +148,15 @@ pub async fn setup(
     }
 }

-pub fn teardown(process_manager: ProcessManager, store_path: &str) {
+pub fn teardown(process_manager: ProcessManager, db_path: &str) {
     // Kill the processes
     for sender in &process_manager.senders {
         let _ = sender.lock().unwrap().kill();
     }
     let _ = process_manager.radio.lock().unwrap().kill();

-    if Path::new(&store_path).exists() {
-        fs::remove_file(store_path).unwrap();
+    if Path::new(&db_path).exists() {
+        fs::remove_file(db_path).unwrap();
     }
 }

@@ -166,7 +166,7 @@ pub fn start_radio(config: &Config) -> Child {
         .arg("-p")
         .arg("subgraph-radio")
         .arg("--")
-        .arg("--graph-node-endpoint")
+        .arg("--graph-node-status-endpoint")
         .arg(&config.graph_stack().graph_node_status_endpoint)
         .arg("--private-key")
         .arg(
@@ -223,11 +223,11 @@ pub fn start_radio(config: &Config) -> Child {
                 .as_deref()
                 .unwrap_or("None"),
         )
-        .arg("--persistence-file-path")
+        .arg("--sqlite-file-path")
         .arg(
             config
                 .radio_setup()
-                .persistence_file_path
+                .sqlite_file_path
                 .as_deref()
                 .unwrap_or("None"),
         )