From 82587bd207bf6176b71b740125c36455b94ce4a9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 8 Mar 2023 12:58:17 -0800 Subject: [PATCH 0001/2104] store: Treat 'no connection to the server' as db not available So far we did not treat the Postgres error "no connection to the server" as an indication that the database was unavailable. It's a little murky what exactly that error indicates, but it seems to indicate that an existing connection got killed, e.g., because the database crashed. We now treat this as an indication that the database is not available, which will trigger the right action further up in the stack. For example, on indexing, db operations will be retried instead of causing the subgraph to fail. Queries will be aborted with an error message. --- store/postgres/src/connection_pool.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index 756db59bad0..6040ecc71e9 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -566,7 +566,6 @@ impl r2d2::HandleError for ErrorHandler { // in a locale other than English. In that case, their database will // be marked as unavailable even though it is perfectly fine. 
if msg.contains("canceling statement") - || msg.contains("no connection to the server") || msg.contains("terminating connection due to conflict with recovery") { return; From 2d55cabf8d7771af4932c4d49258e2a64fb6fe14 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 10 Mar 2023 14:05:56 -0800 Subject: [PATCH 0002/2104] runtime: Log additional info on handler failure --- runtime/wasm/src/module/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 4937bc62fcd..9d7151b7751 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -9,6 +9,7 @@ use std::time::Instant; use anyhow::anyhow; use anyhow::Error; +use graph::slog::SendSyncRefUnwindSafeKV; use never::Never; use semver::Version; use wasmtime::{Memory, Trap}; @@ -171,8 +172,9 @@ impl WasmInstance { { let handler_name = trigger.handler_name().to_owned(); let gas = self.gas.clone(); + let logging_extras = trigger.logging_extras().cheap_clone(); let asc_trigger = trigger.to_asc_ptr(self.instance_ctx_mut().deref_mut(), &gas)?; - self.invoke_handler(&handler_name, asc_trigger) + self.invoke_handler(&handler_name, asc_trigger, logging_extras) } pub fn take_ctx(&mut self) -> WasmInstanceContext { @@ -201,6 +203,7 @@ impl WasmInstance { &mut self, handler: &str, arg: AscPtr, + logging_extras: Arc, ) -> Result<(BlockState, Gas), MappingError> { let func = self .instance @@ -260,6 +263,7 @@ impl WasmInstance { "Handler skipped due to execution failure"; "handler" => handler, "error" => &message, + logging_extras ); let subgraph_error = SubgraphError { subgraph_id: self.instance_ctx().ctx.host_exports.subgraph_id.clone(), From b34116ebd3c2a542c57d01841579c0e20263a004 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 10 Mar 2023 15:28:38 -0800 Subject: [PATCH 0003/2104] store: Return a struct for the ManifestInfo --- store/postgres/src/deployment.rs | 55 +++++++++++++++----------- 
store/postgres/src/deployment_store.rs | 14 +++---- 2 files changed, 40 insertions(+), 29 deletions(-) diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index bf392d8d8d5..4d5aedaeed2 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -302,28 +302,39 @@ pub fn schema(conn: &PgConnection, site: &Site) -> Result<(Schema, bool), StoreE .map(|schema| (schema, use_bytea_prefix)) } -pub fn manifest_info( - conn: &PgConnection, - site: &Site, -) -> Result<(Schema, Option, Option, String), StoreError> { - use subgraph_manifest as sm; - let (s, description, repository, spec_version): ( - String, - Option, - Option, - String, - ) = sm::table - .select(( - sm::schema, - sm::description, - sm::repository, - sm::spec_version, - )) - .filter(sm::id.eq(site.id)) - .first(conn)?; - Schema::parse(s.as_str(), site.deployment.clone()) - .map_err(StoreError::Unknown) - .map(|schema| (schema, description, repository, spec_version)) +pub struct ManifestInfo { + pub input_schema: Schema, + pub description: Option, + pub repository: Option, + pub spec_version: String, +} + +impl ManifestInfo { + pub fn load(conn: &PgConnection, site: &Site) -> Result { + use subgraph_manifest as sm; + let (s, description, repository, spec_version): ( + String, + Option, + Option, + String, + ) = sm::table + .select(( + sm::schema, + sm::description, + sm::repository, + sm::spec_version, + )) + .filter(sm::id.eq(site.id)) + .first(conn)?; + let input_schema = Schema::parse(s.as_str(), site.deployment.clone())?; + + Ok(ManifestInfo { + input_schema, + description, + repository, + spec_version, + }) + } } #[allow(dead_code)] diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 29d103af2d2..7a9c8970a87 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -553,8 +553,7 @@ impl DeploymentStore { return Ok(info.clone()); } - let (input_schema, 
description, repository, spec_version) = - deployment::manifest_info(conn, site)?; + let manifest_info = deployment::ManifestInfo::load(conn, site)?; let graft_block = deployment::graft_point(conn, &site.deployment)?.map(|(_, ptr)| ptr.number); @@ -567,14 +566,15 @@ impl DeploymentStore { for version in VERSIONS.iter() { let api_version = ApiVersion::from_version(version).expect("Invalid API version"); - let mut schema = input_schema.clone(); + let mut schema = manifest_info.input_schema.clone(); schema.document = api_schema(&schema.document).map_err(|e| StoreError::Unknown(e.into()))?; schema.add_subgraph_id_directives(site.deployment.clone()); api.insert(api_version, Arc::new(ApiSchema::from_api_schema(schema)?)); } - let spec_version = Version::from_str(&spec_version).map_err(anyhow::Error::from)?; + let spec_version = + Version::from_str(&manifest_info.spec_version).map_err(anyhow::Error::from)?; let poi_version = if spec_version.ge(&SPEC_VERSION_0_0_6) { ProofOfIndexingVersion::Fast } else { @@ -582,12 +582,12 @@ impl DeploymentStore { }; let info = SubgraphInfo { - input: Arc::new(input_schema), + input: Arc::new(manifest_info.input_schema), api, graft_block, debug_fork, - description, - repository, + description: manifest_info.description, + repository: manifest_info.repository, poi_version, }; From c1861a5304391b7fc992752b2d1e70c489b7e615 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 10 Mar 2023 17:21:49 -0800 Subject: [PATCH 0004/2104] all: Store an 'instrument' flag for deployments Setting this flag will make subgraph processing log additional information to help with diagnosing subgraph failures. The flag is stored in subgraph_manifest.features by adding the string 'instrument' to the features. 
When the flag is set, all calls to `store_set`, `store_get` and `store_remove` are logged --- chain/substreams/src/trigger.rs | 1 + core/src/subgraph/context.rs | 4 ++++ core/src/subgraph/inputs.rs | 4 ++++ core/src/subgraph/instance_manager.rs | 2 ++ core/src/subgraph/runner.rs | 3 +++ core/src/subgraph/trigger_processor.rs | 2 ++ graph/src/components/store/traits.rs | 5 +++++ graph/src/components/subgraph/host.rs | 1 + graph/src/components/trigger_processor.rs | 1 + runtime/test/src/common.rs | 1 + runtime/wasm/src/host.rs | 4 ++++ runtime/wasm/src/mapping.rs | 3 +++ runtime/wasm/src/module/mod.rs | 26 ++++++++++++++++++----- store/postgres/src/deployment.rs | 11 +++++++++- store/postgres/src/deployment_store.rs | 2 ++ store/postgres/src/subgraph_store.rs | 8 +++++++ 16 files changed, 72 insertions(+), 6 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index f4a7729c78c..334c5db30df 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -172,6 +172,7 @@ where causality_region: &str, _debug_fork: &Option>, _subgraph_metrics: &Arc, + _instrument: bool, ) -> Result, MappingError> { for entity_change in block.changes.entity_changes.iter() { match entity_change.operation() { diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index 710c8d1e57f..c5cb800d63e 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -93,6 +93,7 @@ impl> IndexingContext { causality_region: &str, debug_fork: &Option>, subgraph_metrics: &Arc, + instrument: bool, ) -> Result, MappingError> { self.process_trigger_in_hosts( logger, @@ -104,6 +105,7 @@ impl> IndexingContext { causality_region, debug_fork, subgraph_metrics, + instrument, ) .await } @@ -119,6 +121,7 @@ impl> IndexingContext { causality_region: &str, debug_fork: &Option>, subgraph_metrics: &Arc, + instrument: bool, ) -> Result, MappingError> { self.trigger_processor .process_trigger( @@ -131,6 +134,7 @@ impl> 
IndexingContext { causality_region, debug_fork, subgraph_metrics, + instrument, ) .await } diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index 191dc69cbf4..11b35352f85 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -28,4 +28,8 @@ pub struct IndexingInputs { // Correspondence between data source or template position in the manifest and name. pub manifest_idx_and_name: Vec<(u32, String)>, + + /// Whether to instrument trigger processing and log additional, + /// possibly expensive and noisy, information + pub instrument: bool, } diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 6ac471dabb1..08ca909d9df 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -403,6 +403,7 @@ impl SubgraphInstanceManager { let causality_region_seq = CausalityRegionSeq::from_current(store.causality_region_curr_val().await?); + let instrument = self.subgraph_store.instrument(&deployment)?; let instance = super::context::instance::SubgraphInstance::from_manifest( &logger, manifest, @@ -427,6 +428,7 @@ impl SubgraphInstanceManager { manifest_idx_and_name, poi_version, network, + instrument, }; // The subgraph state tracks the state of the subgraph instance over time diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 521e68efe0c..479d0f0b1bd 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -325,6 +325,7 @@ where &causality_region, &self.inputs.debug_fork, &self.metrics.subgraph, + self.inputs.instrument, ) .await .map_err(|e| { @@ -504,6 +505,7 @@ where causality_region, &self.inputs.debug_fork, &self.metrics.subgraph, + self.inputs.instrument, ) .await .map_err(move |mut e| { @@ -666,6 +668,7 @@ where causality_region, &self.inputs.debug_fork, &self.metrics.subgraph, + self.inputs.instrument, ) .await .map_err(move |err| { diff --git a/core/src/subgraph/trigger_processor.rs 
b/core/src/subgraph/trigger_processor.rs index 2eeb8275500..02002a32c88 100644 --- a/core/src/subgraph/trigger_processor.rs +++ b/core/src/subgraph/trigger_processor.rs @@ -30,6 +30,7 @@ where causality_region: &str, debug_fork: &Option>, subgraph_metrics: &Arc, + instrument: bool, ) -> Result, MappingError> { let error_count = state.deterministic_errors.len(); @@ -71,6 +72,7 @@ where state, proof_of_indexing.cheap_clone(), debug_fork, + instrument, ) .await?; let elapsed = start.elapsed().as_secs_f64(); diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 31c24550fbc..04bc36aa1e6 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -169,6 +169,11 @@ pub trait SubgraphStore: Send + Sync + 'static { hash: &DeploymentHash, raw_yaml: String, ) -> Result<(), StoreError>; + + /// Return `true` if the `instrument` flag for the deployment is set. + /// When this flag is set, indexing of the deployment should log + /// additional diagnostic information + fn instrument(&self, deployment: &DeploymentLocator) -> Result; } pub trait ReadStore: Send + Sync + 'static { diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index 72af7800c66..7152e1705af 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -63,6 +63,7 @@ pub trait RuntimeHost: Send + Sync + 'static { state: BlockState, proof_of_indexing: SharedProofOfIndexing, debug_fork: &Option>, + instrument: bool, ) -> Result, MappingError>; /// Block number in which this host was created. 
diff --git a/graph/src/components/trigger_processor.rs b/graph/src/components/trigger_processor.rs index ce02a212a6c..ce45caaac59 100644 --- a/graph/src/components/trigger_processor.rs +++ b/graph/src/components/trigger_processor.rs @@ -27,5 +27,6 @@ where causality_region: &str, debug_fork: &Option>, subgraph_metrics: &Arc, + instrument: bool, ) -> Result, MappingError>; } diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index d6cf565b31b..16d95a95a1d 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -116,6 +116,7 @@ pub fn mock_context( host_fns: Arc::new(Vec::new()), debug_fork: None, mapping_logger: Logger::root(slog::Discard, o!()), + instrument: false, } } diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index 8055765084b..cfacd7b8a6c 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -159,6 +159,7 @@ where block_ptr: BlockPtr, proof_of_indexing: SharedProofOfIndexing, debug_fork: &Option>, + instrument: bool, ) -> Result, MappingError> { let handler = trigger.handler_name().to_string(); @@ -186,6 +187,7 @@ where host_fns: self.host_fns.cheap_clone(), debug_fork: debug_fork.cheap_clone(), mapping_logger: Logger::new(&logger, o!("component" => "UserMapping")), + instrument, }, trigger, result_sender, @@ -240,6 +242,7 @@ impl RuntimeHostTrait for RuntimeHost { state: BlockState, proof_of_indexing: SharedProofOfIndexing, debug_fork: &Option>, + instrument: bool, ) -> Result, MappingError> { self.send_mapping_request( logger, @@ -248,6 +251,7 @@ impl RuntimeHostTrait for RuntimeHost { block_ptr, proof_of_indexing, debug_fork, + instrument, ) .await } diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index 8a47420191c..a006030343a 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -126,6 +126,8 @@ pub struct MappingContext { pub debug_fork: Option>, /// Logger for messages coming from mappings pub mapping_logger: Logger, + /// Whether 
to log details about host fn execution + pub instrument: bool, } impl MappingContext { @@ -139,6 +141,7 @@ impl MappingContext { host_fns: self.host_fns.cheap_clone(), debug_fork: self.debug_fork.cheap_clone(), mapping_logger: Logger::new(&self.logger, o!("component" => "UserMapping")), + instrument: self.instrument, } } } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 9d7151b7751..5996773784f 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -925,10 +925,16 @@ impl WasmInstanceContext { let stopwatch = &self.host_metrics.stopwatch; stopwatch.start_section("host_export_store_set__wasm_instance_context_store_set"); - let entity = asc_get(self, entity_ptr, gas)?; - let id = asc_get(self, id_ptr, gas)?; + let entity: String = asc_get(self, entity_ptr, gas)?; + let id: String = asc_get(self, id_ptr, gas)?; let data = asc_get(self, data_ptr, gas)?; + if self.ctx.instrument { + debug!(self.ctx.logger, "store_set"; + "type" => &entity, + "id" => &id); + } + self.ctx.host_exports.store_set( &self.ctx.logger, &mut self.ctx.state, @@ -950,8 +956,13 @@ impl WasmInstanceContext { entity_ptr: AscPtr, id_ptr: AscPtr, ) -> Result<(), HostExportError> { - let entity = asc_get(self, entity_ptr, gas)?; - let id = asc_get(self, id_ptr, gas)?; + let entity: String = asc_get(self, entity_ptr, gas)?; + let id: String = asc_get(self, id_ptr, gas)?; + if self.ctx.instrument { + debug!(self.ctx.logger, "store_remove"; + "type" => &entity, + "id" => &id); + } self.ctx.host_exports.store_remove( &self.ctx.logger, &mut self.ctx.state, @@ -982,7 +993,12 @@ impl WasmInstanceContext { id.clone(), gas, )?; - + if self.ctx.instrument { + debug!(self.ctx.logger, "store_get"; + "type" => &entity_type, + "id" => &id, + "found" => entity_option.is_some()); + } let ret = match entity_option { Some(entity) => { let _section = self diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 
4d5aedaeed2..1ee969d85b3 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -307,32 +307,41 @@ pub struct ManifestInfo { pub description: Option, pub repository: Option, pub spec_version: String, + pub instrument: bool, } impl ManifestInfo { pub fn load(conn: &PgConnection, site: &Site) -> Result { use subgraph_manifest as sm; - let (s, description, repository, spec_version): ( + let (s, description, repository, spec_version, features): ( String, Option, Option, String, + Vec, ) = sm::table .select(( sm::schema, sm::description, sm::repository, sm::spec_version, + sm::features, )) .filter(sm::id.eq(site.id)) .first(conn)?; let input_schema = Schema::parse(s.as_str(), site.deployment.clone())?; + // Using the features field to store the instrument flag is a bit + // backhanded, but since this will be used very rarely, should not + // cause any headaches + let instrument = features.iter().any(|s| s == "instrument"); + Ok(ManifestInfo { input_schema, description, repository, spec_version, + instrument, }) } } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 7a9c8970a87..782b81831ae 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -80,6 +80,7 @@ pub(crate) struct SubgraphInfo { pub(crate) description: Option, pub(crate) repository: Option, pub(crate) poi_version: ProofOfIndexingVersion, + pub(crate) instrument: bool, } pub struct StoreInner { @@ -589,6 +590,7 @@ impl DeploymentStore { description: manifest_info.description, repository: manifest_info.repository, poi_version, + instrument: manifest_info.instrument, }; // Insert the schema into the cache. 
diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 9ed0ca09c12..f1c635b57fb 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1416,4 +1416,12 @@ impl SubgraphStoreTrait for SubgraphStore { let (store, site) = self.store(hash)?; store.set_manifest_raw_yaml(site, raw_yaml).await } + + fn instrument(&self, deployment: &DeploymentLocator) -> Result { + let site = self.find_site(deployment.id.into())?; + let store = self.for_site(&site)?; + + let info = store.subgraph_info(&site)?; + Ok(info.instrument) + } } From 5da48f4bfbd49933d93282b90bb0518153a620b2 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Mon, 6 Mar 2023 23:03:31 +0000 Subject: [PATCH 0005/2104] add codespace for rust with database and ipfs --- .devcontainer/Dockerfile | 4 +++ .devcontainer/devcontainer.json | 54 ++++++++++++++++++++++++++++++++ .devcontainer/docker-compose.yml | 27 ++++++++++++++++ 3 files changed, 85 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .devcontainer/docker-compose.yml diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000000..18915297304 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,4 @@ +FROM mcr.microsoft.com/devcontainers/rust:0-1-bullseye + +RUN rustup install 1.66.0-x86_64-unknown-linux-gnu +RUN apt update && apt install -y protobuf-compiler \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..ed6e0d2fb5d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,54 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/rust +{ + "name": "Rust", + // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile + "dockerComposeFile": "docker-compose.yml", + "service": "devcontainer", + "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}", + "features": { + "ghcr.io/devcontainers/features/rust:1": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "rust-lang.rust-analyzer", // rust analyser + "cschleiden.vscode-github-actions", // github actions + "serayuzgur.crates", // crates + "vadimcn.vscode-lldb" //debug + ], + "settings": { + "editor.formatOnSave": true, + "terminal.integrated.defaultProfile.linux": "zsh" + } + } + }, + + // Use 'mounts' to make the cargo cache persistent in a Docker Volume. + // "mounts": [ + // { + // "source": "devcontainer-cargo-cache-${devcontainerId}", + // "target": "/usr/local/cargo", + // "type": "volume" + // } + // ] + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + "forwardPorts": [ + 8000, // GraphiQL on node-port + 8020, // create and deploy subgraphs + 5001 //ipfs + ] + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "rustc --version", + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 00000000000..4c1f8809935 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,27 @@ +version: '3' + +services: + devcontainer: + build: + context: . 
+ dockerfile: Dockerfile + volumes: + - ../..:/workspaces:cached + network_mode: service:database + command: sleep infinity + ipfs: + image: ipfs/kubo:v0.18.1 + restart: unless-stopped + network_mode: service:database + database: + image: postgres:latest + restart: unless-stopped + volumes: + - postgres-data:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + POSTGRES_DB: postgres + +volumes: + postgres-data: \ No newline at end of file From d5dfe24917ee1f4d35d4869811389a5c8b36151e Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Mon, 6 Mar 2023 23:40:48 +0000 Subject: [PATCH 0006/2104] add missing locale for postgres --- .devcontainer/docker-compose.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 4c1f8809935..128a396321d 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -16,12 +16,19 @@ services: database: image: postgres:latest restart: unless-stopped + command: + [ + "postgres", + "-cshared_preload_libraries=pg_stat_statements" + ] volumes: - postgres-data:/var/lib/postgresql/data environment: - POSTGRES_PASSWORD: postgres - POSTGRES_USER: postgres - POSTGRES_DB: postgres + POSTGRES_USER: graph-node + POSTGRES_PASSWORD: let-me-in + POSTGRES_DB: graph-node + + POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C" volumes: postgres-data: \ No newline at end of file From b8de8ff31e4073e1e5737ed1cde5b0831a012420 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Fri, 10 Mar 2023 17:42:39 -0300 Subject: [PATCH 0007/2104] docker: add rustfmt component --- .devcontainer/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 18915297304..a5200db5f71 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,5 @@ FROM mcr.microsoft.com/devcontainers/rust:0-1-bullseye RUN rustup install 
1.66.0-x86_64-unknown-linux-gnu -RUN apt update && apt install -y protobuf-compiler \ No newline at end of file +RUN rustup component add rustfmt-preview +RUN apt update && apt install -y protobuf-compiler \ No newline at end of file From 4fa59a6c5f0c4cbe85da486647d97cce00f174ec Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Fri, 10 Mar 2023 22:57:15 +0000 Subject: [PATCH 0008/2104] codespaces: set rust version to 1.66.0 --- .devcontainer/Dockerfile | 5 ----- .devcontainer/devcontainer.json | 20 ++++---------------- .devcontainer/docker-compose.yml | 4 +--- 3 files changed, 5 insertions(+), 24 deletions(-) delete mode 100644 .devcontainer/Dockerfile diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index a5200db5f71..00000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM mcr.microsoft.com/devcontainers/rust:0-1-bullseye - -RUN rustup install 1.66.0-x86_64-unknown-linux-gnu -RUN rustup component add rustfmt-preview -RUN apt update && apt install -y protobuf-compiler \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ed6e0d2fb5d..4bd1bc06468 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,17 +2,18 @@ // README at: https://github.com/devcontainers/templates/tree/main/src/rust { "name": "Rust", - // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile "dockerComposeFile": "docker-compose.yml", "service": "devcontainer", "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}", "features": { - "ghcr.io/devcontainers/features/rust:1": {} + "ghcr.io/devcontainers/features/rust:1": { + "version": "1.66.0" + } }, "customizations": { "vscode": { "extensions": [ - "rust-lang.rust-analyzer", // rust analyser + "rust-lang.rust-analyzer@prerelease", // rust analyser, pre-release has less bugs "cschleiden.vscode-github-actions", // github actions "serayuzgur.crates", // crates "vadimcn.vscode-lldb" //debug @@ -32,23 +33,10 @@ // "type": "volume" // } // ] - - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - - // Use 'forwardPorts' to make a list of ports inside the container available locally. "forwardPorts": [ 8000, // GraphiQL on node-port 8020, // create and deploy subgraphs 5001 //ipfs ] - // Use 'postCreateCommand' to run commands after the container is created. - // "postCreateCommand": "rustc --version", - - // Configure tool-specific properties. - // "customizations": {}, - - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. - // "remoteUser": "root" } diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 128a396321d..d26201cc800 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -2,9 +2,7 @@ version: '3' services: devcontainer: - build: - context: . 
- dockerfile: Dockerfile + image: mcr.microsoft.com/vscode/devcontainers/rust:bullseye volumes: - ../..:/workspaces:cached network_mode: service:database From 62d93f57ce3c844e6dbc9eed46ab5830b9633861 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 1 Mar 2023 23:30:26 -0300 Subject: [PATCH 0009/2104] graph, graphql, store: add filtering with fulltext search --- graph/src/components/store/mod.rs | 3 ++- graphql/src/schema/api.rs | 5 ++++ graphql/src/store/query.rs | 33 +++++++++++++++++------- store/postgres/src/relational_queries.rs | 3 ++- 4 files changed, 33 insertions(+), 11 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 68aac1eaa85..1b9a7c8e06e 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -184,6 +184,7 @@ pub enum EntityFilter { NotEndsWithNoCase(Attribute, Value), ChangeBlockGte(BlockNumber), Child(Child), + Fulltext(Attribute, Value), } // A somewhat concise string representation of a filter @@ -198,7 +199,7 @@ impl fmt::Display for EntityFilter { Or(fs) => { write!(f, "{}", fs.iter().map(|f| f.to_string()).join(" or ")) } - Equal(a, v) => write!(f, "{a} = {v}"), + Equal(a, v) | Fulltext(a, v) => write!(f, "{a} = {v}"), Not(a, v) => write!(f, "{a} != {v}"), GreaterThan(a, v) => write!(f, "{a} > {v}"), LessThan(a, v) => write!(f, "{a} < {v}"), diff --git a/graphql/src/schema/api.rs b/graphql/src/schema/api.rs index dad30d997ae..fc9186e2d6d 100644 --- a/graphql/src/schema/api.rs +++ b/graphql/src/schema/api.rs @@ -592,6 +592,11 @@ fn query_field_for_fulltext(fulltext: &Directive) -> Option { }, // block: BlockHeight block_argument(), + input_value( + "where", + "", + Type::NamedType(format!("{}_filter", entity_name)), + ), ]; arguments.push(subgraph_error_argument()); diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 578b971d76a..67be06dfd1e 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -174,18 
+174,33 @@ fn build_filter( field: &a::Field, schema: &ApiSchema, ) -> Result, QueryExecutionError> { - match field.argument_value("where") { + let where_filter = match field.argument_value("where") { Some(r::Value::Object(object)) => match build_filter_from_object(entity, object, schema) { - Ok(filter) => Ok(Some(EntityFilter::And(filter))), + Ok(filter) => Ok(Some(filter)), Err(e) => Err(e), }, - Some(r::Value::Null) => Ok(None), - None => match field.argument_value("text") { - Some(r::Value::Object(filter)) => build_fulltext_filter_from_object(filter), - None => Ok(None), - _ => Err(QueryExecutionError::InvalidFilterError), - }, + Some(r::Value::Null) | None => Ok(None), + _ => Err(QueryExecutionError::InvalidFilterError), + }; + + + let text_filter = match field.argument_value("text") { + Some(r::Value::Object(filter)) => build_fulltext_filter_from_object(filter), + None => Ok(None), _ => Err(QueryExecutionError::InvalidFilterError), + }; + let mut entity_filter: Vec = vec![]; + if let Some(filter) = text_filter? { + entity_filter.push(filter); + } + if let Some(filter) = where_filter? 
{ + entity_filter.extend(filter); + } + + match entity_filter.len() { + 0 => Ok(None), + 1 => Ok(entity_filter.pop()), + _ => Ok(Some(EntityFilter::And(entity_filter))) } } @@ -196,7 +211,7 @@ fn build_fulltext_filter_from_object( Err(QueryExecutionError::FulltextQueryRequiresFilter), |(key, value)| { if let r::Value::String(s) = value { - Ok(Some(EntityFilter::Equal( + Ok(Some(EntityFilter::Fulltext( key.to_string(), Value::String(s.clone()), ))) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index c267ce5defc..a6cd610954a 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -954,6 +954,7 @@ impl<'a> QueryFilter<'a> { | NotContains(attr, _) | NotContainsNoCase(attr, _) | Equal(attr, _) + | Fulltext(attr, _) | Not(attr, _) | GreaterThan(attr, _) | LessThan(attr, _) @@ -1404,7 +1405,7 @@ impl<'a> QueryFragment for QueryFilter<'a> { NotContains(attr, value) => self.contains(attr, value, true, true, out)?, NotContainsNoCase(attr, value) => self.contains(attr, value, true, false, out)?, - Equal(attr, value) => self.equals(attr, value, c::Equal, out)?, + Equal(attr, value) | Fulltext(attr, value) => self.equals(attr, value, c::Equal, out)?, Not(attr, value) => self.equals(attr, value, c::NotEqual, out)?, GreaterThan(attr, value) => self.compare(attr, value, c::Greater, out)?, From 71d39f8c9ba2bbeb393252e27e484b13a9e41dd2 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 14:19:07 -0300 Subject: [PATCH 0010/2104] graphql: cargo format --- graphql/src/store/query.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 67be06dfd1e..3cc2f24dd95 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -183,7 +183,6 @@ fn build_filter( _ => Err(QueryExecutionError::InvalidFilterError), }; - let text_filter = match field.argument_value("text") { 
Some(r::Value::Object(filter)) => build_fulltext_filter_from_object(filter), None => Ok(None), @@ -200,7 +199,7 @@ fn build_filter( match entity_filter.len() { 0 => Ok(None), 1 => Ok(entity_filter.pop()), - _ => Ok(Some(EntityFilter::And(entity_filter))) + _ => Ok(Some(EntityFilter::And(entity_filter))), } } From 8e5e6ce27cb0ff909478549b1be00d7756c5fc65 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 14:59:15 -0300 Subject: [PATCH 0011/2104] graphql: fix test --- graphql/src/store/query.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 3cc2f24dd95..ce403785ab4 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -198,7 +198,6 @@ fn build_filter( match entity_filter.len() { 0 => Ok(None), - 1 => Ok(entity_filter.pop()), _ => Ok(Some(EntityFilter::And(entity_filter))), } } From c20cb3cc48dfcb1b2d75084c22e7124cf05646a8 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Thu, 9 Mar 2023 16:51:31 -0300 Subject: [PATCH 0012/2104] graphql: use match for where and text filters --- graphql/src/store/query.rs | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index ce403785ab4..9913a0b13b4 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -176,29 +176,23 @@ fn build_filter( ) -> Result, QueryExecutionError> { let where_filter = match field.argument_value("where") { Some(r::Value::Object(object)) => match build_filter_from_object(entity, object, schema) { - Ok(filter) => Ok(Some(filter)), + Ok(filter) => Ok(Some(EntityFilter::And(filter))), Err(e) => Err(e), }, Some(r::Value::Null) | None => Ok(None), _ => Err(QueryExecutionError::InvalidFilterError), - }; + }?; let text_filter = match field.argument_value("text") { Some(r::Value::Object(filter)) => build_fulltext_filter_from_object(filter), None => Ok(None), _ => 
Err(QueryExecutionError::InvalidFilterError), - }; - let mut entity_filter: Vec = vec![]; - if let Some(filter) = text_filter? { - entity_filter.push(filter); - } - if let Some(filter) = where_filter? { - entity_filter.extend(filter); - } + }?; - match entity_filter.len() { - 0 => Ok(None), - _ => Ok(Some(EntityFilter::And(entity_filter))), + match (where_filter, text_filter) { + (None, None) => Ok(None), + (Some(f), None) | (None, Some(f)) => Ok(Some(f)), + (Some(w), Some(t)) => Ok(Some(EntityFilter::And(vec![t, w]))), } } From bf3551351d1dfaf60ad39772c806f7215e17d163 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Thu, 9 Mar 2023 21:18:15 -0300 Subject: [PATCH 0013/2104] store: fix sort key when full text --- store/postgres/src/relational_queries.rs | 34 +++++++++++++++++------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index a6cd610954a..bb60880935c 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1405,7 +1405,9 @@ impl<'a> QueryFragment for QueryFilter<'a> { NotContains(attr, value) => self.contains(attr, value, true, true, out)?, NotContainsNoCase(attr, value) => self.contains(attr, value, true, false, out)?, - Equal(attr, value) | Fulltext(attr, value) => self.equals(attr, value, c::Equal, out)?, + Equal(attr, value) | Fulltext(attr, value) => { + self.equals(attr, value, c::Equal, out)? 
+ } Not(attr, value) => self.equals(attr, value, c::NotEqual, out)?, GreaterThan(attr, value) => self.compare(attr, value, c::Greater, out)?, @@ -2785,6 +2787,20 @@ impl<'a> SortKey<'a> { block: BlockNumber, layout: &'a Layout, ) -> Result { + fn sort_key_from_value<'a>( + column: &'a Column, + value: &'a Value, + direction: &'static str, + ) -> Result, QueryExecutionError> { + let sort_value = value.as_str(); + + Ok(SortKey::Key { + column, + value: sort_value, + direction, + }) + } + fn with_key<'a>( table: &'a Table, attribute: String, @@ -2795,15 +2811,15 @@ impl<'a> SortKey<'a> { let column = table.column_for_field(&attribute)?; if column.is_fulltext() { match filter { - Some(EntityFilter::Equal(_, value)) => { - let sort_value = value.as_str(); - - Ok(SortKey::Key { - column, - value: sort_value, - direction, - }) + Some(EntityFilter::Fulltext(_, value)) => { + sort_key_from_value(column, value, direction) } + Some(EntityFilter::And(vec)) => match vec.first() { + Some(EntityFilter::Fulltext(_, value)) => { + sort_key_from_value(column, value, direction) + } + _ => unreachable!(), + }, _ => unreachable!(), } } else if column.is_primary_key() { From 403da79304cae3666c84cbcc743f83f699521744 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Thu, 9 Mar 2023 21:19:18 -0300 Subject: [PATCH 0014/2104] graphql: add test for full text search --- graphql/tests/query.rs | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/graphql/tests/query.rs b/graphql/tests/query.rs index 967a494992b..2ebbbceebb1 100644 --- a/graphql/tests/query.rs +++ b/graphql/tests/query.rs @@ -152,6 +152,22 @@ async fn setup( fn test_schema(id: DeploymentHash, id_type: IdType) -> Schema { const SCHEMA: &str = " + + type _Schema_ + @fulltext( + name: \"bandReviewSearch\" + language: en + algorithm: proximityRank + include: [ + { + entity: \"BandReview\" + fields: [ + { name: \"body\" } + ] + } + ] + ) + type Musician @entity { id: ID! name: String! 
@@ -687,6 +703,30 @@ fn can_query_many_to_many_relationship() { }) } +#[test] +fn can_query_with_fulltext_search() { + const QUERY: &str = " + query { + bandReviewSearch(text: \"musicians\") { + id + body + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + bandReviewSearch: vec![ + object! { id: "r1", body: "Bad musicians", author: object! { name: "Baden" } }, + object! { id: "r5", body: "Very Bad musicians", author: object! { name: "Anonymous 3" } }, + ] + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} #[test] fn can_query_with_sorting_by_child_entity() { const QUERY: &str = " From ffb267acc846c76a5f130bfb6445cdbd990934f2 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Thu, 9 Mar 2023 21:19:43 -0300 Subject: [PATCH 0015/2104] graphql: add test for full text with filter --- graphql/tests/query.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/graphql/tests/query.rs b/graphql/tests/query.rs index 2ebbbceebb1..af3f871fd6a 100644 --- a/graphql/tests/query.rs +++ b/graphql/tests/query.rs @@ -727,6 +727,31 @@ fn can_query_with_fulltext_search() { assert_eq!(data, exp); }) } + +#[test] +fn can_query_with_fulltext_search_filter() { + const QUERY: &str = " + query { + bandReviewSearch(text: \"musicians\", where: { author_: { name: \"Anonymous 3\" } }) { + id + body + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + bandReviewSearch: vec![ + object! { id: "r5", body: "Very Bad musicians", author: object! 
{ name: "Anonymous 3" } }, + ] + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + #[test] fn can_query_with_sorting_by_child_entity() { const QUERY: &str = " From 2224c17f4d5cb3d6cbb0042afa9850945ac7b3ad Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 14 Mar 2023 16:13:25 -0700 Subject: [PATCH 0016/2104] graph: Remove schema composition --- graph/src/data/schema.rs | 448 +-------------------------------- graph/src/data/subgraph/mod.rs | 14 +- 2 files changed, 12 insertions(+), 450 deletions(-) diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 91d5afe9b23..9e794bad793 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,9 +1,9 @@ use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, EntityType, SubgraphStore}; +use crate::components::store::{EntityKey, EntityType}; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::{self, ValueType}; -use crate::data::subgraph::{DeploymentHash, SubgraphName}; +use crate::data::subgraph::DeploymentHash; use crate::prelude::{ anyhow, lazy_static, q::Value, @@ -20,7 +20,6 @@ use thiserror::Error; use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryFrom; use std::fmt; -use std::hash::Hash; use std::iter::FromIterator; use std::str::FromStr; use std::sync::Arc; @@ -64,21 +63,11 @@ pub enum SchemaValidationError { InvalidDerivedFrom(String, String, String), // (type, field, reason) #[error("The following type names are reserved: `{0}`")] UsageOfReservedTypes(Strings), - #[error("_Schema_ type is only for @imports and must not have any fields")] + #[error("_Schema_ type is only for @fulltext and must not have any fields")] SchemaTypeWithFields, - #[error("Imported subgraph name `{0}` is invalid")] - ImportedSubgraphNameInvalid(String), - #[error("Imported subgraph id `{0}` is invalid")] - 
ImportedSubgraphIdInvalid(String), - #[error("The _Schema_ type only allows @import directives")] + #[error("The _Schema_ type only allows @fulltext directives")] InvalidSchemaTypeDirectives, - #[error( - r#"@import directives must have the form \ -@import(types: ["A", {{ name: "B", as: "C"}}], from: {{ name: "org/subgraph"}}) or \ -@import(types: ["A", {{ name: "B", as: "C"}}], from: {{ id: "Qm..."}})"# - )] - ImportDirectiveInvalid, - #[error("Type `{0}`, field `{1}`: type `{2}` is neither defined nor imported")] + #[error("Type `{0}`, field `{1}`: type `{2}` is not defined")] FieldTypeUnknown(String, String, String), // (type_name, field_name, field_type) #[error("Imported type `{0}` does not exist in the `{1}` schema")] ImportedTypeUndefined(String, String), // (type_name, schema) @@ -263,99 +252,6 @@ impl From<&s::Directive> for FulltextDefinition { } } } -#[derive(Debug, Error, PartialEq, Eq, Clone)] -pub enum SchemaImportError { - #[error("Schema for imported subgraph `{0}` was not found")] - ImportedSchemaNotFound(SchemaReference), - #[error("Subgraph for imported schema `{0}` is not deployed")] - ImportedSubgraphNotFound(SchemaReference), -} - -/// The representation of a single type from an import statement. This -/// corresponds either to a string `"Thing"` or an object -/// `{name: "Thing", as: "Stuff"}`. 
The first form is equivalent to -/// `{name: "Thing", as: "Thing"}` -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ImportedType { - /// The 'name' - name: String, - /// The 'as' alias or a copy of `name` if the user did not specify an alias - alias: String, - /// Whether the alias was explicitly given or is just a copy of the name - explicit: bool, -} - -impl fmt::Display for ImportedType { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - if self.explicit { - write!(f, "name: {}, as: {}", self.name, self.alias) - } else { - write!(f, "{}", self.name) - } - } -} - -impl ImportedType { - fn parse(type_import: &Value) -> Option { - match type_import { - Value::String(type_name) => Some(ImportedType { - name: type_name.to_string(), - alias: type_name.to_string(), - explicit: false, - }), - Value::Object(type_name_as) => { - match (type_name_as.get("name"), type_name_as.get("as")) { - (Some(name), Some(az)) => Some(ImportedType { - name: name.to_string(), - alias: az.to_string(), - explicit: true, - }), - _ => None, - } - } - _ => None, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct SchemaReference { - subgraph: DeploymentHash, -} - -impl fmt::Display for SchemaReference { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "{}", self.subgraph) - } -} - -impl SchemaReference { - fn new(subgraph: DeploymentHash) -> Self { - SchemaReference { subgraph } - } - - pub fn resolve( - &self, - store: Arc, - ) -> Result, SchemaImportError> { - store - .input_schema(&self.subgraph) - .map_err(|_| SchemaImportError::ImportedSchemaNotFound(self.clone())) - } - - fn parse(value: &Value) -> Option { - match value { - Value::Object(map) => match map.get("id") { - Some(Value::String(id)) => match DeploymentHash::new(id) { - Ok(id) => Some(SchemaReference::new(id)), - _ => None, - }, - _ => None, - }, - _ => None, - } - } -} #[derive(Debug)] pub struct ApiSchema { @@ -647,50 +543,6 @@ impl Schema { 
self.immutable_types.contains(entity_type) } - pub fn resolve_schema_references( - &self, - store: Arc, - ) -> ( - HashMap>, - Vec, - ) { - let mut schemas = HashMap::new(); - let mut visit_log = HashSet::new(); - let import_errors = self.resolve_import_graph(store, &mut schemas, &mut visit_log); - (schemas, import_errors) - } - - fn resolve_import_graph( - &self, - store: Arc, - schemas: &mut HashMap>, - visit_log: &mut HashSet, - ) -> Vec { - // Use the visit log to detect cycles in the import graph - self.imported_schemas() - .into_iter() - .fold(vec![], |mut errors, schema_ref| { - match schema_ref.resolve(store.clone()) { - Ok(schema) => { - schemas.insert(schema_ref, schema.clone()); - // If this node in the graph has already been visited stop traversing - if !visit_log.contains(&schema.id) { - visit_log.insert(schema.id.clone()); - errors.extend(schema.resolve_import_graph( - store.clone(), - schemas, - visit_log, - )); - } - } - Err(err) => { - errors.push(err); - } - } - errors - }) - } - fn collect_interfaces( document: &s::Document, ) -> Result< @@ -760,48 +612,6 @@ impl Schema { Schema::new(id, document).map_err(Into::into) } - fn imported_types(&self) -> HashMap { - fn parse_types(import: &Directive) -> Vec { - import - .argument("types") - .map_or(vec![], |value| match value { - Value::List(types) => types.iter().filter_map(ImportedType::parse).collect(), - _ => vec![], - }) - } - - self.subgraph_schema_object_type() - .map_or(HashMap::new(), |object| { - object - .directives - .iter() - .filter(|directive| directive.name.eq("import")) - .flat_map(|import| { - import.argument("from").map_or(vec![], |from| { - SchemaReference::parse(from).map_or(vec![], |schema_ref| { - parse_types(import) - .into_iter() - .map(|imported_type| (imported_type, schema_ref.clone())) - .collect() - }) - }) - }) - .collect::>() - }) - } - - pub fn imported_schemas(&self) -> Vec { - self.subgraph_schema_object_type().map_or(vec![], |object| { - object - .directives - .iter() 
- .filter(|directive| directive.name.eq("import")) - .filter_map(|directive| directive.argument("from")) - .filter_map(SchemaReference::parse) - .collect() - }) - } - pub fn name_argument_value_from_directive(directive: &Directive) -> Value { directive .argument("name") @@ -861,10 +671,7 @@ impl Schema { } } - pub fn validate( - &self, - schemas: &HashMap>, - ) -> Result<(), Vec> { + pub fn validate(&self) -> Result<(), Vec> { let mut errors: Vec = [ self.validate_schema_types(), self.validate_derived_from(), @@ -880,9 +687,7 @@ impl Schema { .collect(); errors.append(&mut self.validate_fields()); - errors.append(&mut self.validate_import_directives()); errors.append(&mut self.validate_fulltext_directives()); - errors.append(&mut self.validate_imported_types(schemas)); if errors.is_empty() { Ok(()) @@ -913,9 +718,7 @@ impl Schema { if subgraph_schema_type .directives .iter() - .filter(|directive| { - !directive.name.eq("import") && !directive.name.eq("fulltext") - }) + .filter(|directive| !directive.name.eq("fulltext")) .next() .is_some() { @@ -929,80 +732,6 @@ impl Schema { } } - /// Check the syntax of a single `@import` directive - fn validate_import_directive_arguments(import: &Directive) -> Option { - fn validate_import_type(typ: &Value) -> Result<(), ()> { - match typ { - Value::String(_) => Ok(()), - Value::Object(typ) => match (typ.get("name"), typ.get("as")) { - (Some(Value::String(_)), Some(Value::String(_))) => Ok(()), - _ => Err(()), - }, - _ => Err(()), - } - } - - fn types_are_valid(types: Option<&Value>) -> bool { - // All of the elements in the `types` field are valid: either - // a string or an object with keys `name` and `as` which are strings - if let Some(Value::List(types)) = types { - types - .iter() - .try_for_each(validate_import_type) - .err() - .is_none() - } else { - false - } - } - - fn from_is_valid(from: Option<&Value>) -> bool { - if let Some(Value::Object(from)) = from { - let has_id = matches!(from.get("id"), 
Some(Value::String(_))); - - let has_name = matches!(from.get("name"), Some(Value::String(_))); - has_id ^ has_name - } else { - false - } - } - - if from_is_valid(import.argument("from")) && types_are_valid(import.argument("types")) { - None - } else { - Some(SchemaValidationError::ImportDirectiveInvalid) - } - } - - fn validate_import_directive_schema_reference_parses( - directive: &Directive, - ) -> Option { - directive.argument("from").and_then(|from| match from { - Value::Object(from) => { - let id_parse_error = match from.get("id") { - Some(Value::String(id)) => match DeploymentHash::new(id) { - Err(_) => { - Some(SchemaValidationError::ImportedSubgraphIdInvalid(id.clone())) - } - _ => None, - }, - _ => None, - }; - let name_parse_error = match from.get("name") { - Some(Value::String(name)) => match SubgraphName::new(name) { - Err(_) => Some(SchemaValidationError::ImportedSubgraphNameInvalid( - name.clone(), - )), - _ => None, - }, - _ => None, - }; - id_parse_error.or(name_parse_error) - } - _ => None, - }) - } - fn validate_fulltext_directives(&self) -> Vec { self.subgraph_schema_object_type() .map_or(vec![], |subgraph_schema_type| { @@ -1182,64 +911,6 @@ impl Schema { vec![] } - fn validate_import_directives(&self) -> Vec { - self.subgraph_schema_object_type() - .map_or(vec![], |subgraph_schema_type| { - subgraph_schema_type - .directives - .iter() - .filter(|directives| directives.name.eq("import")) - .fold(vec![], |mut errors, import| { - Self::validate_import_directive_arguments(import) - .into_iter() - .for_each(|err| errors.push(err)); - Self::validate_import_directive_schema_reference_parses(import) - .into_iter() - .for_each(|err| errors.push(err)); - errors - }) - }) - } - - fn validate_imported_types( - &self, - schemas: &HashMap>, - ) -> Vec { - self.imported_types() - .iter() - .fold(vec![], |mut errors, (imported_type, schema_ref)| { - schemas - .get(schema_ref) - .and_then(|schema| { - let local_types = 
schema.document.get_object_type_definitions(); - let imported_types = schema.imported_types(); - - // Ensure that the imported type is either local to - // the respective schema or is itself imported - // If the imported type is itself imported, do not - // recursively check the schema - let schema_handle = schema_ref.subgraph.to_string(); - let name = imported_type.name.as_str(); - - let is_local = local_types.iter().any(|object| object.name == name); - let is_imported = imported_types - .iter() - .any(|(import, _)| name == import.alias); - if !is_local && !is_imported { - Some(SchemaValidationError::ImportedTypeUndefined( - name.to_string(), - schema_handle, - )) - } else { - None - } - }) - .into_iter() - .for_each(|err| errors.push(err)); - errors - }) - } - fn validate_fields(&self) -> Vec { let local_types = self.document.get_object_and_interface_type_fields(); let local_enums = self @@ -1248,7 +919,6 @@ impl Schema { .iter() .map(|enu| enu.name.clone()) .collect::>(); - let imported_types = self.imported_types(); local_types .iter() .fold(vec![], |errors, (type_name, fields)| { @@ -1260,12 +930,6 @@ impl Schema { if local_types.contains_key(base) { return errors; } - if imported_types - .iter() - .any(|(imported_type, _)| &imported_type.alias == base) - { - return errors; - } if local_enums.iter().any(|enu| enu.eq(base)) { return errors; } @@ -1628,7 +1292,7 @@ fn interface_implementations_id_type() { }}" ); let schema = Schema::parse(&schema, DeploymentHash::new("dummy").unwrap()).unwrap(); - let res = schema.validate(&HashMap::new()); + let res = schema.validate(); if ok { assert!(matches!(res, Ok(_))); } else { @@ -1743,25 +1407,6 @@ type _Schema_ @illegal"; ) } -#[test] -fn test_imports_directive_from_argument() { - const ROOT_SCHEMA: &str = r#" -type _Schema_ @import(types: ["T", "A", "C"])"#; - - let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let schema = Schema::new(DeploymentHash::new("id").unwrap(), 
document).unwrap(); - match schema - .validate_import_directives() - .into_iter() - .find(|err| *err == SchemaValidationError::ImportDirectiveInvalid) { - None => panic!( - "Expected validation for `{}` to fail due to an @imports directive without a `from` argument", - ROOT_SCHEMA, - ), - _ => (), - } -} - #[test] fn test_enums_pass_field_validation() { const ROOT_SCHEMA: &str = r#" @@ -1780,79 +1425,6 @@ type A @entity { assert_eq!(schema.validate_fields().len(), 0); } -#[test] -fn test_recursively_imported_type_validates() { - const ROOT_SCHEMA: &str = r#" -type _Schema_ @import(types: ["T"], from: { id: "c1id" })"#; - const CHILD_1_SCHEMA: &str = r#" -type _Schema_ @import(types: ["T"], from: { id: "c2id" })"#; - const CHILD_2_SCHEMA: &str = r#" -type T @entity { id: ID! } -"#; - - let root_document = - graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let child_1_document = - graphql_parser::parse_schema(CHILD_1_SCHEMA).expect("Failed to parse child 1 schema"); - let child_2_document = - graphql_parser::parse_schema(CHILD_2_SCHEMA).expect("Failed to parse child 2 schema"); - - let c1id = DeploymentHash::new("c1id").unwrap(); - let c2id = DeploymentHash::new("c2id").unwrap(); - let root_schema = Schema::new(DeploymentHash::new("rid").unwrap(), root_document).unwrap(); - let child_1_schema = Schema::new(c1id.clone(), child_1_document).unwrap(); - let child_2_schema = Schema::new(c2id.clone(), child_2_document).unwrap(); - - let mut schemas = HashMap::new(); - schemas.insert(SchemaReference::new(c1id), Arc::new(child_1_schema)); - schemas.insert(SchemaReference::new(c2id), Arc::new(child_2_schema)); - - match root_schema.validate_imported_types(&schemas).is_empty() { - false => panic!( - "Expected imported types validation for `{}` to suceed", - ROOT_SCHEMA, - ), - true => (), - } -} - -#[test] -fn test_recursively_imported_type_which_dne_fails_validation() { - const ROOT_SCHEMA: &str = r#" -type _Schema_ @import(types: ["T"], 
from: { id:"c1id"})"#; - const CHILD_1_SCHEMA: &str = r#" -type _Schema_ @import(types: [{name: "T", as: "A"}], from: { id:"c2id"})"#; - const CHILD_2_SCHEMA: &str = r#" -type T @entity { id: ID! } -"#; - let root_document = - graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let child_1_document = - graphql_parser::parse_schema(CHILD_1_SCHEMA).expect("Failed to parse child 1 schema"); - let child_2_document = - graphql_parser::parse_schema(CHILD_2_SCHEMA).expect("Failed to parse child 2 schema"); - - let c1id = DeploymentHash::new("c1id").unwrap(); - let c2id = DeploymentHash::new("c2id").unwrap(); - let root_schema = Schema::new(DeploymentHash::new("rid").unwrap(), root_document).unwrap(); - let child_1_schema = Schema::new(c1id.clone(), child_1_document).unwrap(); - let child_2_schema = Schema::new(c2id.clone(), child_2_document).unwrap(); - - let mut schemas = HashMap::new(); - schemas.insert(SchemaReference::new(c1id), Arc::new(child_1_schema)); - schemas.insert(SchemaReference::new(c2id), Arc::new(child_2_schema)); - - match root_schema.validate_imported_types(&schemas).into_iter().find(|err| match err { - SchemaValidationError::ImportedTypeUndefined(_, _) => true, - _ => false, - }) { - None => panic!( - "Expected imported types validation to fail because an imported type was missing in the target schema", - ), - _ => (), - } -} - #[test] fn test_reserved_types_validation() { let reserved_types = [ @@ -1876,7 +1448,7 @@ fn test_reserved_types_validation() { let schema = Schema::parse(&schema, dummy_hash.clone()).unwrap(); - let errors = schema.validate(&HashMap::new()).unwrap_err(); + let errors = schema.validate().unwrap_err(); for error in errors { assert!(matches!( error, @@ -1904,7 +1476,7 @@ fn test_reserved_filter_and_group_by_types_validation() { let schema = Schema::parse(SCHEMA, dummy_hash).unwrap(); - let errors = schema.validate(&HashMap::new()).unwrap_err(); + let errors = schema.validate().unwrap_err(); // The 
only problem in the schema is the usage of reserved types assert_eq!(errors.len(), 1); diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index d7a28da9764..b731d3a50a1 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -33,7 +33,7 @@ use crate::{ data::{ graphql::TryFromValue, query::QueryExecutionError, - schema::{Schema, SchemaImportError, SchemaValidationError}, + schema::{Schema, SchemaValidationError}, store::Entity, subgraph::features::validate_subgraph_features, }, @@ -314,12 +314,6 @@ impl From<::diesel::result::Error> for SubgraphAssignmentProviderError { } } -#[derive(Error, Debug)] -pub enum SubgraphManifestValidationWarning { - #[error("schema validation produced warnings: {0:?}")] - SchemaValidationWarning(SchemaImportError), -} - #[derive(Error, Debug)] pub enum SubgraphManifestValidationError { #[error("subgraph has no data sources")] @@ -332,8 +326,6 @@ pub enum SubgraphManifestValidationError { EthereumNetworkRequired, #[error("the specified block must exist on the Ethereum network")] BlockNotFound(String), - #[error("imported schema(s) are invalid: {0:?}")] - SchemaImportError(Vec), #[error("schema validation failed: {0:?}")] SchemaValidationError(Vec), #[error("the graft base is invalid: {0}")] @@ -534,8 +526,6 @@ impl UnvalidatedSubgraphManifest { store: Arc, validate_graft_base: bool, ) -> Result, Vec> { - let (schemas, _) = self.0.schema.resolve_schema_references(store.clone()); - let mut errors: Vec = vec![]; // Validate that the manifest has at least one data source @@ -570,7 +560,7 @@ impl UnvalidatedSubgraphManifest { self.0 .schema - .validate(&schemas) + .validate() .err() .into_iter() .for_each(|schema_errors| { From 31b92287a82fc6069f21d43673953257929b3f65 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 14 Mar 2023 16:32:02 -0700 Subject: [PATCH 0017/2104] graph: Remove unused function Schema.name_value_from_directive --- graph/src/data/schema.rs | 7 ------- 1 file 
changed, 7 deletions(-) diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 9e794bad793..899ef70fc93 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -612,13 +612,6 @@ impl Schema { Schema::new(id, document).map_err(Into::into) } - pub fn name_argument_value_from_directive(directive: &Directive) -> Value { - directive - .argument("name") - .expect("fulltext directive must have name argument") - .clone() - } - /// Returned map has one an entry for each interface in the schema. pub fn types_for_interface(&self) -> &BTreeMap> { &self.types_for_interface From 3000f44b50e501a80ff32659e07d3807587692c0 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 13 Mar 2023 11:00:18 -0700 Subject: [PATCH 0018/2104] graph: Remove dependency on test-store --- Cargo.lock | 1 - graph/Cargo.toml | 1 - graph/src/util/jobs.rs | 9 ++++++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 39511f10789..78a04301cae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1577,7 +1577,6 @@ dependencies = [ "stable-hash 0.4.2", "strum", "strum_macros", - "test-store", "thiserror", "tiny-keccak 1.5.0", "tokio", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 4c3f73766ae..7d2f4c82f79 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -66,7 +66,6 @@ web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-pat serde_plain = "1.0.1" [dev-dependencies] -test-store = { path = "../store/test-store" } clap = { version = "3.2.23", features = ["derive", "env"] } maplit = "1.0.2" diff --git a/graph/src/util/jobs.rs b/graph/src/util/jobs.rs index ccd9007a881..fdda7d365b4 100644 --- a/graph/src/util/jobs.rs +++ b/graph/src/util/jobs.rs @@ -95,8 +95,15 @@ impl Runner { #[cfg(test)] mod tests { use super::*; + use lazy_static::lazy_static; use std::sync::{Arc, Mutex}; - use test_store::LOGGER; + + lazy_static! 
{ + pub static ref LOGGER: Logger = match crate::env::ENV_VARS.log_levels { + Some(_) => crate::log::logger(false), + None => Logger::root(slog::Discard, o!()), + }; + } struct CounterJob { count: Arc>, From 671514fc13cedc49b901f757a906bb34f57a5ccf Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 13 Mar 2023 11:05:20 -0700 Subject: [PATCH 0019/2104] core: Qualify imports from graph crate --- core/src/metrics/registry.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/src/metrics/registry.rs b/core/src/metrics/registry.rs index e46a2487e06..2380329ecbf 100644 --- a/core/src/metrics/registry.rs +++ b/core/src/metrics/registry.rs @@ -2,7 +2,12 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; use graph::components::metrics::{counter_with_labels, gauge_with_labels}; -use graph::prelude::{MetricsRegistry as MetricsRegistryTrait, *}; +use graph::prelude::{Collector, MetricsRegistry as MetricsRegistryTrait}; +use graph::prometheus::{ + Counter, CounterVec, Error as PrometheusError, Gauge, GaugeVec, HistogramOpts, HistogramVec, + Opts, Registry, +}; +use graph::slog::{self, error, o, Logger}; #[derive(Clone)] pub struct MetricsRegistry { From 43cc26b51a6f80ef1aa12541f09d8f6935af4deb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 13 Mar 2023 11:49:36 -0700 Subject: [PATCH 0020/2104] all: Move MetricsRegistry from graph-core to graph crate --- Cargo.lock | 2 -- chain/arweave/src/chain.rs | 4 ++-- chain/cosmos/src/chain.rs | 4 ++-- chain/ethereum/Cargo.toml | 1 - chain/ethereum/src/adapter.rs | 5 +++-- chain/ethereum/src/chain.rs | 11 ++++++----- chain/ethereum/src/network.rs | 4 ++-- chain/near/src/chain.rs | 5 +++-- chain/substreams/Cargo.toml | 1 - chain/substreams/examples/substreams.rs | 3 +-- chain/substreams/src/chain.rs | 7 ++++--- core/Cargo.toml | 4 +++- core/src/lib.rs | 2 -- core/src/metrics/mod.rs | 3 --- core/src/polling_monitor/metrics.rs | 5 +++-- core/src/subgraph/context.rs | 7 ++++--- 
core/src/subgraph/instance_manager.rs | 5 +++-- graph/src/blockchain/block_stream.rs | 3 ++- graph/src/blockchain/builder.rs | 4 ++-- graph/src/blockchain/firehose_block_stream.rs | 5 +++-- graph/src/blockchain/substreams_block_stream.rs | 5 +++-- graph/src/components/metrics/mod.rs | 6 +++++- .../src => graph/src/components}/metrics/registry.rs | 12 +++++++----- graph/src/components/metrics/stopwatch.rs | 4 +++- graph/src/components/metrics/subgraph.rs | 9 +++++---- graph/src/components/subgraph/host.rs | 3 ++- graph/src/data/graphql/effort.rs | 4 ++-- graph/src/log/factory.rs | 6 +++--- graphql/src/metrics.rs | 7 ++++--- graphql/src/runner.rs | 4 ++-- node/src/bin/manager.rs | 3 +-- node/src/chain.rs | 6 +++--- node/src/lib.rs | 3 +-- node/src/main.rs | 2 +- node/src/manager/commands/config.rs | 4 ++-- node/src/store_builder.rs | 11 ++++++----- runtime/test/src/test.rs | 1 - server/http/tests/server.rs | 1 - store/postgres/src/chain_head_listener.rs | 11 ++++++++--- store/postgres/src/connection_pool.rs | 11 ++++++----- store/postgres/src/jobs.rs | 7 ++++--- store/postgres/src/notification_listener.rs | 3 ++- store/postgres/src/store_events.rs | 9 +++++++-- store/postgres/src/subgraph_store.rs | 11 ++++++----- store/postgres/src/writable.rs | 10 +++++----- store/postgres/tests/relational.rs | 3 +-- store/postgres/tests/relational_bytes.rs | 3 +-- store/postgres/tests/store.rs | 1 - store/test-store/src/store.rs | 1 - tests/src/fixture/ethereum.rs | 5 +++-- tests/src/fixture/mod.rs | 9 +++++---- 51 files changed, 141 insertions(+), 119 deletions(-) delete mode 100644 core/src/metrics/mod.rs rename {core/src => graph/src/components}/metrics/registry.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index 78a04301cae..e6a9794fbd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1641,7 +1641,6 @@ dependencies = [ "envconfig", "futures 0.1.31", "graph", - "graph-core", "graph-runtime-derive", "graph-runtime-wasm", "hex", @@ -1684,7 +1683,6 @@ dependencies = [ "envconfig", 
"futures 0.1.31", "graph", - "graph-core", "graph-runtime-wasm", "hex", "http", diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index 48ef035bc76..8089d28de56 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -6,10 +6,10 @@ use graph::blockchain::{ EmptyNodeCapabilities, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::FirehoseEndpoint; -use graph::prelude::MetricsRegistry; use graph::{ blockchain::{ block_stream::{ @@ -40,7 +40,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, } impl std::fmt::Debug for Chain { diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index df539bae010..1145dfb93f6 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,5 +1,6 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::BlockIngestor; +use graph::components::metrics::MetricsRegistryTrait; use std::sync::Arc; use graph::blockchain::block_stream::FirehoseCursor; @@ -8,7 +9,6 @@ use graph::blockchain::{BasicBlockchainBuilder, BlockchainBuilder, NoopRuntimeAd use graph::cheap_clone::CheapClone; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; -use graph::prelude::MetricsRegistry; use graph::{ blockchain::{ block_stream::{ @@ -36,7 +36,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, } impl std::fmt::Debug for Chain { diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index b421d3d1777..5d813d0b825 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -25,7 +25,6 @@ graph-runtime-wasm = { path = "../../runtime/wasm" } 
graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] -graph-core = { path = "../../core" } test-store = { path = "../../store/test-store" } base64 = "0.20.0" diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index bf5cde3d3ce..12761183d06 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -2,6 +2,7 @@ use anyhow::Error; use ethabi::{Error as ABIError, Function, ParamType, Token}; use futures::Future; use graph::blockchain::ChainIdentifier; +use graph::components::metrics::MetricsRegistryTrait; use graph::firehose::CallToFilter; use graph::firehose::CombinedFilter; use graph::firehose::LogFilter; @@ -731,7 +732,7 @@ pub struct ProviderEthRpcMetrics { } impl ProviderEthRpcMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let request_duration = registry .new_histogram_vec( "eth_rpc_request_duration", @@ -787,7 +788,7 @@ pub struct SubgraphEthRpcMetrics { } impl SubgraphEthRpcMetrics { - pub fn new(registry: Arc, subgraph_hash: &str) -> Self { + pub fn new(registry: Arc, subgraph_hash: &str) -> Self { let request_duration = registry .global_gauge_vec( "deployment_eth_rpc_request_duration", diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index efeb8ac2290..6dfc78376cb 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Error}; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{BlockIngestor, BlockchainKind, TriggersAdapterSelector}; +use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, ForkStep}; @@ -26,7 +27,7 @@ use graph::{ firehose, prelude::{ async_trait, o, serde_json as json, BlockNumber, ChainStore, 
EthereumBlockWithCalls, - Future01CompatExt, Logger, LoggerFactory, MetricsRegistry, NodeId, + Future01CompatExt, Logger, LoggerFactory, NodeId, }, }; use prost::Message; @@ -191,7 +192,7 @@ impl BlockRefetcher for EthereumBlockRefetcher { pub struct EthereumAdapterSelector { logger_factory: LoggerFactory, client: Arc>, - registry: Arc, + registry: Arc, chain_store: Arc, } @@ -199,7 +200,7 @@ impl EthereumAdapterSelector { pub fn new( logger_factory: LoggerFactory, client: Arc>, - registry: Arc, + registry: Arc, chain_store: Arc, ) -> Self { Self { @@ -241,7 +242,7 @@ pub struct Chain { logger_factory: LoggerFactory, name: String, node_id: NodeId, - registry: Arc, + registry: Arc, client: Arc>, chain_store: Arc, call_cache: Arc, @@ -267,7 +268,7 @@ impl Chain { logger_factory: LoggerFactory, name: String, node_id: NodeId, - registry: Arc, + registry: Arc, chain_store: Arc, call_cache: Arc, client: Arc>, diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 05ae2679fea..2f77c6c7902 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -220,9 +220,9 @@ impl EthereumNetworks { #[cfg(test)] mod tests { use graph::{ - firehose::SubgraphLimit, prelude::MetricsRegistry as MetricsRegistryTrait, tokio, url::Url, + components::metrics::MetricsRegistryTrait, firehose::SubgraphLimit, + prelude::MetricsRegistry, tokio, url::Url, }; - use graph_core::MetricsRegistry; use http::HeaderMap; use std::sync::Arc; diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 4847cf18d78..675fd5954fa 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -5,10 +5,11 @@ use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use 
graph::firehose::FirehoseEndpoint; -use graph::prelude::{MetricsRegistry, TryFutureExt}; +use graph::prelude::TryFutureExt; use graph::{ anyhow::Result, blockchain::{ @@ -97,7 +98,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, block_stream_builder: Arc>, } diff --git a/chain/substreams/Cargo.toml b/chain/substreams/Cargo.toml index ad557e27c4f..8054b338d93 100644 --- a/chain/substreams/Cargo.toml +++ b/chain/substreams/Cargo.toml @@ -28,5 +28,4 @@ base64 = "0.20.0" itertools = "0.10.5" [dev-dependencies] -graph-core = { path = "../../core" } tokio = { version = "1", features = ["full"] } diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index c8222433552..55858825d18 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -3,11 +3,10 @@ use graph::blockchain::block_stream::BlockStreamEvent; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::endpoint::EndpointMetrics; use graph::firehose::SubgraphLimit; -use graph::prelude::{info, tokio, DeploymentHash, Registry}; +use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; use graph::tokio_stream::StreamExt; use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; use graph_chain_substreams::mapper::Mapper; -use graph_core::MetricsRegistry; use prost::Message; use std::env; use std::sync::Arc; diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 281a0013e6f..f45c858512b 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -2,9 +2,10 @@ use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersA use anyhow::Error; use graph::blockchain::client::ChainClient; use graph::blockchain::{BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter}; +use graph::components::metrics::MetricsRegistryTrait; use 
graph::components::store::DeploymentCursorTracker; use graph::firehose::FirehoseEndpoints; -use graph::prelude::{BlockHash, LoggerFactory, MetricsRegistry}; +use graph::prelude::{BlockHash, LoggerFactory}; use graph::{ blockchain::{ self, @@ -44,14 +45,14 @@ pub struct Chain { pub(crate) logger_factory: LoggerFactory, pub(crate) client: Arc>, - pub(crate) metrics_registry: Arc, + pub(crate) metrics_registry: Arc, } impl Chain { pub fn new( logger_factory: LoggerFactory, firehose_endpoints: FirehoseEndpoints, - metrics_registry: Arc, + metrics_registry: Arc, chain_store: Arc, block_stream_builder: Arc>, ) -> Self { diff --git a/core/Cargo.toml b/core/Cargo.toml index 8d5902e92bf..6100eeb7aa9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -38,5 +38,7 @@ graphql-parser = "0.4.0" pretty_assertions = "1.3.0" anyhow = "1.0" ipfs-api-backend-hyper = "0.6" -ipfs-api = { version = "0.17.0", features = ["with-hyper-rustls"], default-features = false } +ipfs-api = { version = "0.17.0", features = [ + "with-hyper-rustls", +], default-features = false } uuid = { version = "1.3.0", features = ["v4"] } diff --git a/core/src/lib.rs b/core/src/lib.rs index 972a45e508f..2236519bf3f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,11 +1,9 @@ pub mod polling_monitor; mod link_resolver; -mod metrics; mod subgraph; pub use crate::link_resolver::LinkResolver; -pub use crate::metrics::MetricsRegistry; pub use crate::subgraph::{ SubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar, SubgraphRunner, SubgraphTriggerProcessor, diff --git a/core/src/metrics/mod.rs b/core/src/metrics/mod.rs deleted file mode 100644 index 047d6b24132..00000000000 --- a/core/src/metrics/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod registry; - -pub use registry::MetricsRegistry; diff --git a/core/src/polling_monitor/metrics.rs b/core/src/polling_monitor/metrics.rs index 86d65790a7b..368ad0036ad 100644 --- a/core/src/polling_monitor/metrics.rs +++ b/core/src/polling_monitor/metrics.rs 
@@ -1,7 +1,8 @@ use std::sync::Arc; use graph::{ - prelude::{DeploymentHash, MetricsRegistry}, + components::metrics::MetricsRegistryTrait, + prelude::DeploymentHash, prometheus::{Counter, Gauge}, }; @@ -13,7 +14,7 @@ pub struct PollingMonitorMetrics { } impl PollingMonitorMetrics { - pub fn new(registry: Arc, subgraph_hash: &DeploymentHash) -> Self { + pub fn new(registry: Arc, subgraph_hash: &DeploymentHash) -> Self { let requests = registry .new_deployment_counter( "polling_monitor_requests", diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index c5cb800d63e..630571d5687 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -6,14 +6,15 @@ use bytes::Bytes; use graph::{ blockchain::Blockchain, components::{ + metrics::MetricsRegistryTrait, store::{DeploymentId, SubgraphFork}, subgraph::{MappingError, SharedProofOfIndexing}, }, data_source::{offchain, CausalityRegion, DataSource, TriggerData}, ipfs_client::CidFile, prelude::{ - BlockNumber, BlockState, CancelGuard, CheapClone, DeploymentHash, MetricsRegistry, - RuntimeHostBuilder, SubgraphCountMetric, SubgraphInstanceMetrics, TriggerProcessor, + BlockNumber, BlockState, CancelGuard, CheapClone, DeploymentHash, RuntimeHostBuilder, + SubgraphCountMetric, SubgraphInstanceMetrics, TriggerProcessor, }, slog::Logger, tokio::sync::mpsc, @@ -191,7 +192,7 @@ pub struct OffchainMonitor { impl OffchainMonitor { pub fn new( logger: Logger, - registry: Arc, + registry: Arc, subgraph_hash: &DeploymentHash, ipfs_service: IpfsService, ) -> Self { diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 08ca909d9df..25dca9b13f3 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -8,6 +8,7 @@ use graph::blockchain::block_stream::BlockStreamMetrics; use graph::blockchain::Blockchain; use graph::blockchain::NodeCapabilities; use graph::blockchain::{BlockchainKind, TriggerFilter}; +use 
graph::components::metrics::MetricsRegistryTrait; use graph::components::subgraph::ProofOfIndexingVersion; use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; use graph::data_source::causality_region::CausalityRegionSeq; @@ -26,7 +27,7 @@ pub struct SubgraphInstanceManager { logger_factory: LoggerFactory, subgraph_store: Arc, chains: Arc, - metrics_registry: Arc, + metrics_registry: Arc, instances: SubgraphKeepAlive, link_resolver: Arc, ipfs_service: IpfsService, @@ -162,7 +163,7 @@ impl SubgraphInstanceManager { subgraph_store: Arc, chains: Arc, sg_metrics: Arc, - metrics_registry: Arc, + metrics_registry: Arc, link_resolver: Arc, ipfs_service: IpfsService, static_filters: bool, diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index a1ec2cad436..32e5f238af4 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -8,6 +8,7 @@ use tokio::sync::mpsc::{self, Receiver, Sender}; use super::{Block, BlockPtr, Blockchain}; use crate::anyhow::Result; +use crate::components::metrics::MetricsRegistryTrait; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; @@ -386,7 +387,7 @@ pub struct BlockStreamMetrics { impl BlockStreamMetrics { pub fn new( - registry: Arc, + registry: Arc, deployment_id: &DeploymentHash, network: String, shard: String, diff --git a/graph/src/blockchain/builder.rs b/graph/src/blockchain/builder.rs index fa3bcd83fbc..e0782727a21 100644 --- a/graph/src/blockchain/builder.rs +++ b/graph/src/blockchain/builder.rs @@ -1,6 +1,6 @@ use super::Blockchain; use crate::{ - components::{metrics::MetricsRegistry, store::ChainStore}, + components::{metrics::MetricsRegistryTrait, store::ChainStore}, firehose::FirehoseEndpoints, prelude::LoggerFactory, }; @@ -13,7 +13,7 @@ pub struct BasicBlockchainBuilder { pub name: String, pub chain_store: Arc, pub 
firehose_endpoints: FirehoseEndpoints, - pub metrics_registry: Arc, + pub metrics_registry: Arc, } /// Something that can build a [`Blockchain`]. diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 8004c241548..a3cdf91686e 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -3,6 +3,7 @@ use super::client::ChainClient; use super::{Blockchain, TriggersAdapter}; use crate::blockchain::block_stream::FirehoseCursor; use crate::blockchain::TriggerFilter; +use crate::components::metrics::MetricsRegistryTrait; use crate::prelude::*; use crate::util::backoff::ExponentialBackoff; use crate::{firehose, firehose::FirehoseEndpoint}; @@ -22,7 +23,7 @@ struct FirehoseBlockStreamMetrics { } impl FirehoseBlockStreamMetrics { - pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { + pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { Self { deployment, @@ -115,7 +116,7 @@ where filter: Arc, start_blocks: Vec, logger: Logger, - registry: Arc, + registry: Arc, ) -> Self where F: FirehoseMapper + 'static, diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index b9884870172..10ae4d38ce6 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -1,6 +1,7 @@ use super::block_stream::SubstreamsMapper; use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; use crate::blockchain::Blockchain; +use crate::components::metrics::MetricsRegistryTrait; use crate::firehose::FirehoseEndpoint; use crate::prelude::*; use crate::substreams::response::Message; @@ -25,7 +26,7 @@ struct SubstreamsBlockStreamMetrics { impl SubstreamsBlockStreamMetrics { pub fn new( - registry: Arc, + registry: Arc, deployment: DeploymentHash, provider: String, ) -> Self { @@ -125,7 +126,7 @@ where start_blocks: Vec, end_blocks: Vec, logger: Logger, 
- registry: Arc, + registry: Arc, ) -> Self where F: SubstreamsMapper + 'static, diff --git a/graph/src/components/metrics/mod.rs b/graph/src/components/metrics/mod.rs index 4a95241d44e..70ee6418e74 100644 --- a/graph/src/components/metrics/mod.rs +++ b/graph/src/components/metrics/mod.rs @@ -3,8 +3,12 @@ pub use prometheus::{ labels, Counter, CounterVec, Error as PrometheusError, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, Opts, Registry, }; + +pub mod registry; pub mod subgraph; +pub use registry::MetricsRegistry; + use std::collections::HashMap; /// Metrics for measuring where time is spent during indexing. @@ -34,7 +38,7 @@ pub fn gauge_with_labels( Gauge::with_opts(opts) } -pub trait MetricsRegistry: Send + Sync + 'static { +pub trait MetricsRegistryTrait: Send + Sync + 'static { fn register(&self, name: &str, c: Box); fn unregister(&self, metric: Box); diff --git a/core/src/metrics/registry.rs b/graph/src/components/metrics/registry.rs similarity index 98% rename from core/src/metrics/registry.rs rename to graph/src/components/metrics/registry.rs index 2380329ecbf..9e108d84f52 100644 --- a/core/src/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -1,13 +1,15 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; -use graph::components::metrics::{counter_with_labels, gauge_with_labels}; -use graph::prelude::{Collector, MetricsRegistry as MetricsRegistryTrait}; -use graph::prometheus::{ +use crate::components::metrics::{counter_with_labels, gauge_with_labels}; +use crate::prelude::Collector; +use crate::prometheus::{ Counter, CounterVec, Error as PrometheusError, Gauge, GaugeVec, HistogramOpts, HistogramVec, Opts, Registry, }; -use graph::slog::{self, error, o, Logger}; +use crate::slog::{self, error, o, Logger}; + +use super::MetricsRegistryTrait; #[derive(Clone)] pub struct MetricsRegistry { @@ -289,7 +291,7 @@ impl MetricsRegistryTrait for MetricsRegistry { #[test] fn global_counters_are_shared() { - use graph::log; 
+ use crate::log; let logger = log::logger(false); let prom_reg = Arc::new(Registry::new()); diff --git a/graph/src/components/metrics/stopwatch.rs b/graph/src/components/metrics/stopwatch.rs index fe56cdb722a..c514670980b 100644 --- a/graph/src/components/metrics/stopwatch.rs +++ b/graph/src/components/metrics/stopwatch.rs @@ -2,6 +2,8 @@ use crate::prelude::*; use std::sync::{atomic::AtomicBool, atomic::Ordering, Mutex}; use std::time::Instant; +use super::MetricsRegistryTrait; + /// This is a "section guard", that closes the section on drop. pub struct Section { id: String, @@ -45,7 +47,7 @@ impl StopwatchMetrics { logger: Logger, subgraph_id: DeploymentHash, stage: &str, - registry: Arc, + registry: Arc, ) -> Self { let stage = stage.to_owned(); let mut inner = StopwatchInner { diff --git a/graph/src/components/metrics/subgraph.rs b/graph/src/components/metrics/subgraph.rs index 7aa59beecdf..cfc2cfb0210 100644 --- a/graph/src/components/metrics/subgraph.rs +++ b/graph/src/components/metrics/subgraph.rs @@ -1,11 +1,12 @@ use prometheus::Counter; use crate::blockchain::block_stream::BlockStreamMetrics; -use crate::prelude::{Gauge, Histogram, HostMetrics, MetricsRegistry}; +use crate::prelude::{Gauge, Histogram, HostMetrics}; use std::collections::HashMap; use std::sync::Arc; use super::stopwatch::StopwatchMetrics; +use super::MetricsRegistryTrait; pub struct SubgraphInstanceMetrics { pub block_trigger_count: Box, @@ -19,7 +20,7 @@ pub struct SubgraphInstanceMetrics { impl SubgraphInstanceMetrics { pub fn new( - registry: Arc, + registry: Arc, subgraph_hash: &str, stopwatch: StopwatchMetrics, ) -> Self { @@ -78,7 +79,7 @@ impl SubgraphInstanceMetrics { self.trigger_processing_duration.observe(duration); } - pub fn unregister(&self, registry: Arc) { + pub fn unregister(&self, registry: Arc) { registry.unregister(self.block_processing_duration.clone()); registry.unregister(self.block_trigger_count.clone()); 
registry.unregister(self.trigger_processing_duration.clone()); @@ -93,7 +94,7 @@ pub struct SubgraphCountMetric { } impl SubgraphCountMetric { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let running_count = registry .new_gauge( "deployment_running_count", diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index 7152e1705af..ad92ef17d2b 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -6,6 +6,7 @@ use anyhow::Error; use async_trait::async_trait; use futures::sync::mpsc; +use crate::components::metrics::MetricsRegistryTrait; use crate::components::store::SubgraphFork; use crate::data_source::{ DataSource, DataSourceTemplate, MappingTrigger, TriggerData, TriggerWithHandler, @@ -87,7 +88,7 @@ pub struct HostMetrics { impl HostMetrics { pub fn new( - registry: Arc, + registry: Arc, subgraph: &str, stopwatch: StopwatchMetrics, ) -> Self { diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index b345d5e8c7b..63777116b70 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -7,7 +7,7 @@ use std::iter::FromIterator; use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; -use crate::components::metrics::{Counter, Gauge, MetricsRegistry}; +use crate::components::metrics::{Counter, Gauge, MetricsRegistryTrait}; use crate::components::store::PoolWaitStats; use crate::data::graphql::shape_hash::shape_hash; use crate::data::query::{CacheStatus, QueryExecutionError}; @@ -207,7 +207,7 @@ impl LoadManager { pub fn new( logger: &Logger, blocked_queries: Vec>, - registry: Arc, + registry: Arc, ) -> Self { let logger = logger.new(o!("component" => "LoadManager")); let blocked_queries = blocked_queries diff --git a/graph/src/log/factory.rs b/graph/src/log/factory.rs index 1b126a6995d..6be127e3b60 100644 --- a/graph/src/log/factory.rs +++ b/graph/src/log/factory.rs @@ -3,7 +3,7 @@ use 
std::sync::Arc; use prometheus::Counter; use slog::*; -use crate::components::metrics::MetricsRegistry; +use crate::components::metrics::MetricsRegistryTrait; use crate::components::store::DeploymentLocator; use crate::log::elastic::*; use crate::log::split::*; @@ -24,7 +24,7 @@ pub struct ComponentLoggerConfig { pub struct LoggerFactory { parent: Logger, elastic_config: Option, - metrics_registry: Arc, + metrics_registry: Arc, } impl LoggerFactory { @@ -32,7 +32,7 @@ impl LoggerFactory { pub fn new( logger: Logger, elastic_config: Option, - metrics_registry: Arc, + metrics_registry: Arc, ) -> Self { Self { parent: logger, diff --git a/graphql/src/metrics.rs b/graphql/src/metrics.rs index 69d17ed5c01..9051c446262 100644 --- a/graphql/src/metrics.rs +++ b/graphql/src/metrics.rs @@ -3,8 +3,9 @@ use std::fmt; use std::sync::Arc; use std::time::Duration; +use graph::components::metrics::MetricsRegistryTrait; use graph::data::query::QueryResults; -use graph::prelude::{DeploymentHash, GraphQLMetrics as GraphQLMetricsTrait, MetricsRegistry}; +use graph::prelude::{DeploymentHash, GraphQLMetrics as GraphQLMetricsTrait}; use graph::prometheus::{CounterVec, Gauge, Histogram, HistogramVec}; pub struct GraphQLMetrics { @@ -76,7 +77,7 @@ impl GraphQLMetricsTrait for GraphQLMetrics { } impl GraphQLMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let query_execution_time = registry .new_histogram_vec( "query_execution_time", @@ -140,7 +141,7 @@ impl GraphQLMetrics { // Tests need to construct one of these, but normal code doesn't #[cfg(debug_assertions)] - pub fn make(registry: Arc) -> Self { + pub fn make(registry: Arc) -> Self { Self::new(registry) } diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 89b2c2d182a..fa22b1291ab 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -5,7 +5,7 @@ use crate::metrics::GraphQLMetrics; use crate::prelude::{QueryExecutionOptions, StoreResolver, SubscriptionExecutionOptions}; 
use crate::query::execute_query; use crate::subscription::execute_prepared_subscription; -use graph::prelude::MetricsRegistry; +use graph::components::metrics::MetricsRegistryTrait; use graph::{ components::store::SubscriptionManager, prelude::{ @@ -46,7 +46,7 @@ where store: Arc, subscription_manager: Arc, load_manager: Arc, - registry: Arc, + registry: Arc, ) -> Self { let logger = logger.new(o!("component" => "GraphQlRunner")); let graphql_metrics = Arc::new(GraphQLMetrics::new(registry)); diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 0294669497c..3362e4b4d98 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -2,7 +2,7 @@ use clap::{Parser, Subcommand}; use config::PoolSize; use git_testament::{git_testament, render_testament}; use graph::bail; -use graph::prelude::BLOCK_NUMBER_MAX; +use graph::prelude::{MetricsRegistry, BLOCK_NUMBER_MAX}; use graph::{data::graphql::effort::LoadManager, prelude::chrono, prometheus::Registry}; use graph::{ log::logger, @@ -13,7 +13,6 @@ use graph::{ url::Url, }; use graph_chain_ethereum::{EthereumAdapter, EthereumNetworks}; -use graph_core::MetricsRegistry; use graph_graphql::prelude::GraphQlRunner; use graph_node::config::{self, Config as Cfg}; use graph_node::manager::color::Terminal; diff --git a/node/src/chain.rs b/node/src/chain.rs index e3278e6d558..fe7f38ade71 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -5,11 +5,12 @@ use futures::TryFutureExt; use graph::anyhow::{bail, Error}; use graph::blockchain::{Block as BlockchainBlock, BlockchainKind, ChainIdentifier}; use graph::cheap_clone::CheapClone; +use graph::components::metrics::MetricsRegistryTrait; use graph::endpoint::EndpointMetrics; use graph::firehose::{FirehoseEndpoint, FirehoseNetworks, SubgraphLimit}; use graph::ipfs_client::IpfsClient; +use graph::prelude::prost; use graph::prelude::{anyhow, tokio}; -use graph::prelude::{prost, MetricsRegistry as MetricsRegistryTrait}; use graph::slog::{debug, error, info, o, 
Logger}; use graph::url::Url; use graph::util::security::SafeDisplay; @@ -480,10 +481,9 @@ mod test { use crate::chain::create_all_ethereum_networks; use crate::config::{Config, Opt}; use graph::log::logger; - use graph::prelude::tokio; + use graph::prelude::{tokio, MetricsRegistry}; use graph::prometheus::Registry; use graph_chain_ethereum::NodeCapabilities; - use graph_core::MetricsRegistry; use std::sync::Arc; #[tokio::test] diff --git a/node/src/lib.rs b/node/src/lib.rs index 2d4f8ca0f3b..f26f14fef5b 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -1,7 +1,6 @@ use std::sync::Arc; -use graph::prometheus::Registry; -use graph_core::MetricsRegistry; +use graph::{prelude::MetricsRegistry, prometheus::Registry}; #[macro_use] extern crate diesel; diff --git a/node/src/main.rs b/node/src/main.rs index 268bb6d4283..953acbc7652 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -23,7 +23,7 @@ use graph_chain_near::{self as near, HeaderOnlyBlock as NearFirehoseHeaderOnlyBl use graph_chain_substreams as substreams; use graph_core::polling_monitor::ipfs_service; use graph_core::{ - LinkResolver, MetricsRegistry, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, + LinkResolver, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, }; use graph_graphql::prelude::GraphQlRunner; diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 561a1da013e..96ccc9da353 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ anyhow::bail, - components::metrics::MetricsRegistry, + components::metrics::MetricsRegistryTrait, itertools::Itertools, prelude::{ anyhow::{anyhow, Error}, @@ -100,7 +100,7 @@ pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Err pub async fn provider( logger: Logger, config: &Config, - registry: Arc, + 
registry: Arc, features: String, network: String, ) -> Result<(), Error> { diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index a0b145bf41b..8be33964437 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -3,7 +3,8 @@ use std::{collections::HashMap, sync::Arc}; use futures::future::join_all; use graph::blockchain::ChainIdentifier; -use graph::prelude::{o, MetricsRegistry, NodeId}; +use graph::components::metrics::MetricsRegistryTrait; +use graph::prelude::{o, NodeId}; use graph::url::Url; use graph::{ prelude::{info, CheapClone, Logger}, @@ -40,7 +41,7 @@ impl StoreBuilder { node: &NodeId, config: &Config, fork_base: Option, - registry: Arc, + registry: Arc, ) -> Self { let primary_shard = config.primary_store().clone(); @@ -95,7 +96,7 @@ impl StoreBuilder { node: &NodeId, config: &Config, fork_base: Option, - registry: Arc, + registry: Arc, ) -> ( Arc, HashMap, @@ -199,7 +200,7 @@ impl StoreBuilder { node: &NodeId, name: &str, shard: &Shard, - registry: Arc, + registry: Arc, coord: Arc, ) -> ConnectionPool { let logger = logger.new(o!("pool" => "main")); @@ -235,7 +236,7 @@ impl StoreBuilder { node: &NodeId, name: &str, shard: &Shard, - registry: Arc, + registry: Arc, coord: Arc, ) -> (Vec, Vec) { let mut weights: Vec<_> = vec![shard.weight]; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 07a9830c3ee..90c17daabec 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -6,7 +6,6 @@ use graph::runtime::{AscIndexId, AscType}; use graph::runtime::{AscPtr, ToAscObj}; use graph::{components::store::*, ipfs_client::IpfsClient}; use graph_chain_ethereum::{Chain, DataSource}; -use graph_core::MetricsRegistry; use graph_runtime_wasm::asc_abi::class::{Array, AscBigInt, AscEntity, AscString, Uint8Array}; use graph_runtime_wasm::{ExperimentalFeatures, ValidModule, WasmInstance}; diff --git a/server/http/tests/server.rs b/server/http/tests/server.rs index e74c0b7ba36..76295e79528 100644 --- 
a/server/http/tests/server.rs +++ b/server/http/tests/server.rs @@ -90,7 +90,6 @@ impl GraphQlRunner for TestGraphQlRunner { #[cfg(test)] mod test { use super::*; - use graph_core::MetricsRegistry; lazy_static! { static ref USERS: DeploymentHash = DeploymentHash::new("users").unwrap(); diff --git a/store/postgres/src/chain_head_listener.rs b/store/postgres/src/chain_head_listener.rs index e3b760ca790..97234545773 100644 --- a/store/postgres/src/chain_head_listener.rs +++ b/store/postgres/src/chain_head_listener.rs @@ -1,5 +1,6 @@ use graph::{ blockchain::ChainHeadUpdateStream, + components::metrics::MetricsRegistryTrait, prelude::{ futures03::{self, FutureExt}, tokio, StoreError, @@ -22,7 +23,7 @@ use graph::blockchain::ChainHeadUpdateListener as ChainHeadUpdateListenerTrait; use graph::prelude::serde::{Deserialize, Serialize}; use graph::prelude::serde_json::{self, json}; use graph::prelude::tokio::sync::{mpsc::Receiver, watch}; -use graph::prelude::{crit, debug, o, CheapClone, Logger, MetricsRegistry, ENV_VARS}; +use graph::prelude::{crit, debug, o, CheapClone, Logger, ENV_VARS}; lazy_static! 
{ pub static ref CHANNEL_NAME: SafeChannelName = @@ -55,7 +56,7 @@ pub struct BlockIngestorMetrics { } impl BlockIngestorMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { Self { chain_head_number: registry .new_gauge_vec( @@ -96,7 +97,11 @@ pub(crate) struct ChainHeadUpdateSender { } impl ChainHeadUpdateListener { - pub fn new(logger: &Logger, registry: Arc, postgres_url: String) -> Self { + pub fn new( + logger: &Logger, + registry: Arc, + postgres_url: String, + ) -> Self { let logger = logger.new(o!("component" => "ChainHeadUpdateListener")); let ingestor_metrics = Arc::new(BlockIngestorMetrics::new(registry.clone())); let counter = registry diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index 6040ecc71e9..c08c63f0e68 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -7,6 +7,7 @@ use diesel::{ use diesel::{sql_query, RunQueryDsl}; use graph::cheap_clone::CheapClone; +use graph::components::metrics::MetricsRegistryTrait; use graph::constraint_violation; use graph::prelude::tokio; use graph::prelude::tokio::time::Instant; @@ -18,7 +19,7 @@ use graph::{ crit, debug, error, info, o, tokio::sync::Semaphore, CancelGuard, CancelHandle, CancelToken as _, CancelableError, Counter, Gauge, Logger, - MetricsRegistry, MovingStats, PoolWaitStats, StoreError, ENV_VARS, + MovingStats, PoolWaitStats, StoreError, ENV_VARS, }, util::security::SafeDisplay, }; @@ -309,7 +310,7 @@ impl ConnectionPool { pool_size: u32, fdw_pool_size: Option, logger: &Logger, - registry: Arc, + registry: Arc, coord: Arc, ) -> ConnectionPool { let state_tracker = PoolStateTracker::new(); @@ -592,7 +593,7 @@ struct EventHandler { impl EventHandler { fn new( logger: Logger, - registry: Arc, + registry: Arc, wait_stats: PoolWaitStats, const_labels: HashMap, state_tracker: PoolStateTracker, @@ -710,7 +711,7 @@ impl PoolInner { pool_size: u32, fdw_pool_size: Option, logger: &Logger, 
- registry: Arc, + registry: Arc, state_tracker: PoolStateTracker, ) -> PoolInner { let logger_store = logger.new(o!("component" => "Store")); @@ -1156,7 +1157,7 @@ impl PoolCoordinator { postgres_url: String, pool_size: u32, fdw_pool_size: Option, - registry: Arc, + registry: Arc, ) -> ConnectionPool { let is_writable = !pool_name.is_replica(); diff --git a/store/postgres/src/jobs.rs b/store/postgres/src/jobs.rs index a0d89a20b26..ae6203bed50 100644 --- a/store/postgres/src/jobs.rs +++ b/store/postgres/src/jobs.rs @@ -6,7 +6,8 @@ use std::time::{Duration, Instant}; use async_trait::async_trait; use diesel::{prelude::RunQueryDsl, sql_query, sql_types::Double}; -use graph::prelude::{error, Logger, MetricsRegistry, StoreError, ENV_VARS}; +use graph::components::metrics::MetricsRegistryTrait; +use graph::prelude::{error, Logger, StoreError, ENV_VARS}; use graph::prometheus::Gauge; use graph::util::jobs::{Job, Runner}; @@ -17,7 +18,7 @@ pub fn register( runner: &mut Runner, store: Arc, primary_pool: ConnectionPool, - registry: Arc, + registry: Arc, ) { runner.register( Arc::new(VacuumDeploymentsJob::new(store.subgraph_store())), @@ -79,7 +80,7 @@ struct NotificationQueueUsage { } impl NotificationQueueUsage { - fn new(primary: ConnectionPool, registry: Arc) -> Self { + fn new(primary: ConnectionPool, registry: Arc) -> Self { let usage_gauge = registry .new_gauge( "notification_queue_usage", diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 79530c47bd8..af9877cb671 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -1,6 +1,7 @@ use diesel::pg::PgConnection; use diesel::select; use diesel::sql_types::Text; +use graph::components::metrics::MetricsRegistryTrait; use graph::prelude::tokio::sync::mpsc::error::SendTimeoutError; use graph::util::backoff::ExponentialBackoff; use lazy_static::lazy_static; @@ -396,7 +397,7 @@ pub struct NotificationSender { } impl 
NotificationSender { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let sent_counter = registry .global_counter_vec( "notification_queue_sent", diff --git a/store/postgres/src/store_events.rs b/store/postgres/src/store_events.rs index d60474fd1eb..f6d08675e2c 100644 --- a/store/postgres/src/store_events.rs +++ b/store/postgres/src/store_events.rs @@ -1,4 +1,5 @@ use futures03::TryStreamExt; +use graph::components::metrics::MetricsRegistryTrait; use graph::parking_lot::Mutex; use graph::tokio_stream::wrappers::ReceiverStream; use std::collections::BTreeSet; @@ -21,7 +22,7 @@ impl StoreEventListener { pub fn new( logger: Logger, postgres_url: String, - registry: Arc, + registry: Arc, ) -> (Self, Box + Send>) { let channel = SafeChannelName::i_promise_this_is_safe("store_events"); let (notification_listener, receiver) = @@ -132,7 +133,11 @@ pub struct SubscriptionManager { } impl SubscriptionManager { - pub fn new(logger: Logger, postgres_url: String, registry: Arc) -> Self { + pub fn new( + logger: Logger, + postgres_url: String, + registry: Arc, + ) -> Self { let (listener, store_events) = StoreEventListener::new(logger, postgres_url, registry); let mut manager = SubscriptionManager { diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index f1c635b57fb..70f4d0fb722 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -14,6 +14,7 @@ use std::{iter::FromIterator, time::Duration}; use graph::{ cheap_clone::CheapClone, components::{ + metrics::MetricsRegistryTrait, server::index_node::VersionInfo, store::{ self, BlockStore, DeploymentLocator, DeploymentSchemaVersion, @@ -27,8 +28,8 @@ use graph::{ prelude::{ anyhow, futures03::future::join_all, lazy_static, o, web3::types::Address, ApiSchema, ApiVersion, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, - MetricsRegistry, NodeId, PartialBlockPtr, Schema, StoreError, 
SubgraphDeploymentEntity, - SubgraphName, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, + NodeId, PartialBlockPtr, Schema, StoreError, SubgraphDeploymentEntity, SubgraphName, + SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }, url::Url, util::timed_cache::TimedCache, @@ -218,7 +219,7 @@ impl SubgraphStore { placer: Arc, sender: Arc, fork_base: Option, - registry: Arc, + registry: Arc, ) -> Self { Self { inner: Arc::new(SubgraphStoreInner::new( @@ -274,7 +275,7 @@ pub struct SubgraphStoreInner { placer: Arc, sender: Arc, writables: Mutex>>, - registry: Arc, + registry: Arc, } impl SubgraphStoreInner { @@ -297,7 +298,7 @@ impl SubgraphStoreInner { stores: Vec<(Shard, ConnectionPool, Vec, Vec)>, placer: Arc, sender: Arc, - registry: Arc, + registry: Arc, ) -> Self { let mirror = { let pools = HashMap::from_iter( diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 4d244ac56b5..0820f07df37 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -5,13 +5,13 @@ use std::time::Duration; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; +use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::ReadStore; use graph::components::store::{DeploymentCursorTracker, EntityKey}; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ - BlockNumber, Entity, MetricsRegistry, Schema, SubgraphDeploymentEntity, SubgraphStore as _, - BLOCK_NUMBER_MAX, + BlockNumber, Entity, Schema, SubgraphDeploymentEntity, SubgraphStore as _, BLOCK_NUMBER_MAX, }; use graph::slog::info; use graph::util::bounded_queue::BoundedQueue; @@ -586,7 +586,7 @@ impl Queue { logger: Logger, store: Arc, capacity: usize, - registry: Arc, + registry: Arc, ) -> Arc { async fn start_writer(queue: Arc, logger: Logger) { loop { @@ -855,7 +855,7 @@ impl Writer { logger: Logger, store: Arc, capacity: usize, - 
registry: Arc, + registry: Arc, ) -> Self { info!(logger, "Starting subgraph writer"; "queue_size" => capacity); if capacity == 0 { @@ -987,7 +987,7 @@ impl WritableStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, - registry: Arc, + registry: Arc, ) -> Result { let store = Arc::new(SyncStore::new(subgraph_store, logger.clone(), site)?); let block_ptr = Mutex::new(store.block_ptr().await?); diff --git a/store/postgres/tests/relational.rs b/store/postgres/tests/relational.rs index fe56b32bdb9..ca68338933e 100644 --- a/store/postgres/tests/relational.rs +++ b/store/postgres/tests/relational.rs @@ -4,12 +4,11 @@ use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::entity; -use graph::prelude::BlockNumber; use graph::prelude::{ o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, EntityOrder, EntityQuery, Logger, Schema, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; -use graph_core::MetricsRegistry; +use graph::prelude::{BlockNumber, MetricsRegistry}; use graph_store_postgres::layout_for_tests::set_account_like; use graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; diff --git a/store/postgres/tests/relational_bytes.rs b/store/postgres/tests/relational_bytes.rs index 54a8e179011..e28314826d1 100644 --- a/store/postgres/tests/relational_bytes.rs +++ b/store/postgres/tests/relational_bytes.rs @@ -4,8 +4,7 @@ use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data_source::CausalityRegion; -use graph::prelude::EntityQuery; -use graph_core::MetricsRegistry; +use graph::prelude::{EntityQuery, MetricsRegistry}; use hex_literal::hex; use lazy_static::lazy_static; use std::borrow::Cow; diff --git a/store/postgres/tests/store.rs b/store/postgres/tests/store.rs index f24a38528e5..abbef4c3b3b 100644 --- a/store/postgres/tests/store.rs +++ 
b/store/postgres/tests/store.rs @@ -3,7 +3,6 @@ use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::DeploymentCreate; use graph_chain_ethereum::{Mapping, MappingABI}; -use graph_core::MetricsRegistry; use hex_literal::hex; use lazy_static::lazy_static; use std::time::Duration; diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 3ed2b0f2249..9d421f4268b 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -13,7 +13,6 @@ use graph::{ components::store::EntityType, components::store::StatusStore, components::store::StoredDynamicDataSource, data::subgraph::status, prelude::NodeId, }; -use graph_core::MetricsRegistry; use graph_graphql::prelude::{ execute_query, Query as PreparedQuery, QueryExecutionOptions, StoreResolver, }; diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index c3f3c79ab30..645e43fd457 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -13,14 +13,15 @@ use graph::endpoint::EndpointMetrics; use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::web3::types::{Address, Log, Transaction, H160}; -use graph::prelude::{ethabi, tiny_keccak, LightEthereumBlock, LoggerFactory, NodeId}; +use graph::prelude::{ + ethabi, tiny_keccak, LightEthereumBlock, LoggerFactory, MetricsRegistry, NodeId, +}; use graph::{blockchain::block_stream::BlockWithTriggers, prelude::ethabi::ethereum_types::U64}; use graph_chain_ethereum::{ chain::BlockFinality, trigger::{EthereumBlockTriggerType, EthereumTrigger}, }; use graph_chain_ethereum::{Chain, ENV_VARS}; -use graph_core::MetricsRegistry; pub async fn chain( blocks: Vec>, diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 8ce95e15687..206c074b078 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -16,6 +16,7 @@ use 
graph::blockchain::{ TriggersAdapter, TriggersAdapterSelector, }; use graph::cheap_clone::CheapClone; +use graph::components::metrics::{MetricsRegistry, MetricsRegistryTrait}; use graph::components::store::{BlockStore, DeploymentLocator}; use graph::data::graphql::effort::LoadManager; use graph::data::query::{Query, QueryTarget}; @@ -26,14 +27,14 @@ use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::serde_json::{self, json}; use graph::prelude::{ async_trait, r, ApiVersion, BigInt, BlockNumber, DeploymentHash, GraphQlRunner as _, - LoggerFactory, MetricsRegistry as MetricsRegistryTrait, NodeId, QueryError, - SubgraphAssignmentProvider, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, - SubgraphStore as _, SubgraphVersionSwitchingMode, TriggerProcessor, + LoggerFactory, NodeId, QueryError, SubgraphAssignmentProvider, SubgraphCountMetric, + SubgraphName, SubgraphRegistrar, SubgraphStore as _, SubgraphVersionSwitchingMode, + TriggerProcessor, }; use graph::slog::crit; use graph_core::polling_monitor::ipfs_service; use graph_core::{ - LinkResolver, MetricsRegistry, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, + LinkResolver, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, SubgraphTriggerProcessor, }; use graph_node::manager::PanicSubscriptionManager; From d58c9294433364afa7c7cea9fdaeff55e7f19ebe Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 15 Mar 2023 15:56:35 -0700 Subject: [PATCH 0021/2104] store: Make how often jobs run a bit more readable --- store/postgres/src/jobs.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/store/postgres/src/jobs.rs b/store/postgres/src/jobs.rs index ae6203bed50..483150ff208 100644 --- a/store/postgres/src/jobs.rs +++ b/store/postgres/src/jobs.rs @@ -20,25 +20,28 @@ pub fn register( primary_pool: ConnectionPool, registry: Arc, ) { + const ONE_MINUTE: Duration = 
Duration::from_secs(60); + const ONE_HOUR: Duration = Duration::from_secs(60 * 60); + runner.register( Arc::new(VacuumDeploymentsJob::new(store.subgraph_store())), - Duration::from_secs(60), + ONE_MINUTE, ); runner.register( Arc::new(NotificationQueueUsage::new(primary_pool, registry)), - Duration::from_secs(60), + ONE_MINUTE, ); runner.register( Arc::new(MirrorPrimary::new(store.subgraph_store())), - Duration::from_secs(15 * 60), + 15 * ONE_MINUTE, ); // Remove unused deployments every 2 hours runner.register( Arc::new(UnusedJob::new(store.subgraph_store())), - Duration::from_secs(2 * 60 * 60), + 2 * ONE_HOUR, ) } From fb866868de614644c07524784daee03fc2df4463 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 15 Mar 2023 16:00:07 -0700 Subject: [PATCH 0022/2104] store: Add a job to refresh materialized views --- NEWS.md | 7 +++++-- store/postgres/src/deployment_store.rs | 29 ++++++++++++++++++++++++++ store/postgres/src/jobs.rs | 28 ++++++++++++++++++++++++- store/postgres/src/subgraph_store.rs | 9 ++++++++ 4 files changed, 70 insertions(+), 3 deletions(-) diff --git a/NEWS.md b/NEWS.md index bec220e7112..7dcbe2051d8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,6 +2,9 @@ ## Unreleased +- the materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. [#4461](https://github.com/graphprotocol/graph-node/pull/4461) + + ## v0.30.0 ### Database locale change @@ -87,7 +90,7 @@ Dependency upgrades: - `Qmccst5mbV5a6vT6VvJMLPKMAA1VRgT6NGbxkLL8eDRsE7` - `Qmd9nZKCH8UZU1pBzk7G8ECJr3jX3a2vAf3vowuTwFvrQg` - + Here's an example [manifest](https://ipfs.io/ipfs/Qmd9nZKCH8UZU1pBzk7G8ECJr3jX3a2vAf3vowuTwFvrQg), taking a look at the data sources of name `ERC721` and `CryptoKitties`, both listen to the `Transfer(...)` event. 
Considering a block where there's only one occurence of this event, `graph-node` would duplicate it and call `handleTransfer` twice. Now this is fixed and it will be called only once per event/call that happened on chain. In the case you're indexing one of the impacted subgraphs, you should first upgrade the `graph-node` version, then rewind the affected subgraphs to the smallest `startBlock` of their subgraph manifest. To achieve that the `graphman rewind` CLI command can be used. @@ -95,7 +98,7 @@ Dependency upgrades: See [#4055](https://github.com/graphprotocol/graph-node/pull/4055) for more information. * This release fixes another determinism bug that affects a handful of subgraphs. The bug affects all subgraphs which have an `apiVersion` **older than** 0.0.5 using call handlers. While call handlers prior to 0.0.5 should be triggered by both failed and successful transactions, in some cases failed transactions would not trigger the handlers. This resulted in nondeterministic behavior. With this version of `graph-node`, call handlers with an `apiVersion` older than 0.0.5 will always be triggered by both successful and failed transactions. Behavior for `apiVersion` 0.0.5 onward is not affected. 
- + The affected subgraphs are: - `QmNY7gDNXHECV8SXoEY7hbfg4BX1aDMxTBDiFuG4huaSGA` diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 782b81831ae..f18fbeb71d8 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1658,6 +1658,35 @@ impl DeploymentStore { }); } + pub(crate) async fn refresh_materialized_views(&self, logger: &Logger) { + async fn run(store: &DeploymentStore) -> Result<(), StoreError> { + // We hardcode our materialized views, but could also use + // pg_matviews to list all of them, though that might inadvertently + // refresh materialized views that operators created themselves + const VIEWS: [&str; 3] = [ + "info.table_sizes", + "info.subgraph_sizes", + "info.chain_sizes", + ]; + store + .with_conn(|conn, cancel| { + for view in VIEWS { + let query = format!("refresh materialized view {}", view); + diesel::sql_query(&query).execute(conn)?; + cancel.check_cancel()?; + } + Ok(()) + }) + .await + } + + run(self).await.unwrap_or_else(|e| { + warn!(logger, "Refreshing materialized views failed. We will try again in a few hours"; + "error" => e.to_string(), + "shard" => self.pool.shard.as_str()) + }); + } + pub(crate) async fn health( &self, site: &Site, diff --git a/store/postgres/src/jobs.rs b/store/postgres/src/jobs.rs index 483150ff208..43927dd6c76 100644 --- a/store/postgres/src/jobs.rs +++ b/store/postgres/src/jobs.rs @@ -42,7 +42,12 @@ pub fn register( runner.register( Arc::new(UnusedJob::new(store.subgraph_store())), 2 * ONE_HOUR, - ) + ); + + runner.register( + Arc::new(RefreshMaterializedView::new(store.subgraph_store())), + 6 * ONE_HOUR, + ); } /// A job that vacuums `subgraphs.subgraph_deployment`. 
With a large number @@ -152,6 +157,27 @@ impl Job for MirrorPrimary { } } +struct RefreshMaterializedView { + store: Arc, +} + +impl RefreshMaterializedView { + fn new(store: Arc) -> Self { + Self { store } + } +} + +#[async_trait] +impl Job for RefreshMaterializedView { + fn name(&self) -> &str { + "Refresh materialized views" + } + + async fn run(&self, logger: &Logger) { + self.store.refresh_materialized_views(logger).await; + } +} + struct UnusedJob { store: Arc, } diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 70f4d0fb722..dde1b9a174c 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1029,6 +1029,15 @@ impl SubgraphStoreInner { .await; } + pub async fn refresh_materialized_views(&self, logger: &Logger) { + join_all( + self.stores + .values() + .map(|store| store.refresh_materialized_views(logger)), + ) + .await; + } + pub fn analyze( &self, deployment: &DeploymentLocator, From 6315dc7877f1d933f9ebb9b1e88e65dc81b75a23 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 17 Mar 2023 06:05:02 -0700 Subject: [PATCH 0023/2104] NEWS.md: Add a note about filtering fulltext searches (#4464) --- NEWS.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/NEWS.md b/NEWS.md index 7dcbe2051d8..215de1f71d5 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,9 @@ - the materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. 
[#4461](https://github.com/graphprotocol/graph-node/pull/4461) +### Fixes +- fulltext searches now support additional `where` filters making it possible to narrow the results of a fulltext search with other criteria [#1823](https://github.com/graphprotocol/graph-node/issues/1823) + ## v0.30.0 From 97e607db8cb35c0abeda6995ab578b20fd4cb86a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Mar 2023 14:12:00 +0100 Subject: [PATCH 0024/2104] build(deps): bump chrono from 0.4.23 to 0.4.24 (#4450) Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.23 to 0.4.24. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.23...v0.4.24) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e6a9794fbd9..925dc1df164 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -482,9 +482,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 7d2f4c82f79..3856c5a9e8c 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -13,7 +13,7 @@ bytes = "1.0.1" cid = "0.10.1" diesel = { version = "1.4.8", features = ["postgres", "serde_json", 
"numeric", "r2d2", "chrono"] } diesel_derives = "1.4" -chrono = "0.4.23" +chrono = "0.4.24" envconfig = "0.10.0" Inflector = "0.11.3" isatty = "0.1.9" From f4fa0960c4a45447b50c22066518d585cddf29bd Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Fri, 17 Mar 2023 15:09:14 +0100 Subject: [PATCH 0025/2104] store, node: instrument `RecentBlocksCache` with metrics (#4440) * store, node: instrument RecentBlocksCache with metrics * store: small RecentBlocksCache code quality improvs. --- node/src/bin/manager.rs | 3 +- node/src/store_builder.rs | 8 +- store/postgres/src/block_store.rs | 9 +- store/postgres/src/chain_store.rs | 137 ++++++++++++++++++++++++++++-- store/postgres/src/lib.rs | 2 +- 5 files changed, 148 insertions(+), 11 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 3362e4b4d98..70602a5f2e9 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -820,7 +820,7 @@ impl Context { &self.node_id, &self.config, self.fork_base, - self.registry, + self.registry.clone(), ); for pool in pools.values() { @@ -833,6 +833,7 @@ impl Context { subgraph_store, HashMap::default(), vec![], + self.registry, ); (store, pools) diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index 8be33964437..a592d7c3ee3 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -15,7 +15,7 @@ use graph_store_postgres::connection_pool::{ }; use graph_store_postgres::{ BlockStore as DieselBlockStore, ChainHeadUpdateListener as PostgresChainHeadUpdateListener, - NotificationSender, Shard as ShardName, Store as DieselStore, SubgraphStore, + ChainStoreMetrics, NotificationSender, Shard as ShardName, Store as DieselStore, SubgraphStore, SubscriptionManager, PRIMARY_SHARD, }; @@ -30,6 +30,7 @@ pub struct StoreBuilder { /// Map network names to the shards where they are/should be stored chains: HashMap, pub coord: Arc, + registry: Arc, } impl StoreBuilder { @@ -85,6 +86,7 @@ impl StoreBuilder { 
chain_head_update_listener, chains, coord, + registry, } } @@ -166,6 +168,7 @@ impl StoreBuilder { subgraph_store: Arc, chains: HashMap, networks: Vec<(String, Vec)>, + registry: Arc, ) -> Arc { let networks = networks .into_iter() @@ -177,12 +180,14 @@ impl StoreBuilder { let logger = logger.new(o!("component" => "BlockStore")); + let chain_store_metrics = Arc::new(ChainStoreMetrics::new(registry)); let block_store = Arc::new( DieselBlockStore::new( logger, networks, pools, subgraph_store.notification_sender(), + chain_store_metrics, ) .expect("Creating the BlockStore works"), ); @@ -283,6 +288,7 @@ impl StoreBuilder { self.subgraph_store, self.chains, networks, + self.registry, ) } diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index 79bc4db40f7..17962a91095 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -20,8 +20,9 @@ use graph::{ }; use crate::{ - chain_head_listener::ChainHeadUpdateSender, connection_pool::ConnectionPool, - primary::Mirror as PrimaryMirror, ChainStore, NotificationSender, Shard, PRIMARY_SHARD, + chain_head_listener::ChainHeadUpdateSender, chain_store::ChainStoreMetrics, + connection_pool::ConnectionPool, primary::Mirror as PrimaryMirror, ChainStore, + NotificationSender, Shard, PRIMARY_SHARD, }; #[cfg(debug_assertions)] @@ -183,6 +184,7 @@ pub struct BlockStore { sender: Arc, mirror: PrimaryMirror, chain_head_cache: TimedCache>, + chain_store_metrics: Arc, } impl BlockStore { @@ -204,6 +206,7 @@ impl BlockStore { // shard -> pool pools: HashMap, sender: Arc, + chain_store_metrics: Arc, ) -> Result { // Cache chain head pointers for this long when returning // information from `chain_head_pointers` @@ -220,6 +223,7 @@ impl BlockStore { sender, mirror, chain_head_cache, + chain_store_metrics, }; fn reduce_idents( @@ -373,6 +377,7 @@ impl BlockStore { sender, pool, ENV_VARS.store.recent_blocks_cache_capacity, + self.chain_store_metrics.clone(), ); if create { 
store.create(&ident)?; diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index c6f423a6dfa..b7835fc4589 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -3,7 +3,9 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use diesel::sql_types::Text; use diesel::{insert_into, update}; +use graph::components::metrics::MetricsRegistryTrait; use graph::parking_lot::RwLock; +use graph::prometheus::{CounterVec, GaugeVec}; use std::{ collections::HashMap, @@ -1322,6 +1324,90 @@ from ( } } +#[derive(Debug)] +pub struct ChainStoreMetrics { + chain_head_cache_size: Box, + chain_head_cache_oldest_block_num: Box, + chain_head_cache_latest_block_num: Box, + chain_head_cache_hits: Box, + chain_head_cache_misses: Box, +} + +impl ChainStoreMetrics { + pub fn new(registry: Arc) -> Self { + let chain_head_cache_size = registry + .new_gauge_vec( + "chain_head_cache_num_blocks", + "Number of blocks in the chain head cache", + vec!["network".to_string()], + ) + .expect("Can't register the gauge"); + let chain_head_cache_oldest_block_num = registry + .new_gauge_vec( + "chain_head_cache_oldest_block", + "Block number of the oldest block currently present in the chain head cache", + vec!["network".to_string()], + ) + .expect("Can't register the gauge"); + let chain_head_cache_latest_block_num = registry + .new_gauge_vec( + "chain_head_cache_latest_block", + "Block number of the latest block currently present in the chain head cache", + vec!["network".to_string()], + ) + .expect("Can't register the gauge"); + + let chain_head_cache_hits = registry + .new_counter_vec( + "chain_head_cache_hits", + "Number of times the chain head cache was hit", + vec!["network".to_string()], + ) + .expect("Can't register the counter"); + let chain_head_cache_misses = registry + .new_counter_vec( + "chain_head_cache_misses", + "Number of times the chain head cache was missed", + vec!["network".to_string()], + ) 
+ .expect("Can't register the counter"); + + Self { + chain_head_cache_size, + chain_head_cache_oldest_block_num, + chain_head_cache_latest_block_num, + chain_head_cache_hits, + chain_head_cache_misses, + } + } + + pub fn add_block(&self, network: &str) { + self.chain_head_cache_size + .with_label_values(&[network]) + .inc(); + } + + pub fn remove_block(&self, network: &str) { + self.chain_head_cache_size + .with_label_values(&[network]) + .dec(); + } + + pub fn record_cache_hit(&self, network: &str) { + self.chain_head_cache_hits + .get_metric_with_label_values(&[network]) + .unwrap() + .inc(); + } + + pub fn record_cache_miss(&self, network: &str) { + self.chain_head_cache_misses + .get_metric_with_label_values(&[network]) + .unwrap() + .inc(); + } +} + pub struct ChainStore { pool: ConnectionPool, pub chain: String, @@ -1347,7 +1433,10 @@ impl ChainStore { chain_head_update_sender: ChainHeadUpdateSender, pool: ConnectionPool, recent_blocks_cache_capacity: usize, + metrics: Arc, ) -> Self { + let recent_blocks_cache = + RecentBlocksCache::new(recent_blocks_cache_capacity, chain.clone(), metrics); ChainStore { pool, chain, @@ -1355,7 +1444,7 @@ impl ChainStore { genesis_block_ptr: BlockPtr::new(net_identifier.genesis_block_hash.clone(), 0), status, chain_head_update_sender, - recent_blocks_cache: RecentBlocksCache::new(recent_blocks_cache_capacity), + recent_blocks_cache, } } @@ -1832,6 +1921,8 @@ mod recent_blocks_cache { } struct Inner { + network: String, + metrics: Arc, // Note: we only ever store blocks in this cache that have a continuous // line of ancestry between each other. Line of ancestry is verified by // comparing parent hashes. 
Because of NEAR, however, we cannot @@ -1877,6 +1968,26 @@ mod recent_blocks_cache { } } + fn update_write_metrics(&self) { + self.metrics + .chain_head_cache_size + .get_metric_with_label_values(&[&self.network]) + .unwrap() + .set(self.blocks.len() as f64); + + self.metrics + .chain_head_cache_oldest_block_num + .get_metric_with_label_values(&[&self.network]) + .unwrap() + .set(self.earliest_block().map(|b| b.ptr.number).unwrap_or(0) as f64); + + self.metrics + .chain_head_cache_latest_block_num + .get_metric_with_label_values(&[&self.network]) + .unwrap() + .set(self.chain_head().map(|b| b.number).unwrap_or(0) as f64); + } + fn insert_block( &mut self, ptr: BlockPtr, @@ -1936,9 +2047,11 @@ mod recent_blocks_cache { } impl RecentBlocksCache { - pub fn new(capacity: usize) -> Self { + pub fn new(capacity: usize, network: String, metrics: Arc) -> Self { RecentBlocksCache { inner: RwLock::new(Inner { + network, + metrics, blocks: BTreeMap::new(), capacity, }), @@ -1951,7 +2064,8 @@ mod recent_blocks_cache { } pub fn clear(&self) { - self.inner.write().blocks.clear() + self.inner.write().blocks.clear(); + self.inner.read().update_write_metrics(); } pub fn get_block( @@ -1959,10 +2073,20 @@ mod recent_blocks_cache { child: &BlockPtr, offset: BlockNumber, ) -> Option<(BlockPtr, Option)> { - self.inner + let block_opt = self + .inner .read() .get_block(child, offset) - .map(|b| (b.0.clone(), b.1.cloned())) + .map(|b| (b.0.clone(), b.1.cloned())); + + let inner = self.inner.read(); + if block_opt.is_some() { + inner.metrics.record_cache_hit(&inner.network); + } else { + inner.metrics.record_cache_miss(&inner.network); + } + + block_opt } /// Tentatively caches the `ancestor` of a [`BlockPtr`] (`child`), together with @@ -1975,7 +2099,8 @@ mod recent_blocks_cache { data: Option, parent_hash: BlockHash, ) { - self.inner.write().insert_block(ptr, data, parent_hash) + self.inner.write().insert_block(ptr, data, parent_hash); + self.inner.read().update_write_metrics(); } } } 
diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index cad392e3cfc..73b081e8ca9 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -55,7 +55,7 @@ pub mod layout_for_tests { pub use self::block_store::BlockStore; pub use self::chain_head_listener::ChainHeadUpdateListener; -pub use self::chain_store::ChainStore; +pub use self::chain_store::{ChainStore, ChainStoreMetrics}; pub use self::detail::DeploymentDetail; pub use self::jobs::register as register_jobs; pub use self::notification_listener::NotificationSender; From 06a73e707fe5481b1de7fdfb544f2eb9e4a7cde7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Mar 2023 15:46:06 +0100 Subject: [PATCH 0026/2104] build(deps): bump crossbeam-channel from 0.5.5 to 0.5.7 (#4428) Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.5 to 0.5.7. - [Release notes](https://github.com/crossbeam-rs/crossbeam/releases) - [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md) - [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.5...crossbeam-channel-0.5.7) --- updated-dependencies: - dependency-name: crossbeam-channel dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- node/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 925dc1df164..2f6469d393a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -788,9 +788,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", diff --git a/node/Cargo.toml b/node/Cargo.toml index 8ff16764655..ff2a2213112 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -20,7 +20,7 @@ graphql-parser = "0.4.0" futures = { version = "0.3.1", features = ["compat"] } lazy_static = "1.2.0" url = "2.3.1" -crossbeam-channel = "0.5.5" +crossbeam-channel = "0.5.7" graph = { path = "../graph" } graph-core = { path = "../core" } graph-chain-arweave = { path = "../chain/arweave" } From db605f7fa7ca8438f21524f66287bb0b22607682 Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Fri, 17 Mar 2023 18:18:37 +0100 Subject: [PATCH 0027/2104] graph: make MetricsRegistry !Clone (#4472) --- graph/src/components/metrics/registry.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index 9e108d84f52..abb469f7304 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -11,7 +11,6 @@ use crate::slog::{self, error, o, Logger}; use super::MetricsRegistryTrait; -#[derive(Clone)] pub struct MetricsRegistry { logger: Logger, registry: Arc, @@ -21,11 +20,11 @@ pub struct MetricsRegistry { /// Global metrics are lazily initialized and identified by /// the 
`Desc.id` that hashes the name and const label values - global_counters: Arc>>, - global_counter_vecs: Arc>>, - global_gauges: Arc>>, - global_gauge_vecs: Arc>>, - global_histogram_vecs: Arc>>, + global_counters: RwLock>, + global_counter_vecs: RwLock>, + global_gauges: RwLock>, + global_gauge_vecs: RwLock>, + global_histogram_vecs: RwLock>, } impl MetricsRegistry { @@ -41,11 +40,11 @@ impl MetricsRegistry { register_errors, unregister_errors, registered_metrics, - global_counters: Arc::new(RwLock::new(HashMap::new())), - global_counter_vecs: Arc::new(RwLock::new(HashMap::new())), - global_gauges: Arc::new(RwLock::new(HashMap::new())), - global_gauge_vecs: Arc::new(RwLock::new(HashMap::new())), - global_histogram_vecs: Arc::new(RwLock::new(HashMap::new())), + global_counters: RwLock::new(HashMap::new()), + global_counter_vecs: RwLock::new(HashMap::new()), + global_gauges: RwLock::new(HashMap::new()), + global_gauge_vecs: RwLock::new(HashMap::new()), + global_histogram_vecs: RwLock::new(HashMap::new()), } } From 2eb8e06211b93a719b97b7d35c64b12de987e1e9 Mon Sep 17 00:00:00 2001 From: Yaro Shkvorets Date: Fri, 17 Mar 2023 14:17:23 -0400 Subject: [PATCH 0028/2104] graph: fix substreams `startBlock` selection (#4463) * graph: fix substreams initial_block selection Closes #4449 --- chain/substreams/src/data_source.rs | 92 ++++++++++++++++++++++++----- 1 file changed, 78 insertions(+), 14 deletions(-) diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index 9e3389189ef..b3006c7b463 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -172,11 +172,32 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { let package = graph::substreams::Package::decode(content.as_ref())?; - let initial_block: Option = match package.modules { - Some(ref modules) => modules.modules.iter().map(|x| x.initial_block).min(), + let module = match package.modules { + Some(ref modules) => modules + .modules + 
.iter() + .find(|module| module.name == self.source.package.module_name), None => None, }; + let initial_block: Option = match module { + Some(module) => match &module.kind { + Some(graph::substreams::module::Kind::KindMap(_)) => Some(module.initial_block), + _ => { + return Err(anyhow!( + "Substreams module {} must be of 'map' kind", + module.name + )) + } + }, + None => { + return Err(anyhow!( + "Substreams module {} does not exist", + self.source.package.module_name + )) + } + }; + let initial_block: Option = initial_block .map_or(Ok(None), |x: u64| TryInto::::try_into(x).map(Some)) .map_err(anyhow::Error::from)?; @@ -259,18 +280,13 @@ mod test { components::link_resolver::LinkResolver, prelude::{async_trait, serde_yaml, JsonValueStream, Link}, slog::{o, Discard, Logger}, + substreams::module::{Kind, KindMap, KindStore}, + substreams::{Module, Modules, Package}, }; + use prost::Message; use crate::{DataSource, Mapping, UnresolvedDataSource, UnresolvedMapping, SUBSTREAMS_KIND}; - const EMPTY_PACKAGE: graph::substreams::Package = graph::substreams::Package { - proto_files: vec![], - version: 0, - modules: None, - module_meta: vec![], - package_meta: vec![], - }; - #[test] fn parse_data_source() { let ds: UnresolvedDataSource = serde_yaml::from_str(TEMPLATE_DATA_SOURCE).unwrap(); @@ -306,14 +322,14 @@ mod test { name: "Uniswap".into(), source: crate::Source { module_name: "output".into(), - package: EMPTY_PACKAGE, + package: gen_package(), }, mapping: Mapping { api_version: semver::Version::from_str("0.0.7").unwrap(), kind: "substreams/graph-entities".into(), }, context: Arc::new(None), - initial_block: None, + initial_block: Some(123), }; assert_eq!(ds, expected); } @@ -340,6 +356,54 @@ mod test { ); } + fn gen_package() -> Package { + Package { + proto_files: vec![], + version: 0, + modules: Some(Modules { + modules: vec![ + Module { + name: "output".into(), + initial_block: 123, + binary_entrypoint: "output".into(), + binary_index: 0, + kind: 
Some(Kind::KindMap(KindMap { + output_type: "proto".into(), + })), + inputs: vec![], + output: None, + }, + Module { + name: "store_mod".into(), + initial_block: 0, + binary_entrypoint: "store_mod".into(), + binary_index: 0, + kind: Some(Kind::KindStore(KindStore { + update_policy: 1, + value_type: "proto1".into(), + })), + inputs: vec![], + output: None, + }, + Module { + name: "map_mod".into(), + initial_block: 123456, + binary_entrypoint: "other2".into(), + binary_index: 0, + kind: Some(Kind::KindMap(KindMap { + output_type: "proto2".into(), + })), + inputs: vec![], + output: None, + }, + ], + binaries: vec![], + }), + module_meta: vec![], + package_meta: vec![], + } + } + fn gen_data_source() -> DataSource { DataSource { kind: SUBSTREAMS_KIND.into(), @@ -347,7 +411,7 @@ mod test { name: "Uniswap".into(), source: crate::Source { module_name: "".to_string(), - package: EMPTY_PACKAGE, + package: gen_package(), }, mapping: Mapping { api_version: semver::Version::from_str("0.0.7").unwrap(), @@ -387,7 +451,7 @@ mod test { } async fn cat(&self, _logger: &Logger, _link: &Link) -> Result, Error> { - Ok(vec![]) + Ok(gen_package().encode_to_vec()) } async fn get_block(&self, _logger: &Logger, _link: &Link) -> Result, Error> { From 87314e2088bf93a7ea16553500df7e80ef40b7a1 Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Fri, 17 Mar 2023 20:53:27 +0100 Subject: [PATCH 0029/2104] graphql: remove unused lazy statics (#4473) --- graphql/src/store/prefetch.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 923c12ffb09..7ee678e7d00 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -8,7 +8,6 @@ use graph::data::value::{Object, Word}; use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; use graph::util::cache_weight; -use lazy_static::lazy_static; use std::collections::BTreeMap; use std::rc::Rc; use std::time::Instant; @@ 
-29,11 +28,7 @@ use crate::schema::ast as sast; use crate::store::query::build_query; use crate::store::StoreResolver; -lazy_static! { - static ref ARG_FIRST: String = String::from("first"); - static ref ARG_SKIP: String = String::from("skip"); - static ref ARG_ID: String = String::from("id"); -} +pub const ARG_ID: &str = "id"; /// Intermediate data structure to hold the results of prefetching entities /// and their nested associations. For each association of `entity`, `children` @@ -685,7 +680,7 @@ fn fetch( } query.logger = Some(ctx.logger.cheap_clone()); - if let Some(r::Value::String(id)) = field.argument_value(ARG_ID.as_str()) { + if let Some(r::Value::String(id)) = field.argument_value(ARG_ID) { query.filter = Some( EntityFilter::Equal(ARG_ID.to_owned(), StoreValue::from(id.clone())) .and_maybe(query.filter), From c2ba7878c545104de7bab62ec655ce08bad87242 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 16 Mar 2023 11:42:24 -0700 Subject: [PATCH 0030/2104] graph: Remove unneeded From implementations --- graph/src/data/store/mod.rs | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 534bc5dec6c..3d1b38a095c 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -14,10 +14,7 @@ use std::convert::TryFrom; use std::fmt; use std::iter::FromIterator; use std::str::FromStr; -use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap}, -}; +use std::{borrow::Cow, collections::HashMap}; use strum::AsStaticRef as _; use strum_macros::AsStaticStr; @@ -822,18 +819,6 @@ impl Entity { } } -impl From for BTreeMap { - fn from(entity: Entity) -> BTreeMap { - entity.0.into_iter().map(|(k, v)| (k, v.into())).collect() - } -} - -impl From for q::Value { - fn from(entity: Entity) -> q::Value { - q::Value::Object(entity.into()) - } -} - impl From> for Entity { fn from(m: HashMap) -> Entity { Entity(m) From 6793dc28930a5fc90da41231a2bcffed3a1653d5 Mon 
Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Mon, 20 Mar 2023 11:09:32 +0100 Subject: [PATCH 0031/2104] github: use GitHub issue forms (#4469) * github: new issue forms * github: delete old .md issue template --- .github/ISSUE_TEMPLATE.md | 7 ---- .github/ISSUE_TEMPLATE/bug.yml | 55 ++++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature.yml | 26 ++++++++++++++ 3 files changed, 81 insertions(+), 7 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/ISSUE_TEMPLATE/bug.yml create mode 100644 .github/ISSUE_TEMPLATE/feature.yml diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index cb67232349c..00000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,7 +0,0 @@ -**Do you want to request a *feature* or report a *bug*?** - -**What is the current behavior?** - -**If the current behavior is a bug, please provide the steps to reproduce and if possible a minimal demo of the problem.** - -**What is the expected behavior?** diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 00000000000..4e024149821 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,55 @@ +name: Bug report +description: Use this issue template if something is not working the way it should be. +title: "[Bug] " +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: bug-report + attributes: + label: Bug report + description: Please provide a detailed overview of the expected behavior, and what happens instead. The more details, the better. You can use Markdown. + render: Markdown + - type: textarea + id: graph-node-logs + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output (either graph-node or hosted service logs). This will be automatically formatted into code, so no need for backticks. 
Leave black if it doesn't apply. + render: Shell + - type: markdown + attributes: + value: Does this bug affect a specific subgraph deployment? If not, leave the following blank. + - type: input + attributes: + label: IPFS hash + placeholder: e.g. QmST8VZnjHrwhrW5gTyaiWJDhVcx6TooRv85B49zG7ziLH + validations: + required: false + - type: input + attributes: + label: Subgraph name or link to explorer + placeholder: e.g. https://thegraph.com/explorer/subgraphs/3nXfK3RbFrj6mhkGdoKRowEEti2WvmUdxmz73tben6Mb?view=Overview&chain=mainnet + validations: + required: false + - type: checkboxes + id: checkboxes + attributes: + label: Some information to help us out + options: + - label: Tick this box if this bug is caused by a regression found in the latest release. + - label: Tick this box if this bug is specific to the hosted service. + - label: I have searched the issue tracker to make sure this issue is not a duplicate. + required: true + - type: dropdown + id: operating-system + attributes: + label: OS information + description: What OS are you running? Leave blank if it doesn't apply. + options: + - Windows + - macOS + - Linux + - Other (please specify in your bug report) diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml new file mode 100644 index 00000000000..8581c970b91 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -0,0 +1,26 @@ +name: Feature request +description: To request or discuss new features. +title: "[Feature] " +labels: ["enhancement"] +body: + - type: textarea + id: bug-report + attributes: + label: Description + description: Please provide a detailed overview of the desired feature or improvement, along with any examples or useful information. You can use Markdown. + render: Markdown + - type: textarea + id: blockers + attributes: + label: Are you aware of any blockers that must be resolved before implementing this feature? If so, which? Link to any relevant GitHub issues. 
+ render: Markdown + validations: + required: false + - type: checkboxes + id: checkboxes + attributes: + label: Some information to help us out + options: + - label: Tick this box if you plan on implementing this feature yourself. + - label: I have searched the issue tracker to make sure this issue is not a duplicate. + required: true From 1b0031a8c51253f92791425c8299e90b6a44be6b Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Mon, 20 Mar 2023 12:20:17 +0100 Subject: [PATCH 0032/2104] *: remove MetricsRegistryTrait (#4470) --- chain/arweave/src/chain.rs | 4 +- chain/cosmos/src/chain.rs | 4 +- chain/ethereum/src/adapter.rs | 5 +- chain/ethereum/src/chain.rs | 11 +- chain/ethereum/src/network.rs | 13 +- chain/near/src/chain.rs | 5 +- chain/substreams/src/chain.rs | 7 +- core/src/polling_monitor/metrics.rs | 5 +- core/src/subgraph/context.rs | 7 +- core/src/subgraph/instance_manager.rs | 5 +- graph/src/blockchain/block_stream.rs | 3 +- graph/src/blockchain/builder.rs | 7 +- graph/src/blockchain/firehose_block_stream.rs | 5 +- .../src/blockchain/substreams_block_stream.rs | 5 +- graph/src/components/metrics/mod.rs | 267 ------------------ graph/src/components/metrics/registry.rs | 240 +++++++++++++++- graph/src/components/metrics/stopwatch.rs | 4 +- graph/src/components/metrics/subgraph.rs | 8 +- graph/src/components/subgraph/host.rs | 3 +- graph/src/data/graphql/effort.rs | 4 +- graph/src/log/factory.rs | 6 +- graphql/src/metrics.rs | 7 +- graphql/src/runner.rs | 4 +- node/src/chain.rs | 5 +- node/src/manager/commands/config.rs | 5 +- node/src/store_builder.rs | 15 +- store/postgres/src/chain_head_listener.rs | 11 +- store/postgres/src/chain_store.rs | 4 +- store/postgres/src/connection_pool.rs | 11 +- store/postgres/src/jobs.rs | 7 +- store/postgres/src/notification_listener.rs | 3 +- store/postgres/src/store_events.rs | 9 +- store/postgres/src/subgraph_store.rs | 11 +- store/postgres/src/writable.rs | 10 +- tests/src/fixture/mod.rs | 6 +- 35 files 
changed, 320 insertions(+), 406 deletions(-) diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index 8089d28de56..d371bbe7c9c 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -6,10 +6,10 @@ use graph::blockchain::{ EmptyNodeCapabilities, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::FirehoseEndpoint; +use graph::prelude::MetricsRegistry; use graph::{ blockchain::{ block_stream::{ @@ -40,7 +40,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, } impl std::fmt::Debug for Chain { diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index 1145dfb93f6..6ebd291a269 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,6 +1,6 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::BlockIngestor; -use graph::components::metrics::MetricsRegistryTrait; +use graph::prelude::MetricsRegistry; use std::sync::Arc; use graph::blockchain::block_stream::FirehoseCursor; @@ -36,7 +36,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, } impl std::fmt::Debug for Chain { diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 12761183d06..3ffa23f1f32 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -2,7 +2,6 @@ use anyhow::Error; use ethabi::{Error as ABIError, Function, ParamType, Token}; use futures::Future; use graph::blockchain::ChainIdentifier; -use graph::components::metrics::MetricsRegistryTrait; use graph::firehose::CallToFilter; use graph::firehose::CombinedFilter; use graph::firehose::LogFilter; @@ -732,7 +731,7 @@ pub struct ProviderEthRpcMetrics { } impl 
ProviderEthRpcMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let request_duration = registry .new_histogram_vec( "eth_rpc_request_duration", @@ -788,7 +787,7 @@ pub struct SubgraphEthRpcMetrics { } impl SubgraphEthRpcMetrics { - pub fn new(registry: Arc, subgraph_hash: &str) -> Self { + pub fn new(registry: Arc, subgraph_hash: &str) -> Self { let request_duration = registry .global_gauge_vec( "deployment_eth_rpc_request_duration", diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 6dfc78376cb..eb29003b2da 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -3,13 +3,12 @@ use anyhow::{Context, Error}; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{BlockIngestor, BlockchainKind, TriggersAdapterSelector}; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, ForkStep}; use graph::prelude::{ BlockHash, ComponentLoggerConfig, ElasticComponentLoggerConfig, EthereumBlock, - EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, + EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, MetricsRegistry, }; use graph::{ blockchain::{ @@ -192,7 +191,7 @@ impl BlockRefetcher for EthereumBlockRefetcher { pub struct EthereumAdapterSelector { logger_factory: LoggerFactory, client: Arc>, - registry: Arc, + registry: Arc, chain_store: Arc, } @@ -200,7 +199,7 @@ impl EthereumAdapterSelector { pub fn new( logger_factory: LoggerFactory, client: Arc>, - registry: Arc, + registry: Arc, chain_store: Arc, ) -> Self { Self { @@ -242,7 +241,7 @@ pub struct Chain { logger_factory: LoggerFactory, name: String, node_id: NodeId, - registry: Arc, + registry: Arc, client: Arc>, chain_store: Arc, call_cache: Arc, @@ -268,7 +267,7 @@ impl Chain { 
logger_factory: LoggerFactory, name: String, node_id: NodeId, - registry: Arc, + registry: Arc, chain_store: Arc, call_cache: Arc, client: Arc>, diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 2f77c6c7902..0b90ab6e1be 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -219,10 +219,7 @@ impl EthereumNetworks { #[cfg(test)] mod tests { - use graph::{ - components::metrics::MetricsRegistryTrait, firehose::SubgraphLimit, - prelude::MetricsRegistry, tokio, url::Url, - }; + use graph::{firehose::SubgraphLimit, prelude::MetricsRegistry, tokio, url::Url}; use http::HeaderMap; use std::sync::Arc; @@ -289,7 +286,7 @@ mod tests { async fn adapter_selector_selects_eth_call() { let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -392,7 +389,7 @@ mod tests { async fn adapter_selector_unlimited() { let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -460,7 +457,7 @@ mod tests { async fn adapter_selector_disable_call_only_fallback() { let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); let provider_metrics = 
Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -526,7 +523,7 @@ mod tests { async fn adapter_selector_no_call_only_fallback() { let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let transport = Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 675fd5954fa..b689493bdd9 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -5,11 +5,10 @@ use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::FirehoseEndpoint; -use graph::prelude::TryFutureExt; +use graph::prelude::{MetricsRegistry, TryFutureExt}; use graph::{ anyhow::Result, blockchain::{ @@ -98,7 +97,7 @@ pub struct Chain { name: String, client: Arc>, chain_store: Arc, - metrics_registry: Arc, + metrics_registry: Arc, block_stream_builder: Arc>, } diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index f45c858512b..aa0d1b287a3 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -2,10 +2,9 @@ use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersA use anyhow::Error; use graph::blockchain::client::ChainClient; use graph::blockchain::{BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter}; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::DeploymentCursorTracker; use graph::firehose::FirehoseEndpoints; -use graph::prelude::{BlockHash, LoggerFactory}; +use 
graph::prelude::{BlockHash, LoggerFactory, MetricsRegistry}; use graph::{ blockchain::{ self, @@ -45,14 +44,14 @@ pub struct Chain { pub(crate) logger_factory: LoggerFactory, pub(crate) client: Arc>, - pub(crate) metrics_registry: Arc, + pub(crate) metrics_registry: Arc, } impl Chain { pub fn new( logger_factory: LoggerFactory, firehose_endpoints: FirehoseEndpoints, - metrics_registry: Arc, + metrics_registry: Arc, chain_store: Arc, block_stream_builder: Arc>, ) -> Self { diff --git a/core/src/polling_monitor/metrics.rs b/core/src/polling_monitor/metrics.rs index 368ad0036ad..962216845f1 100644 --- a/core/src/polling_monitor/metrics.rs +++ b/core/src/polling_monitor/metrics.rs @@ -1,8 +1,7 @@ use std::sync::Arc; use graph::{ - components::metrics::MetricsRegistryTrait, - prelude::DeploymentHash, + prelude::{DeploymentHash, MetricsRegistry}, prometheus::{Counter, Gauge}, }; @@ -14,7 +13,7 @@ pub struct PollingMonitorMetrics { } impl PollingMonitorMetrics { - pub fn new(registry: Arc, subgraph_hash: &DeploymentHash) -> Self { + pub fn new(registry: Arc, subgraph_hash: &DeploymentHash) -> Self { let requests = registry .new_deployment_counter( "polling_monitor_requests", diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index 630571d5687..6dd8c196a93 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -6,15 +6,14 @@ use bytes::Bytes; use graph::{ blockchain::Blockchain, components::{ - metrics::MetricsRegistryTrait, store::{DeploymentId, SubgraphFork}, subgraph::{MappingError, SharedProofOfIndexing}, }, data_source::{offchain, CausalityRegion, DataSource, TriggerData}, ipfs_client::CidFile, prelude::{ - BlockNumber, BlockState, CancelGuard, CheapClone, DeploymentHash, RuntimeHostBuilder, - SubgraphCountMetric, SubgraphInstanceMetrics, TriggerProcessor, + BlockNumber, BlockState, CancelGuard, CheapClone, DeploymentHash, MetricsRegistry, + RuntimeHostBuilder, SubgraphCountMetric, SubgraphInstanceMetrics, 
TriggerProcessor, }, slog::Logger, tokio::sync::mpsc, @@ -192,7 +191,7 @@ pub struct OffchainMonitor { impl OffchainMonitor { pub fn new( logger: Logger, - registry: Arc, + registry: Arc, subgraph_hash: &DeploymentHash, ipfs_service: IpfsService, ) -> Self { diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 25dca9b13f3..af3880f980b 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -8,7 +8,6 @@ use graph::blockchain::block_stream::BlockStreamMetrics; use graph::blockchain::Blockchain; use graph::blockchain::NodeCapabilities; use graph::blockchain::{BlockchainKind, TriggerFilter}; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::subgraph::ProofOfIndexingVersion; use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; use graph::data_source::causality_region::CausalityRegionSeq; @@ -27,7 +26,7 @@ pub struct SubgraphInstanceManager { logger_factory: LoggerFactory, subgraph_store: Arc, chains: Arc, - metrics_registry: Arc, + metrics_registry: Arc, instances: SubgraphKeepAlive, link_resolver: Arc, ipfs_service: IpfsService, @@ -163,7 +162,7 @@ impl SubgraphInstanceManager { subgraph_store: Arc, chains: Arc, sg_metrics: Arc, - metrics_registry: Arc, + metrics_registry: Arc, link_resolver: Arc, ipfs_service: IpfsService, static_filters: bool, diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 32e5f238af4..9522734c8a1 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -8,7 +8,6 @@ use tokio::sync::mpsc::{self, Receiver, Sender}; use super::{Block, BlockPtr, Blockchain}; use crate::anyhow::Result; -use crate::components::metrics::MetricsRegistryTrait; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; @@ -387,7 +386,7 @@ pub struct 
BlockStreamMetrics { impl BlockStreamMetrics { pub fn new( - registry: Arc, + registry: Arc, deployment_id: &DeploymentHash, network: String, shard: String, diff --git a/graph/src/blockchain/builder.rs b/graph/src/blockchain/builder.rs index e0782727a21..dd91610552e 100644 --- a/graph/src/blockchain/builder.rs +++ b/graph/src/blockchain/builder.rs @@ -1,8 +1,7 @@ use super::Blockchain; use crate::{ - components::{metrics::MetricsRegistryTrait, store::ChainStore}, - firehose::FirehoseEndpoints, - prelude::LoggerFactory, + components::store::ChainStore, firehose::FirehoseEndpoints, prelude::LoggerFactory, + prelude::MetricsRegistry, }; use std::sync::Arc; @@ -13,7 +12,7 @@ pub struct BasicBlockchainBuilder { pub name: String, pub chain_store: Arc, pub firehose_endpoints: FirehoseEndpoints, - pub metrics_registry: Arc, + pub metrics_registry: Arc, } /// Something that can build a [`Blockchain`]. diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index a3cdf91686e..3bd277652d0 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -3,7 +3,6 @@ use super::client::ChainClient; use super::{Blockchain, TriggersAdapter}; use crate::blockchain::block_stream::FirehoseCursor; use crate::blockchain::TriggerFilter; -use crate::components::metrics::MetricsRegistryTrait; use crate::prelude::*; use crate::util::backoff::ExponentialBackoff; use crate::{firehose, firehose::FirehoseEndpoint}; @@ -23,7 +22,7 @@ struct FirehoseBlockStreamMetrics { } impl FirehoseBlockStreamMetrics { - pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { + pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { Self { deployment, @@ -116,7 +115,7 @@ where filter: Arc, start_blocks: Vec, logger: Logger, - registry: Arc, + registry: Arc, ) -> Self where F: FirehoseMapper + 'static, diff --git a/graph/src/blockchain/substreams_block_stream.rs 
b/graph/src/blockchain/substreams_block_stream.rs index 10ae4d38ce6..6adf8299ee2 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -1,7 +1,6 @@ use super::block_stream::SubstreamsMapper; use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; use crate::blockchain::Blockchain; -use crate::components::metrics::MetricsRegistryTrait; use crate::firehose::FirehoseEndpoint; use crate::prelude::*; use crate::substreams::response::Message; @@ -26,7 +25,7 @@ struct SubstreamsBlockStreamMetrics { impl SubstreamsBlockStreamMetrics { pub fn new( - registry: Arc, + registry: Arc, deployment: DeploymentHash, provider: String, ) -> Self { @@ -126,7 +125,7 @@ where start_blocks: Vec, end_blocks: Vec, logger: Logger, - registry: Arc, + registry: Arc, ) -> Self where F: SubstreamsMapper + 'static, diff --git a/graph/src/components/metrics/mod.rs b/graph/src/components/metrics/mod.rs index 70ee6418e74..4442d522fda 100644 --- a/graph/src/components/metrics/mod.rs +++ b/graph/src/components/metrics/mod.rs @@ -14,10 +14,6 @@ use std::collections::HashMap; /// Metrics for measuring where time is spent during indexing. pub mod stopwatch; -fn deployment_labels(subgraph: &str) -> HashMap { - labels! 
{ String::from("deployment") => String::from(subgraph), } -} - /// Create an unregistered counter with labels pub fn counter_with_labels( name: &str, @@ -37,266 +33,3 @@ pub fn gauge_with_labels( let opts = Opts::new(name, help).const_labels(const_labels); Gauge::with_opts(opts) } - -pub trait MetricsRegistryTrait: Send + Sync + 'static { - fn register(&self, name: &str, c: Box); - - fn unregister(&self, metric: Box); - - fn global_counter( - &self, - name: &str, - help: &str, - const_labels: HashMap, - ) -> Result; - - fn global_counter_vec( - &self, - name: &str, - help: &str, - variable_labels: &[&str], - ) -> Result; - - fn global_deployment_counter( - &self, - name: &str, - help: &str, - subgraph: &str, - ) -> Result { - self.global_counter(name, help, deployment_labels(subgraph)) - } - - fn global_deployment_counter_vec( - &self, - name: &str, - help: &str, - subgraph: &str, - variable_labels: &[&str], - ) -> Result; - - fn global_gauge( - &self, - name: &str, - help: &str, - const_labels: HashMap, - ) -> Result; - - fn global_gauge_vec( - &self, - name: &str, - help: &str, - variable_labels: &[&str], - ) -> Result; - - fn new_gauge( - &self, - name: &str, - help: &str, - const_labels: HashMap, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help).const_labels(const_labels); - let gauge = Box::new(Gauge::with_opts(opts)?); - self.register(name, gauge.clone()); - Ok(gauge) - } - - fn new_deployment_gauge( - &self, - name: &str, - help: &str, - subgraph: &str, - ) -> Result { - let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); - let gauge = Gauge::with_opts(opts)?; - self.register(name, Box::new(gauge.clone())); - Ok(gauge) - } - - fn new_gauge_vec( - &self, - name: &str, - help: &str, - variable_labels: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help); - let gauges = Box::new(GaugeVec::new( - opts, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - 
self.register(name, gauges.clone()); - Ok(gauges) - } - - fn new_deployment_gauge_vec( - &self, - name: &str, - help: &str, - subgraph: &str, - variable_labels: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); - let gauges = Box::new(GaugeVec::new( - opts, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - self.register(name, gauges.clone()); - Ok(gauges) - } - - fn new_counter(&self, name: &str, help: &str) -> Result, PrometheusError> { - let opts = Opts::new(name, help); - let counter = Box::new(Counter::with_opts(opts)?); - self.register(name, counter.clone()); - Ok(counter) - } - - fn new_counter_with_labels( - &self, - name: &str, - help: &str, - const_labels: HashMap, - ) -> Result, PrometheusError> { - let counter = Box::new(counter_with_labels(name, help, const_labels)?); - self.register(name, counter.clone()); - Ok(counter) - } - - fn new_deployment_counter( - &self, - name: &str, - help: &str, - subgraph: &str, - ) -> Result { - let counter = counter_with_labels(name, help, deployment_labels(subgraph))?; - self.register(name, Box::new(counter.clone())); - Ok(counter) - } - - fn new_counter_vec( - &self, - name: &str, - help: &str, - variable_labels: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help); - let counters = Box::new(CounterVec::new( - opts, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - self.register(name, counters.clone()); - Ok(counters) - } - - fn new_deployment_counter_vec( - &self, - name: &str, - help: &str, - subgraph: &str, - variable_labels: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); - let counters = Box::new(CounterVec::new( - opts, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - self.register(name, counters.clone()); - Ok(counters) - } - - fn 
new_deployment_histogram( - &self, - name: &str, - help: &str, - subgraph: &str, - buckets: Vec, - ) -> Result, PrometheusError> { - let opts = HistogramOpts::new(name, help) - .const_labels(deployment_labels(subgraph)) - .buckets(buckets); - let histogram = Box::new(Histogram::with_opts(opts)?); - self.register(name, histogram.clone()); - Ok(histogram) - } - - fn new_histogram( - &self, - name: &str, - help: &str, - buckets: Vec, - ) -> Result, PrometheusError> { - let opts = HistogramOpts::new(name, help).buckets(buckets); - let histogram = Box::new(Histogram::with_opts(opts)?); - self.register(name, histogram.clone()); - Ok(histogram) - } - - fn new_histogram_vec( - &self, - name: &str, - help: &str, - variable_labels: Vec, - buckets: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help); - let histograms = Box::new(HistogramVec::new( - HistogramOpts { - common_opts: opts, - buckets, - }, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - self.register(name, histograms.clone()); - Ok(histograms) - } - - fn new_deployment_histogram_vec( - &self, - name: &str, - help: &str, - subgraph: &str, - variable_labels: Vec, - buckets: Vec, - ) -> Result, PrometheusError> { - let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); - let histograms = Box::new(HistogramVec::new( - HistogramOpts { - common_opts: opts, - buckets, - }, - variable_labels - .iter() - .map(String::as_str) - .collect::>() - .as_slice(), - )?); - self.register(name, histograms.clone()); - Ok(histograms) - } - - fn global_histogram_vec( - &self, - name: &str, - help: &str, - variable_labels: &[&str], - ) -> Result; -} diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index abb469f7304..bd9fe6fe0f6 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -1,6 +1,8 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; +use 
prometheus::{labels, Histogram}; + use crate::components::metrics::{counter_with_labels, gauge_with_labels}; use crate::prelude::Collector; use crate::prometheus::{ @@ -9,8 +11,6 @@ use crate::prometheus::{ }; use crate::slog::{self, error, o, Logger}; -use super::MetricsRegistryTrait; - pub struct MetricsRegistry { logger: Logger, registry: Arc, @@ -119,10 +119,8 @@ impl MetricsRegistry { Ok(counters) } } -} -impl MetricsRegistryTrait for MetricsRegistry { - fn register(&self, name: &str, c: Box) { + pub fn register(&self, name: &str, c: Box) { let err = match self.registry.register(c).err() { None => { self.registered_metrics.inc(); @@ -170,7 +168,7 @@ impl MetricsRegistryTrait for MetricsRegistry { }; } - fn global_counter( + pub fn global_counter( &self, name: &str, help: &str, @@ -191,7 +189,16 @@ impl MetricsRegistryTrait for MetricsRegistry { } } - fn global_counter_vec( + pub fn global_deployment_counter( + &self, + name: &str, + help: &str, + subgraph: &str, + ) -> Result { + self.global_counter(name, help, deployment_labels(subgraph)) + } + + pub fn global_counter_vec( &self, name: &str, help: &str, @@ -200,7 +207,7 @@ impl MetricsRegistryTrait for MetricsRegistry { self.global_counter_vec_internal(name, help, None, variable_labels) } - fn global_deployment_counter_vec( + pub fn global_deployment_counter_vec( &self, name: &str, help: &str, @@ -210,7 +217,7 @@ impl MetricsRegistryTrait for MetricsRegistry { self.global_counter_vec_internal(name, help, Some(subgraph), variable_labels) } - fn global_gauge( + pub fn global_gauge( &self, name: &str, help: &str, @@ -231,7 +238,7 @@ impl MetricsRegistryTrait for MetricsRegistry { } } - fn global_gauge_vec( + pub fn global_gauge_vec( &self, name: &str, help: &str, @@ -253,7 +260,7 @@ impl MetricsRegistryTrait for MetricsRegistry { } } - fn global_histogram_vec( + pub fn global_histogram_vec( &self, name: &str, help: &str, @@ -275,7 +282,7 @@ impl MetricsRegistryTrait for MetricsRegistry { } } - fn 
unregister(&self, metric: Box) { + pub fn unregister(&self, metric: Box) { match self.registry.unregister(metric) { Ok(_) => { self.registered_metrics.dec(); @@ -286,6 +293,215 @@ impl MetricsRegistryTrait for MetricsRegistry { } }; } + + pub fn new_gauge( + &self, + name: &str, + help: &str, + const_labels: HashMap, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help).const_labels(const_labels); + let gauge = Box::new(Gauge::with_opts(opts)?); + self.register(name, gauge.clone()); + Ok(gauge) + } + + pub fn new_deployment_gauge( + &self, + name: &str, + help: &str, + subgraph: &str, + ) -> Result { + let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); + let gauge = Gauge::with_opts(opts)?; + self.register(name, Box::new(gauge.clone())); + Ok(gauge) + } + + pub fn new_gauge_vec( + &self, + name: &str, + help: &str, + variable_labels: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help); + let gauges = Box::new(GaugeVec::new( + opts, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, gauges.clone()); + Ok(gauges) + } + + pub fn new_deployment_gauge_vec( + &self, + name: &str, + help: &str, + subgraph: &str, + variable_labels: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); + let gauges = Box::new(GaugeVec::new( + opts, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, gauges.clone()); + Ok(gauges) + } + + pub fn new_counter(&self, name: &str, help: &str) -> Result, PrometheusError> { + let opts = Opts::new(name, help); + let counter = Box::new(Counter::with_opts(opts)?); + self.register(name, counter.clone()); + Ok(counter) + } + + pub fn new_counter_with_labels( + &self, + name: &str, + help: &str, + const_labels: HashMap, + ) -> Result, PrometheusError> { + let counter = Box::new(counter_with_labels(name, help, 
const_labels)?); + self.register(name, counter.clone()); + Ok(counter) + } + + pub fn new_deployment_counter( + &self, + name: &str, + help: &str, + subgraph: &str, + ) -> Result { + let counter = counter_with_labels(name, help, deployment_labels(subgraph))?; + self.register(name, Box::new(counter.clone())); + Ok(counter) + } + + pub fn new_counter_vec( + &self, + name: &str, + help: &str, + variable_labels: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help); + let counters = Box::new(CounterVec::new( + opts, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, counters.clone()); + Ok(counters) + } + + pub fn new_deployment_counter_vec( + &self, + name: &str, + help: &str, + subgraph: &str, + variable_labels: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); + let counters = Box::new(CounterVec::new( + opts, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, counters.clone()); + Ok(counters) + } + + pub fn new_deployment_histogram( + &self, + name: &str, + help: &str, + subgraph: &str, + buckets: Vec, + ) -> Result, PrometheusError> { + let opts = HistogramOpts::new(name, help) + .const_labels(deployment_labels(subgraph)) + .buckets(buckets); + let histogram = Box::new(Histogram::with_opts(opts)?); + self.register(name, histogram.clone()); + Ok(histogram) + } + + pub fn new_histogram( + &self, + name: &str, + help: &str, + buckets: Vec, + ) -> Result, PrometheusError> { + let opts = HistogramOpts::new(name, help).buckets(buckets); + let histogram = Box::new(Histogram::with_opts(opts)?); + self.register(name, histogram.clone()); + Ok(histogram) + } + + pub fn new_histogram_vec( + &self, + name: &str, + help: &str, + variable_labels: Vec, + buckets: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help); + let histograms = 
Box::new(HistogramVec::new( + HistogramOpts { + common_opts: opts, + buckets, + }, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, histograms.clone()); + Ok(histograms) + } + + pub fn new_deployment_histogram_vec( + &self, + name: &str, + help: &str, + subgraph: &str, + variable_labels: Vec, + buckets: Vec, + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help).const_labels(deployment_labels(subgraph)); + let histograms = Box::new(HistogramVec::new( + HistogramOpts { + common_opts: opts, + buckets, + }, + variable_labels + .iter() + .map(String::as_str) + .collect::>() + .as_slice(), + )?); + self.register(name, histograms.clone()); + Ok(histograms) + } +} + +fn deployment_labels(subgraph: &str) -> HashMap { + labels! { String::from("deployment") => String::from(subgraph), } } #[test] diff --git a/graph/src/components/metrics/stopwatch.rs b/graph/src/components/metrics/stopwatch.rs index c514670980b..11dac8af297 100644 --- a/graph/src/components/metrics/stopwatch.rs +++ b/graph/src/components/metrics/stopwatch.rs @@ -2,8 +2,6 @@ use crate::prelude::*; use std::sync::{atomic::AtomicBool, atomic::Ordering, Mutex}; use std::time::Instant; -use super::MetricsRegistryTrait; - /// This is a "section guard", that closes the section on drop. 
pub struct Section { id: String, @@ -47,7 +45,7 @@ impl StopwatchMetrics { logger: Logger, subgraph_id: DeploymentHash, stage: &str, - registry: Arc, + registry: Arc, ) -> Self { let stage = stage.to_owned(); let mut inner = StopwatchInner { diff --git a/graph/src/components/metrics/subgraph.rs b/graph/src/components/metrics/subgraph.rs index cfc2cfb0210..cfee0af723c 100644 --- a/graph/src/components/metrics/subgraph.rs +++ b/graph/src/components/metrics/subgraph.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; use std::sync::Arc; use super::stopwatch::StopwatchMetrics; -use super::MetricsRegistryTrait; +use super::MetricsRegistry; pub struct SubgraphInstanceMetrics { pub block_trigger_count: Box, @@ -20,7 +20,7 @@ pub struct SubgraphInstanceMetrics { impl SubgraphInstanceMetrics { pub fn new( - registry: Arc, + registry: Arc, subgraph_hash: &str, stopwatch: StopwatchMetrics, ) -> Self { @@ -79,7 +79,7 @@ impl SubgraphInstanceMetrics { self.trigger_processing_duration.observe(duration); } - pub fn unregister(&self, registry: Arc) { + pub fn unregister(&self, registry: Arc) { registry.unregister(self.block_processing_duration.clone()); registry.unregister(self.block_trigger_count.clone()); registry.unregister(self.trigger_processing_duration.clone()); @@ -94,7 +94,7 @@ pub struct SubgraphCountMetric { } impl SubgraphCountMetric { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let running_count = registry .new_gauge( "deployment_running_count", diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index ad92ef17d2b..9ef35254c18 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -6,7 +6,6 @@ use anyhow::Error; use async_trait::async_trait; use futures::sync::mpsc; -use crate::components::metrics::MetricsRegistryTrait; use crate::components::store::SubgraphFork; use crate::data_source::{ DataSource, DataSourceTemplate, MappingTrigger, TriggerData, 
TriggerWithHandler, @@ -88,7 +87,7 @@ pub struct HostMetrics { impl HostMetrics { pub fn new( - registry: Arc, + registry: Arc, subgraph: &str, stopwatch: StopwatchMetrics, ) -> Self { diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index 63777116b70..d4f18862e4a 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -7,7 +7,7 @@ use std::iter::FromIterator; use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; -use crate::components::metrics::{Counter, Gauge, MetricsRegistryTrait}; +use crate::components::metrics::{Counter, Gauge, MetricsRegistry}; use crate::components::store::PoolWaitStats; use crate::data::graphql::shape_hash::shape_hash; use crate::data::query::{CacheStatus, QueryExecutionError}; @@ -207,7 +207,7 @@ impl LoadManager { pub fn new( logger: &Logger, blocked_queries: Vec>, - registry: Arc, + registry: Arc, ) -> Self { let logger = logger.new(o!("component" => "LoadManager")); let blocked_queries = blocked_queries diff --git a/graph/src/log/factory.rs b/graph/src/log/factory.rs index 6be127e3b60..c7aded99160 100644 --- a/graph/src/log/factory.rs +++ b/graph/src/log/factory.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use prometheus::Counter; use slog::*; -use crate::components::metrics::MetricsRegistryTrait; +use crate::components::metrics::MetricsRegistry; use crate::components::store::DeploymentLocator; use crate::log::elastic::*; use crate::log::split::*; @@ -24,7 +24,7 @@ pub struct ComponentLoggerConfig { pub struct LoggerFactory { parent: Logger, elastic_config: Option, - metrics_registry: Arc, + metrics_registry: Arc, } impl LoggerFactory { @@ -32,7 +32,7 @@ impl LoggerFactory { pub fn new( logger: Logger, elastic_config: Option, - metrics_registry: Arc, + metrics_registry: Arc, ) -> Self { Self { parent: logger, diff --git a/graphql/src/metrics.rs b/graphql/src/metrics.rs index 9051c446262..5427a56f383 100644 --- a/graphql/src/metrics.rs +++ b/graphql/src/metrics.rs @@ 
-3,9 +3,8 @@ use std::fmt; use std::sync::Arc; use std::time::Duration; -use graph::components::metrics::MetricsRegistryTrait; use graph::data::query::QueryResults; -use graph::prelude::{DeploymentHash, GraphQLMetrics as GraphQLMetricsTrait}; +use graph::prelude::{DeploymentHash, GraphQLMetrics as GraphQLMetricsTrait, MetricsRegistry}; use graph::prometheus::{CounterVec, Gauge, Histogram, HistogramVec}; pub struct GraphQLMetrics { @@ -77,7 +76,7 @@ impl GraphQLMetricsTrait for GraphQLMetrics { } impl GraphQLMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let query_execution_time = registry .new_histogram_vec( "query_execution_time", @@ -141,7 +140,7 @@ impl GraphQLMetrics { // Tests need to construct one of these, but normal code doesn't #[cfg(debug_assertions)] - pub fn make(registry: Arc) -> Self { + pub fn make(registry: Arc) -> Self { Self::new(registry) } diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index fa22b1291ab..30019deb492 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -5,7 +5,7 @@ use crate::metrics::GraphQLMetrics; use crate::prelude::{QueryExecutionOptions, StoreResolver, SubscriptionExecutionOptions}; use crate::query::execute_query; use crate::subscription::execute_prepared_subscription; -use graph::components::metrics::MetricsRegistryTrait; +use graph::prelude::MetricsRegistry; use graph::{ components::store::SubscriptionManager, prelude::{ @@ -46,7 +46,7 @@ where store: Arc, subscription_manager: Arc, load_manager: Arc, - registry: Arc, + registry: Arc, ) -> Self { let logger = logger.new(o!("component" => "GraphQlRunner")); let graphql_metrics = Arc::new(GraphQLMetrics::new(registry)); diff --git a/node/src/chain.rs b/node/src/chain.rs index fe7f38ade71..7efbd053d43 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -5,12 +5,11 @@ use futures::TryFutureExt; use graph::anyhow::{bail, Error}; use graph::blockchain::{Block as BlockchainBlock, BlockchainKind, 
ChainIdentifier}; use graph::cheap_clone::CheapClone; -use graph::components::metrics::MetricsRegistryTrait; use graph::endpoint::EndpointMetrics; use graph::firehose::{FirehoseEndpoint, FirehoseNetworks, SubgraphLimit}; use graph::ipfs_client::IpfsClient; -use graph::prelude::prost; use graph::prelude::{anyhow, tokio}; +use graph::prelude::{prost, MetricsRegistry}; use graph::slog::{debug, error, info, o, Logger}; use graph::url::Url; use graph::util::security::SafeDisplay; @@ -386,7 +385,7 @@ where /// `EthereumAdapter`. pub async fn create_all_ethereum_networks( logger: Logger, - registry: Arc, + registry: Arc, config: &Config, ) -> anyhow::Result { let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 96ccc9da353..fd79caf7c47 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -2,11 +2,10 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ anyhow::bail, - components::metrics::MetricsRegistryTrait, itertools::Itertools, prelude::{ anyhow::{anyhow, Error}, - NodeId, + MetricsRegistry, NodeId, }, slog::Logger, }; @@ -100,7 +99,7 @@ pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Err pub async fn provider( logger: Logger, config: &Config, - registry: Arc, + registry: Arc, features: String, network: String, ) -> Result<(), Error> { diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index a592d7c3ee3..3675155eefe 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -3,8 +3,7 @@ use std::{collections::HashMap, sync::Arc}; use futures::future::join_all; use graph::blockchain::ChainIdentifier; -use graph::components::metrics::MetricsRegistryTrait; -use graph::prelude::{o, NodeId}; +use graph::prelude::{o, MetricsRegistry, NodeId}; use graph::url::Url; use graph::{ prelude::{info, CheapClone, Logger}, @@ -30,7 +29,7 @@ pub struct StoreBuilder { /// Map 
network names to the shards where they are/should be stored chains: HashMap, pub coord: Arc, - registry: Arc, + registry: Arc, } impl StoreBuilder { @@ -42,7 +41,7 @@ impl StoreBuilder { node: &NodeId, config: &Config, fork_base: Option, - registry: Arc, + registry: Arc, ) -> Self { let primary_shard = config.primary_store().clone(); @@ -98,7 +97,7 @@ impl StoreBuilder { node: &NodeId, config: &Config, fork_base: Option, - registry: Arc, + registry: Arc, ) -> ( Arc, HashMap, @@ -168,7 +167,7 @@ impl StoreBuilder { subgraph_store: Arc, chains: HashMap, networks: Vec<(String, Vec)>, - registry: Arc, + registry: Arc, ) -> Arc { let networks = networks .into_iter() @@ -205,7 +204,7 @@ impl StoreBuilder { node: &NodeId, name: &str, shard: &Shard, - registry: Arc, + registry: Arc, coord: Arc, ) -> ConnectionPool { let logger = logger.new(o!("pool" => "main")); @@ -241,7 +240,7 @@ impl StoreBuilder { node: &NodeId, name: &str, shard: &Shard, - registry: Arc, + registry: Arc, coord: Arc, ) -> (Vec, Vec) { let mut weights: Vec<_> = vec![shard.weight]; diff --git a/store/postgres/src/chain_head_listener.rs b/store/postgres/src/chain_head_listener.rs index 97234545773..91109c46b3b 100644 --- a/store/postgres/src/chain_head_listener.rs +++ b/store/postgres/src/chain_head_listener.rs @@ -1,9 +1,8 @@ use graph::{ blockchain::ChainHeadUpdateStream, - components::metrics::MetricsRegistryTrait, prelude::{ futures03::{self, FutureExt}, - tokio, StoreError, + tokio, MetricsRegistry, StoreError, }, prometheus::{CounterVec, GaugeVec}, util::timed_rw_lock::TimedRwLock, @@ -56,7 +55,7 @@ pub struct BlockIngestorMetrics { } impl BlockIngestorMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { Self { chain_head_number: registry .new_gauge_vec( @@ -97,11 +96,7 @@ pub(crate) struct ChainHeadUpdateSender { } impl ChainHeadUpdateListener { - pub fn new( - logger: &Logger, - registry: Arc, - postgres_url: String, - ) -> Self { + pub fn new(logger: &Logger, 
registry: Arc, postgres_url: String) -> Self { let logger = logger.new(o!("component" => "ChainHeadUpdateListener")); let ingestor_metrics = Arc::new(BlockIngestorMetrics::new(registry.clone())); let counter = registry diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index b7835fc4589..882fd7e6b04 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -3,8 +3,8 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use diesel::sql_types::Text; use diesel::{insert_into, update}; -use graph::components::metrics::MetricsRegistryTrait; use graph::parking_lot::RwLock; +use graph::prelude::MetricsRegistry; use graph::prometheus::{CounterVec, GaugeVec}; use std::{ @@ -1334,7 +1334,7 @@ pub struct ChainStoreMetrics { } impl ChainStoreMetrics { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let chain_head_cache_size = registry .new_gauge_vec( "chain_head_cache_num_blocks", diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index c08c63f0e68..b1749abead2 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -7,10 +7,9 @@ use diesel::{ use diesel::{sql_query, RunQueryDsl}; use graph::cheap_clone::CheapClone; -use graph::components::metrics::MetricsRegistryTrait; use graph::constraint_violation; -use graph::prelude::tokio; use graph::prelude::tokio::time::Instant; +use graph::prelude::{tokio, MetricsRegistry}; use graph::slog::warn; use graph::util::timed_rw_lock::TimedMutex; use graph::{ @@ -310,7 +309,7 @@ impl ConnectionPool { pool_size: u32, fdw_pool_size: Option, logger: &Logger, - registry: Arc, + registry: Arc, coord: Arc, ) -> ConnectionPool { let state_tracker = PoolStateTracker::new(); @@ -593,7 +592,7 @@ struct EventHandler { impl EventHandler { fn new( logger: Logger, - registry: Arc, + registry: Arc, wait_stats: PoolWaitStats, const_labels: HashMap, 
state_tracker: PoolStateTracker, @@ -711,7 +710,7 @@ impl PoolInner { pool_size: u32, fdw_pool_size: Option, logger: &Logger, - registry: Arc, + registry: Arc, state_tracker: PoolStateTracker, ) -> PoolInner { let logger_store = logger.new(o!("component" => "Store")); @@ -1157,7 +1156,7 @@ impl PoolCoordinator { postgres_url: String, pool_size: u32, fdw_pool_size: Option, - registry: Arc, + registry: Arc, ) -> ConnectionPool { let is_writable = !pool_name.is_replica(); diff --git a/store/postgres/src/jobs.rs b/store/postgres/src/jobs.rs index 43927dd6c76..1cff94f3648 100644 --- a/store/postgres/src/jobs.rs +++ b/store/postgres/src/jobs.rs @@ -6,8 +6,7 @@ use std::time::{Duration, Instant}; use async_trait::async_trait; use diesel::{prelude::RunQueryDsl, sql_query, sql_types::Double}; -use graph::components::metrics::MetricsRegistryTrait; -use graph::prelude::{error, Logger, StoreError, ENV_VARS}; +use graph::prelude::{error, Logger, MetricsRegistry, StoreError, ENV_VARS}; use graph::prometheus::Gauge; use graph::util::jobs::{Job, Runner}; @@ -18,7 +17,7 @@ pub fn register( runner: &mut Runner, store: Arc, primary_pool: ConnectionPool, - registry: Arc, + registry: Arc, ) { const ONE_MINUTE: Duration = Duration::from_secs(60); const ONE_HOUR: Duration = Duration::from_secs(60 * 60); @@ -88,7 +87,7 @@ struct NotificationQueueUsage { } impl NotificationQueueUsage { - fn new(primary: ConnectionPool, registry: Arc) -> Self { + fn new(primary: ConnectionPool, registry: Arc) -> Self { let usage_gauge = registry .new_gauge( "notification_queue_usage", diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index af9877cb671..9ae012246e0 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -1,7 +1,6 @@ use diesel::pg::PgConnection; use diesel::select; use diesel::sql_types::Text; -use graph::components::metrics::MetricsRegistryTrait; use 
graph::prelude::tokio::sync::mpsc::error::SendTimeoutError; use graph::util::backoff::ExponentialBackoff; use lazy_static::lazy_static; @@ -397,7 +396,7 @@ pub struct NotificationSender { } impl NotificationSender { - pub fn new(registry: Arc) -> Self { + pub fn new(registry: Arc) -> Self { let sent_counter = registry .global_counter_vec( "notification_queue_sent", diff --git a/store/postgres/src/store_events.rs b/store/postgres/src/store_events.rs index f6d08675e2c..a2a760ddbfe 100644 --- a/store/postgres/src/store_events.rs +++ b/store/postgres/src/store_events.rs @@ -1,5 +1,4 @@ use futures03::TryStreamExt; -use graph::components::metrics::MetricsRegistryTrait; use graph::parking_lot::Mutex; use graph::tokio_stream::wrappers::ReceiverStream; use std::collections::BTreeSet; @@ -22,7 +21,7 @@ impl StoreEventListener { pub fn new( logger: Logger, postgres_url: String, - registry: Arc, + registry: Arc, ) -> (Self, Box + Send>) { let channel = SafeChannelName::i_promise_this_is_safe("store_events"); let (notification_listener, receiver) = @@ -133,11 +132,7 @@ pub struct SubscriptionManager { } impl SubscriptionManager { - pub fn new( - logger: Logger, - postgres_url: String, - registry: Arc, - ) -> Self { + pub fn new(logger: Logger, postgres_url: String, registry: Arc) -> Self { let (listener, store_events) = StoreEventListener::new(logger, postgres_url, registry); let mut manager = SubscriptionManager { diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index dde1b9a174c..ca4a46608b2 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -14,7 +14,6 @@ use std::{iter::FromIterator, time::Duration}; use graph::{ cheap_clone::CheapClone, components::{ - metrics::MetricsRegistryTrait, server::index_node::VersionInfo, store::{ self, BlockStore, DeploymentLocator, DeploymentSchemaVersion, @@ -28,8 +27,8 @@ use graph::{ prelude::{ anyhow, futures03::future::join_all, lazy_static, o, 
web3::types::Address, ApiSchema, ApiVersion, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, - NodeId, PartialBlockPtr, Schema, StoreError, SubgraphDeploymentEntity, SubgraphName, - SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, + MetricsRegistry, NodeId, PartialBlockPtr, Schema, StoreError, SubgraphDeploymentEntity, + SubgraphName, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }, url::Url, util::timed_cache::TimedCache, @@ -219,7 +218,7 @@ impl SubgraphStore { placer: Arc, sender: Arc, fork_base: Option, - registry: Arc, + registry: Arc, ) -> Self { Self { inner: Arc::new(SubgraphStoreInner::new( @@ -275,7 +274,7 @@ pub struct SubgraphStoreInner { placer: Arc, sender: Arc, writables: Mutex>>, - registry: Arc, + registry: Arc, } impl SubgraphStoreInner { @@ -298,7 +297,7 @@ impl SubgraphStoreInner { stores: Vec<(Shard, ConnectionPool, Vec, Vec)>, placer: Arc, sender: Arc, - registry: Arc, + registry: Arc, ) -> Self { let mirror = { let pools = HashMap::from_iter( diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 0820f07df37..3ba9fad1dda 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -5,13 +5,13 @@ use std::time::Duration; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::metrics::MetricsRegistryTrait; use graph::components::store::ReadStore; use graph::components::store::{DeploymentCursorTracker, EntityKey}; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ - BlockNumber, Entity, Schema, SubgraphDeploymentEntity, SubgraphStore as _, BLOCK_NUMBER_MAX, + BlockNumber, Entity, MetricsRegistry, Schema, SubgraphDeploymentEntity, SubgraphStore as _, + BLOCK_NUMBER_MAX, }; use graph::slog::info; use graph::util::bounded_queue::BoundedQueue; @@ -586,7 +586,7 @@ impl Queue { logger: Logger, store: Arc, capacity: usize, - 
registry: Arc, + registry: Arc, ) -> Arc { async fn start_writer(queue: Arc, logger: Logger) { loop { @@ -855,7 +855,7 @@ impl Writer { logger: Logger, store: Arc, capacity: usize, - registry: Arc, + registry: Arc, ) -> Self { info!(logger, "Starting subgraph writer"; "queue_size" => capacity); if capacity == 0 { @@ -987,7 +987,7 @@ impl WritableStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, - registry: Arc, + registry: Arc, ) -> Result { let store = Arc::new(SyncStore::new(subgraph_store, logger.clone(), site)?); let block_ptr = Mutex::new(store.block_ptr().await?); diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 206c074b078..7a803c3d341 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -16,7 +16,7 @@ use graph::blockchain::{ TriggersAdapter, TriggersAdapterSelector, }; use graph::cheap_clone::CheapClone; -use graph::components::metrics::{MetricsRegistry, MetricsRegistryTrait}; +use graph::components::metrics::MetricsRegistry; use graph::components::store::{BlockStore, DeploymentLocator}; use graph::data::graphql::effort::LoadManager; use graph::data::query::{Query, QueryTarget}; @@ -289,7 +289,7 @@ pub async fn stores(store_config_path: &str) -> Stores { }; let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let node_id = NodeId::new(NODE_ID).unwrap(); let store_builder = StoreBuilder::new(&logger, &node_id, &config, None, mock_registry.clone()).await; @@ -331,7 +331,7 @@ pub async fn setup( }); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); + let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); let node_id = NodeId::new(NODE_ID).unwrap(); From 094e40b3b8d781503d19e70bf4aa5391fd6d3502 Mon Sep 17 00:00:00 2001 From: Yaro Shkvorets Date: Mon, 20 Mar 
2023 12:36:34 -0400 Subject: [PATCH 0033/2104] Add jitter to `ExponentialBackoff` (#4476) * add backoff jitter, tests and env variable * increase default ERROR_RETRY_CEIL to 60 minutes --- core/src/subgraph/runner.rs | 3 ++- graph/src/env/mod.rs | 12 +++++++++-- graph/src/util/backoff.rs | 40 ++++++++++++++++++++++++++++++++++++- 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 479d0f0b1bd..3ef7ce18845 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -60,9 +60,10 @@ where should_try_unfail_non_deterministic: true, synced: false, skip_ptr_updates_timer: Instant::now(), - backoff: ExponentialBackoff::new( + backoff: ExponentialBackoff::with_jitter( (MINUTE * 2).min(env_vars.subgraph_error_retry_ceil), env_vars.subgraph_error_retry_ceil, + env_vars.subgraph_error_retry_jitter, ), entity_lfu_cache: LfuCache::new(), }, diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index 04f24152e94..c538b8227aa 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -133,8 +133,13 @@ pub struct EnvVars { /// Ceiling for the backoff retry of non-deterministic errors. /// /// Set by the environment variable `GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS` - /// (expressed in seconds). The default value is 1800s (30 minutes). + /// (expressed in seconds). The default value is 3600s (60 minutes). pub subgraph_error_retry_ceil: Duration, + /// Jitter factor for the backoff retry of non-deterministic errors. + /// + /// Set by the environment variable `GRAPH_SUBGRAPH_ERROR_RETRY_JITTER` + /// (clamped between 0.0 and 1.0). The default value is 0.2. + pub subgraph_error_retry_jitter: f64, /// Experimental feature. /// /// Set by the flag `GRAPH_ENABLE_SELECT_BY_SPECIFIC_ATTRIBUTES`. 
Off by @@ -210,6 +215,7 @@ impl EnvVars { subgraph_max_data_sources: inner.subgraph_max_data_sources.0, disable_fail_fast: inner.disable_fail_fast.0, subgraph_error_retry_ceil: Duration::from_secs(inner.subgraph_error_retry_ceil_in_secs), + subgraph_error_retry_jitter: inner.subgraph_error_retry_jitter, enable_select_by_specific_attributes: inner.enable_select_by_specific_attributes.0, log_trigger_data: inner.log_trigger_data.0, explorer_ttl: Duration::from_secs(inner.explorer_ttl_in_secs), @@ -313,8 +319,10 @@ struct Inner { subgraph_max_data_sources: NoUnderscores, #[envconfig(from = "GRAPH_DISABLE_FAIL_FAST", default = "false")] disable_fail_fast: EnvVarBoolean, - #[envconfig(from = "GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS", default = "1800")] + #[envconfig(from = "GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS", default = "3600")] subgraph_error_retry_ceil_in_secs: u64, + #[envconfig(from = "GRAPH_SUBGRAPH_ERROR_RETRY_JITTER", default = "0.2")] + subgraph_error_retry_jitter: f64, #[envconfig(from = "GRAPH_ENABLE_SELECT_BY_SPECIFIC_ATTRIBUTES", default = "false")] enable_select_by_specific_attributes: EnvVarBoolean, #[envconfig(from = "GRAPH_LOG_TRIGGER_DATA", default = "false")] diff --git a/graph/src/util/backoff.rs b/graph/src/util/backoff.rs index 1edc3684130..ffe8d5bb5d3 100644 --- a/graph/src/util/backoff.rs +++ b/graph/src/util/backoff.rs @@ -8,6 +8,7 @@ pub struct ExponentialBackoff { pub attempt: u64, base: Duration, ceiling: Duration, + jitter: f64, } impl ExponentialBackoff { @@ -16,6 +17,19 @@ impl ExponentialBackoff { attempt: 0, base, ceiling, + jitter: 0.0, + } + } + + // Create ExponentialBackoff with jitter + // jitter is a value between 0.0 and 1.0. 
Sleep delay will be randomized + // within `jitter` of the normal sleep delay + pub fn with_jitter(base: Duration, ceiling: Duration, jitter: f64) -> Self { + ExponentialBackoff { + attempt: 0, + base, + ceiling, + jitter: jitter.clamp(0.0, 1.0), } } @@ -37,7 +51,8 @@ impl ExponentialBackoff { if delay > self.ceiling { delay = self.ceiling; } - delay + let jitter = rand::Rng::gen_range(&mut rand::thread_rng(), -self.jitter..=self.jitter); + delay.mul_f64(1.0 + jitter) } fn next_attempt(&mut self) -> Duration { @@ -80,6 +95,29 @@ mod tests { assert_eq!(backoff.next_attempt(), Duration::from_secs(5)); } + #[test] + fn test_delay_with_jitter() { + let mut backoff = ExponentialBackoff::with_jitter( + Duration::from_millis(1000), + Duration::from_secs(5), + 0.1, + ); + + // Delay should be between 0.5s and 1.5s + let delay1 = backoff.delay(); + assert!(delay1 > Duration::from_millis(900) && delay1 <= Duration::from_millis(1100)); + let delay2 = backoff.delay(); + assert!(delay2 > Duration::from_millis(900) && delay2 <= Duration::from_millis(1100)); + + // Delays should be random and different + assert_ne!(delay1, delay2); + + // Test ceiling + backoff.attempt = 123456; + let delay = backoff.delay(); + assert!(delay > Duration::from_millis(4500) && delay <= Duration::from_millis(5500)); + } + #[test] fn test_overflow_delay() { let mut backoff = From 2c15d1694ee1a54e7fcab076e320df629a1632fd Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Mon, 20 Mar 2023 17:39:09 +0100 Subject: [PATCH 0034/2104] Upgrade to Rust 1.68.0 (#4471) * Update Rust to 1.68 * node: replace crossbeam-channel with std * config.toml: add useful comment --- .cargo/config.toml | 6 ++++++ Cargo.lock | 1 - node/Cargo.toml | 1 - node/src/main.rs | 7 +++---- rust-toolchain.toml | 2 +- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 9d360ec0167..810054d3158 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,9 @@ [alias] 
# Warnings create a lot of noise, we only print errors. check-clippy = "clippy --no-deps -- --allow warnings" + +# Can be safely removed once Cargo's sparse protocol (see +# https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html#cargos-sparse-protocol) +# becomes the default. +[registries.crates-io] +protocol = "sparse" diff --git a/Cargo.lock b/Cargo.lock index 2f6469d393a..e4f43f3596e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1761,7 +1761,6 @@ name = "graph-node" version = "0.30.0" dependencies = [ "clap", - "crossbeam-channel", "diesel", "env_logger 0.9.3", "futures 0.3.16", diff --git a/node/Cargo.toml b/node/Cargo.toml index ff2a2213112..af555856ceb 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -20,7 +20,6 @@ graphql-parser = "0.4.0" futures = { version = "0.3.1", features = ["compat"] } lazy_static = "1.2.0" url = "2.3.1" -crossbeam-channel = "0.5.7" graph = { path = "../graph" } graph-core = { path = "../core" } graph-chain-arweave = { path = "../chain/arweave" } diff --git a/node/src/main.rs b/node/src/main.rs index 953acbc7652..577867f1c4a 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -561,7 +561,7 @@ async fn main() { // Periodically check for contention in the tokio threadpool. First spawn a // task that simply responds to "ping" requests. Then spawn a separate // thread to periodically ping it and check responsiveness. 
- let (ping_send, mut ping_receive) = mpsc::channel::>(1); + let (ping_send, mut ping_receive) = mpsc::channel::>(1); graph::spawn(async move { while let Some(pong_send) = ping_receive.recv().await { let _ = pong_send.clone().send(()); @@ -570,14 +570,13 @@ async fn main() { }); std::thread::spawn(move || loop { std::thread::sleep(Duration::from_secs(1)); - let (pong_send, pong_receive) = crossbeam_channel::bounded(1); + let (pong_send, pong_receive) = std::sync::mpsc::sync_channel(1); if futures::executor::block_on(ping_send.clone().send(pong_send)).is_err() { debug!(contention_logger, "Shutting down contention checker thread"); break; } let mut timeout = Duration::from_millis(10); - while pong_receive.recv_timeout(timeout) - == Err(crossbeam_channel::RecvTimeoutError::Timeout) + while pong_receive.recv_timeout(timeout) == Err(std::sync::mpsc::RecvTimeoutError::Timeout) { debug!(contention_logger, "Possible contention in tokio threadpool"; "timeout_ms" => timeout.as_millis(), diff --git a/rust-toolchain.toml b/rust-toolchain.toml index f0d51c34aef..d89a6bef65e 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.66.0" +channel = "1.68.0" profile = "default" From 0b26787a949f273948be832d245eff5d2d94c220 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 15 Mar 2023 20:33:29 -0700 Subject: [PATCH 0035/2104] graph: Allow passing explicit config string to logger builder --- graph/src/log/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/graph/src/log/mod.rs b/graph/src/log/mod.rs index 60bbbcd5153..ffde346c2e5 100644 --- a/graph/src/log/mod.rs +++ b/graph/src/log/mod.rs @@ -32,6 +32,10 @@ pub mod factory; pub mod split; pub fn logger(show_debug: bool) -> Logger { + logger_with_levels(show_debug, ENV_VARS.log_levels.as_deref()) +} + +pub fn logger_with_levels(show_debug: bool, levels: Option<&str>) -> Logger { let use_color = isatty::stdout_isatty(); let decorator = 
slog_term::TermDecorator::new().build(); let drain = CustomFormat::new(decorator, use_color).fuse(); @@ -44,7 +48,7 @@ pub fn logger(show_debug: bool) -> Logger { FilterLevel::Info }, ) - .parse(ENV_VARS.log_levels.as_deref().unwrap_or("")) + .parse(levels.unwrap_or("")) .build(); let drain = slog_async::Async::new(drain) .chan_size(20000) From 70bee451802054607c8d2beaa7a793829d0f605b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 15 Mar 2023 20:44:34 -0700 Subject: [PATCH 0036/2104] node: Do not use GRAPH_LOG to set log level for graphman That behavior was highly annoying. If desired, the log level can be set on the command line or with the GRAPHMAN_LOG environment variable. --- node/src/bin/manager.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 70602a5f2e9..78044b4cdf5 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -2,13 +2,13 @@ use clap::{Parser, Subcommand}; use config::PoolSize; use git_testament::{git_testament, render_testament}; use graph::bail; +use graph::log::logger_with_levels; use graph::prelude::{MetricsRegistry, BLOCK_NUMBER_MAX}; use graph::{data::graphql::effort::LoadManager, prelude::chrono, prometheus::Registry}; use graph::{ - log::logger, prelude::{ anyhow::{self, Context as AnyhowContextTrait}, - info, o, slog, tokio, Logger, NodeId, ENV_VARS, + info, tokio, Logger, NodeId, }, url::Url, }; @@ -47,6 +47,13 @@ lazy_static! 
{ version = RENDERED_TESTAMENT.as_str() )] pub struct Opt { + #[clap( + long, + default_value = "off", + env = "GRAPHMAN_LOG", + help = "level for log output in slog format" + )] + pub log_level: String, #[clap( long, default_value = "auto", @@ -910,10 +917,7 @@ async fn main() -> anyhow::Result<()> { let version_label = opt.version_label.clone(); // Set up logger - let logger = match ENV_VARS.log_levels { - Some(_) => logger(false), - None => Logger::root(slog::Discard, o!()), - }; + let logger = logger_with_levels(false, Some(&opt.log_level)); // Log version information info!( From debe103220b39b778dfb685abb45ee1f0636bb01 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 20 Mar 2023 12:05:11 -0700 Subject: [PATCH 0037/2104] docs: Mention the defaults for GraphQL validation settings --- docs/environment-variables.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 2bed9a09ab4..83ad7782424 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -117,10 +117,12 @@ those. this variable is set to any value, `graph-node` will still accept GraphQL subscriptions, but they won't receive any updates. - `ENABLE_GRAPHQL_VALIDATIONS`: enables GraphQL validations, based on the GraphQL specification. - This will validate and ensure every query executes follows the execution rules. + This will validate and ensure every query executes follows the execution + rules. Default: `false` - `SILENT_GRAPHQL_VALIDATIONS`: If `ENABLE_GRAPHQL_VALIDATIONS` is enabled, you are also able to just silently print the GraphQL validation errors, without failing the actual query. Note: queries - might still fail as part of the later stage validations running, during GraphQL engine execution. + might still fail as part of the later stage validations running, during + GraphQL engine execution.
Default: `true` - `GRAPH_GRAPHQL_DISABLE_BOOL_FILTERS`: disables the ability to use AND/OR filters. This is useful if we want to disable filters because of performance reasons. From 6f5de5a5b4c574bbbc9f00e591520da4907e73cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Mar 2023 12:09:23 +0000 Subject: [PATCH 0038/2104] build(deps): bump openssl from 0.10.45 to 0.10.47 (#4481) Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.45 to 0.10.47. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.45...openssl-v0.10.47) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- store/postgres/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4f43f3596e..7362963d095 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2980,9 +2980,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "d8b277f87dacc05a6b709965d1cbafac4649d6ce9f3ce9ceb88508b5666dfec9" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3012,9 +3012,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "a95792af3c4e0153c3914df2261bedd30a98476f94dc892b67dfe1d89d433a04" 
dependencies = [ "autocfg", "cc", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 0b457c59ca2..58517400cb3 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -22,7 +22,7 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" -openssl = "0.10.45" +openssl = "0.10.47" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" From 08f0281da68bb3d167b6da08b17ecdc069c929ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Mar 2023 12:09:40 +0000 Subject: [PATCH 0039/2104] build(deps): bump anyhow from 1.0.69 to 1.0.70 (#4479) Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.69 to 1.0.70. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.69...1.0.70) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- store/postgres/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7362963d095..efebe861661 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,9 +56,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 58517400cb3..6b237c3b535 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -29,7 +29,7 @@ serde = "1.0" uuid = { version = "1.3.0", features = ["v4"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } diesel_derives = "1.4.1" -anyhow = "1.0.69" +anyhow = "1.0.70" git-testament = "0.2.4" itertools = "0.10.5" pin-utils = "0.1" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 387619f1c21..33a30a3a866 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -26,7 +26,7 @@ tokio = { version = "1.16.1", features = ["rt", "macros", "process"] } uuid = { version = "1.3.0", features = ["v4"] } [dev-dependencies] -anyhow = "1.0.69" +anyhow = "1.0.70" bollard = "0.10" lazy_static = "1.4.0" tokio-stream = "0.1" From 3be894fbd8f46a2dd5d47c74bea8eedaf13595b3 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Wed, 22 Mar 2023 15:55:32 +0000 Subject: [PATCH 0040/2104] Detect timeouts in asc_type_id (#4475) * refactor(runtime): Add sanity checks * fix(runtime): Detect non-deterministic traps in asc_type_id They would previously be unconditionally treated as deterministic. 
* refactor(runtime): Change signatures from DeterministicHostError to HostExportError This all falls out of changing the signature of `fn asc_type_id`. --- chain/arweave/src/runtime/abi.rs | 16 +- chain/arweave/src/trigger.rs | 4 +- chain/cosmos/src/runtime/abi.rs | 7 +- chain/cosmos/src/trigger.rs | 5 +- chain/ethereum/src/runtime/abi.rs | 34 ++-- chain/ethereum/src/trigger.rs | 4 +- chain/near/src/runtime/abi.rs | 95 ++++++----- chain/near/src/trigger.rs | 9 +- chain/substreams/src/trigger.rs | 2 +- graph/src/components/subgraph/host.rs | 13 +- graph/src/runtime/asc_heap.rs | 24 ++- graph/src/runtime/asc_ptr.rs | 6 +- graph/src/runtime/mod.rs | 2 +- runtime/derive/src/generate_array_type.rs | 2 +- runtime/derive/src/generate_from_rust_type.rs | 4 +- runtime/test/src/test_padding.rs | 5 +- runtime/wasm/src/asc_abi/class.rs | 9 +- runtime/wasm/src/asc_abi/v0_0_4.rs | 6 +- runtime/wasm/src/asc_abi/v0_0_5.rs | 8 +- runtime/wasm/src/host_exports.rs | 2 +- runtime/wasm/src/module/mod.rs | 157 ++++++++++-------- runtime/wasm/src/to_from/external.rs | 30 ++-- runtime/wasm/src/to_from/mod.rs | 17 +- 23 files changed, 252 insertions(+), 209 deletions(-) diff --git a/chain/arweave/src/runtime/abi.rs b/chain/arweave/src/runtime/abi.rs index ea495c6e5ff..616cfa70b8c 100644 --- a/chain/arweave/src/runtime/abi.rs +++ b/chain/arweave/src/runtime/abi.rs @@ -1,7 +1,7 @@ use crate::codec; use crate::trigger::TransactionWithBlockPtr; use graph::runtime::gas::GasCounter; -use graph::runtime::{asc_new, AscHeap, AscPtr, DeterministicHostError, ToAscObj}; +use graph::runtime::{asc_new, AscHeap, AscPtr, HostExportError, ToAscObj}; use graph_runtime_wasm::asc_abi::class::{Array, Uint8Array}; pub(crate) use super::generated::*; @@ -11,7 +11,7 @@ impl ToAscObj for codec::Tag { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTag { name: asc_new(heap, self.name.as_slice(), gas)?, value: asc_new(heap, self.value.as_slice(), gas)?, @@ -24,7 +24,7 @@ impl ToAscObj 
for Vec> { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content = self .iter() .map(|x| asc_new(heap, x.as_slice(), gas)) @@ -38,7 +38,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content = self .iter() .map(|x| asc_new(heap, x, gas)) @@ -52,7 +52,7 @@ impl ToAscObj for codec::ProofOfAccess { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscProofOfAccess { option: asc_new(heap, &self.option, gas)?, tx_path: asc_new(heap, self.tx_path.as_slice(), gas)?, @@ -67,7 +67,7 @@ impl ToAscObj for codec::Transaction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTransaction { format: self.format, id: asc_new(heap, self.id.as_slice(), gas)?, @@ -108,7 +108,7 @@ impl ToAscObj for codec::Block { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscBlock { indep_hash: asc_new(heap, self.indep_hash.as_slice(), gas)?, nonce: asc_new(heap, self.nonce.as_slice(), gas)?, @@ -182,7 +182,7 @@ impl ToAscObj for TransactionWithBlockPtr { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTransactionWithBlockPtr { tx: asc_new(heap, &self.tx.as_ref(), gas)?, block: asc_new(heap, self.block.as_ref(), gas)?, diff --git a/chain/arweave/src/trigger.rs b/chain/arweave/src/trigger.rs index 9d2f7ad3a4d..dc5fbbcad39 100644 --- a/chain/arweave/src/trigger.rs +++ b/chain/arweave/src/trigger.rs @@ -7,7 +7,7 @@ use graph::runtime::asc_new; use graph::runtime::gas::GasCounter; use graph::runtime::AscHeap; use graph::runtime::AscPtr; -use graph::runtime::DeterministicHostError; +use graph::runtime::HostExportError; use graph_runtime_wasm::module::ToAscPtr; use std::{cmp::Ordering, sync::Arc}; @@ -38,7 +38,7 @@ impl ToAscPtr for ArweaveTrigger { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(match self { ArweaveTrigger::Block(block) => 
asc_new(heap, block.as_ref(), gas)?.erase(), ArweaveTrigger::Transaction(tx) => asc_new(heap, tx.as_ref(), gas)?.erase(), diff --git a/chain/cosmos/src/runtime/abi.rs b/chain/cosmos/src/runtime/abi.rs index 3c5f0dd5353..af9260b63be 100644 --- a/chain/cosmos/src/runtime/abi.rs +++ b/chain/cosmos/src/runtime/abi.rs @@ -1,4 +1,5 @@ use crate::protobuf::*; +use graph::runtime::HostExportError; pub use graph::semver::Version; pub use graph::runtime::{ @@ -16,7 +17,7 @@ impl ToAscObj for Vec> { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self .iter() .map(|x| asc_new(heap, &graph_runtime_wasm::asc_abi::class::Bytes(x), gas)) @@ -52,7 +53,7 @@ impl ToAscObj for prost_types::Any { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscAny { type_url: asc_new(heap, &self.type_url, gas)?, value: asc_new( @@ -71,7 +72,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); Ok(AscAnyArray(Array::new(&content?, heap, gas)?)) diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs index 93f35f4ee0a..cecf6edebd3 100644 --- a/chain/cosmos/src/trigger.rs +++ b/chain/cosmos/src/trigger.rs @@ -3,7 +3,8 @@ use std::{cmp::Ordering, sync::Arc}; use graph::blockchain::{Block, BlockHash, TriggerData}; use graph::cheap_clone::CheapClone; use graph::prelude::{BlockNumber, Error}; -use graph::runtime::{asc_new, gas::GasCounter, AscHeap, AscPtr, DeterministicHostError}; +use graph::runtime::HostExportError; +use graph::runtime::{asc_new, gas::GasCounter, AscHeap, AscPtr}; use graph_runtime_wasm::module::ToAscPtr; use crate::codec; @@ -42,7 +43,7 @@ impl ToAscPtr for CosmosTrigger { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(match self { CosmosTrigger::Block(block) => asc_new(heap, block.as_ref(), gas)?.erase(), 
CosmosTrigger::Event { event_data, .. } => { diff --git a/chain/ethereum/src/runtime/abi.rs b/chain/ethereum/src/runtime/abi.rs index dcc9564bd7f..66862871c19 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -10,7 +10,7 @@ use graph::{ }, runtime::{ asc_get, asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, - DeterministicHostError, FromAscObj, IndexForAscTypeId, ToAscObj, + DeterministicHostError, FromAscObj, HostExportError, IndexForAscTypeId, ToAscObj, }, }; use graph_runtime_derive::AscType; @@ -42,7 +42,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscLogParamArray(Array::new(&content, heap, gas)?)) @@ -73,7 +73,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let topics = self .iter() .map(|topic| asc_new(heap, topic, gas)) @@ -106,7 +106,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let logs = self .iter() .map(|log| asc_new(heap, &log, gas)) @@ -416,7 +416,7 @@ impl ToAscObj for EthereumBlockData { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumBlock { hash: asc_new(heap, &self.hash, gas)?, parent_hash: asc_new(heap, &self.parent_hash, gas)?, @@ -448,7 +448,7 @@ impl ToAscObj for EthereumBlockData { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumBlock_0_0_6 { hash: asc_new(heap, &self.hash, gas)?, parent_hash: asc_new(heap, &self.parent_hash, gas)?, @@ -484,7 +484,7 @@ impl ToAscObj for EthereumTransactionData { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumTransaction_0_0_1 { hash: asc_new(heap, &self.hash, gas)?, index: asc_new(heap, &BigInt::from(self.index), gas)?, @@ -505,7 +505,7 @@ impl ToAscObj for EthereumTransactionData 
{ &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumTransaction_0_0_2 { hash: asc_new(heap, &self.hash, gas)?, index: asc_new(heap, &BigInt::from(self.index), gas)?, @@ -527,7 +527,7 @@ impl ToAscObj for EthereumTransactionData { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumTransaction_0_0_6 { hash: asc_new(heap, &self.hash, gas)?, index: asc_new(heap, &BigInt::from(self.index), gas)?, @@ -556,7 +556,7 @@ where &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(AscEthereumEvent { address: asc_new(heap, &self.address, gas)?, log_index: asc_new(heap, &BigInt::from_unsigned_u256(&self.log_index), gas)?, @@ -589,7 +589,7 @@ where &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let (event_data, optional_receipt) = self; let AscEthereumEvent { address, @@ -623,7 +623,7 @@ impl ToAscObj for Log { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumLog { address: asc_new(heap, &self.address, gas)?, topics: asc_new(heap, &self.topics, gas)?, @@ -670,7 +670,7 @@ impl ToAscObj for &TransactionReceipt { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumTransactionReceipt { transaction_hash: asc_new(heap, &self.transaction_hash, gas)?, transaction_index: asc_new(heap, &BigInt::from(self.transaction_index), gas)?, @@ -714,7 +714,7 @@ impl ToAscObj for EthereumCallData { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscEthereumCall { address: asc_new(heap, &self.to, gas)?, block: asc_new(heap, &self.block, gas)?, @@ -734,7 +734,7 @@ impl ToAscObj Result< AscEthereumCall_0_0_3, - DeterministicHostError, + HostExportError, > { Ok(AscEthereumCall_0_0_3 { to: asc_new(heap, &self.to, gas)?, @@ -756,7 +756,7 @@ impl ToAscObj Result< AscEthereumCall_0_0_3, - DeterministicHostError, + 
HostExportError, > { Ok(AscEthereumCall_0_0_3 { to: asc_new(heap, &self.to, gas)?, @@ -774,7 +774,7 @@ impl ToAscObj for ethabi::LogParam { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscLogParam { name: asc_new(heap, self.name.as_str(), gas)?, value: asc_new(heap, &self.value, gas)?, diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index 9b609668b1f..fcf0c6e4bc4 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -21,7 +21,7 @@ use graph::runtime::asc_new; use graph::runtime::gas::GasCounter; use graph::runtime::AscHeap; use graph::runtime::AscPtr; -use graph::runtime::DeterministicHostError; +use graph::runtime::HostExportError; use graph::semver::Version; use graph_runtime_wasm::module::ToAscPtr; use std::convert::TryFrom; @@ -116,7 +116,7 @@ impl ToAscPtr for MappingTrigger { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(match self { MappingTrigger::Log { block, diff --git a/chain/near/src/runtime/abi.rs b/chain/near/src/runtime/abi.rs index f9142fa16c7..252a4ffa49f 100644 --- a/chain/near/src/runtime/abi.rs +++ b/chain/near/src/runtime/abi.rs @@ -2,7 +2,7 @@ use crate::codec; use crate::trigger::ReceiptWithOutcome; use graph::anyhow::anyhow; use graph::runtime::gas::GasCounter; -use graph::runtime::{asc_new, AscHeap, AscPtr, DeterministicHostError, ToAscObj}; +use graph::runtime::{asc_new, AscHeap, AscPtr, DeterministicHostError, HostExportError, ToAscObj}; use graph_runtime_wasm::asc_abi::class::{Array, AscEnum, EnumPayload, Uint8Array}; pub(crate) use super::generated::*; @@ -12,7 +12,7 @@ impl ToAscObj for codec::Block { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscBlock { author: asc_new(heap, &self.author, gas)?, header: asc_new(heap, self.header(), gas)?, @@ -26,7 +26,7 @@ impl ToAscObj for codec::BlockHeader { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) 
-> Result { let chunk_mask = Array::new(self.chunk_mask.as_ref(), heap, gas)?; Ok(AscBlockHeader { @@ -68,7 +68,7 @@ impl ToAscObj for codec::ChunkHeader { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscChunkHeader { chunk_hash: asc_new(heap, self.chunk_hash.as_slice(), gas)?, signature: asc_new(heap, &self.signature.as_ref().unwrap(), gas)?, @@ -96,7 +96,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscChunkHeaderArray(Array::new(&content, heap, gas)?)) @@ -108,7 +108,7 @@ impl ToAscObj for ReceiptWithOutcome { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscReceiptWithOutcome { outcome: asc_new(heap, &self.outcome, gas)?, receipt: asc_new(heap, &self.receipt, gas)?, @@ -122,13 +122,13 @@ impl ToAscObj for codec::Receipt { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let action = match self.receipt.as_ref().unwrap() { codec::receipt::Receipt::Action(action) => action, codec::receipt::Receipt::Data(_) => { - return Err(DeterministicHostError::from(anyhow!( - "Data receipt are now allowed" - ))); + return Err( + DeterministicHostError::from(anyhow!("Data receipt are now allowed")).into(), + ); } }; @@ -151,7 +151,7 @@ impl ToAscObj for codec::Action { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let (kind, payload) = match self.action.as_ref().unwrap() { codec::action::Action::CreateAccount(action) => ( AscActionKind::CreateAccount, @@ -200,7 +200,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscActionEnumArray(Array::new(&content, heap, gas)?)) @@ -212,7 +212,7 @@ impl ToAscObj for codec::CreateAccountAction { &self, _heap: &mut H, _gas: 
&GasCounter, - ) -> Result { + ) -> Result { Ok(AscCreateAccountAction {}) } } @@ -222,7 +222,7 @@ impl ToAscObj for codec::DeployContractAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscDeployContractAction { code: asc_new(heap, self.code.as_slice(), gas)?, }) @@ -234,7 +234,7 @@ impl ToAscObj for codec::FunctionCallAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscFunctionCallAction { method_name: asc_new(heap, &self.method_name, gas)?, args: asc_new(heap, self.args.as_slice(), gas)?, @@ -250,7 +250,7 @@ impl ToAscObj for codec::TransferAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTransferAction { deposit: asc_new(heap, self.deposit.as_ref().unwrap(), gas)?, }) @@ -262,7 +262,7 @@ impl ToAscObj for codec::StakeAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscStakeAction { stake: asc_new(heap, self.stake.as_ref().unwrap(), gas)?, public_key: asc_new(heap, self.public_key.as_ref().unwrap(), gas)?, @@ -275,7 +275,7 @@ impl ToAscObj for codec::AddKeyAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscAddKeyAction { public_key: asc_new(heap, self.public_key.as_ref().unwrap(), gas)?, access_key: asc_new(heap, self.access_key.as_ref().unwrap(), gas)?, @@ -288,7 +288,7 @@ impl ToAscObj for codec::AccessKey { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscAccessKey { nonce: self.nonce, permission: asc_new(heap, self.permission.as_ref().unwrap(), gas)?, @@ -302,7 +302,7 @@ impl ToAscObj for codec::AccessKeyPermission { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let (kind, payload) = match self.permission.as_ref().unwrap() { codec::access_key_permission::Permission::FunctionCall(permission) => ( AscAccessKeyPermissionKind::FunctionCall, @@ -327,7 +327,7 @@ impl ToAscObj for codec::FunctionCallPermission { &self, heap: &mut H, gas: 
&GasCounter, - ) -> Result { + ) -> Result { Ok(AscFunctionCallPermission { // The `allowance` field is one of the few fields that can actually be None for real allowance: match self.allowance.as_ref() { @@ -345,7 +345,7 @@ impl ToAscObj for codec::FullAccessPermission { &self, _heap: &mut H, _gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscFullAccessPermission {}) } } @@ -355,7 +355,7 @@ impl ToAscObj for codec::DeleteKeyAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscDeleteKeyAction { public_key: asc_new(heap, self.public_key.as_ref().unwrap(), gas)?, }) @@ -367,7 +367,7 @@ impl ToAscObj for codec::DeleteAccountAction { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscDeleteAccountAction { beneficiary_id: asc_new(heap, &self.beneficiary_id, gas)?, }) @@ -379,7 +379,7 @@ impl ToAscObj for codec::DataReceiver { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscDataReceiver { data_id: asc_new(heap, self.data_id.as_ref().unwrap(), gas)?, receiver_id: asc_new(heap, &self.receiver_id, gas)?, @@ -392,7 +392,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscDataReceiverArray(Array::new(&content, heap, gas)?)) @@ -404,7 +404,7 @@ impl ToAscObj for codec::ExecutionOutcomeWithId { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let outcome = self.outcome.as_ref().unwrap(); Ok(AscExecutionOutcome { @@ -426,7 +426,7 @@ impl ToAscObj for codec::execution_outcome::Status { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let (kind, payload) = match self { codec::execution_outcome::Status::SuccessValue(value) => { let bytes = &value.value; @@ -443,12 +443,14 @@ impl ToAscObj for codec::execution_outcome::Status { codec::execution_outcome::Status::Failure(_) => { return 
Err(DeterministicHostError::from(anyhow!( "Failure execution status are not allowed" - ))); + )) + .into()); } codec::execution_outcome::Status::Unknown(_) => { return Err(DeterministicHostError::from(anyhow!( "Unknown execution status are not allowed" - ))); + )) + .into()); } }; @@ -465,7 +467,7 @@ impl ToAscObj for codec::MerklePathItem { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscMerklePathItem { hash: asc_new(heap, self.hash.as_ref().unwrap(), gas)?, direction: match self.direction { @@ -475,7 +477,8 @@ impl ToAscObj for codec::MerklePathItem { return Err(DeterministicHostError::from(anyhow!( "Invalid direction value {}", x - ))) + )) + .into()) } }, }) @@ -487,7 +490,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscMerklePathItemArray(Array::new(&content, heap, gas)?)) @@ -499,7 +502,7 @@ impl ToAscObj for codec::Signature { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscSignature { kind: match self.r#type { 0 => 0, @@ -508,7 +511,8 @@ impl ToAscObj for codec::Signature { return Err(DeterministicHostError::from(anyhow!( "Invalid signature type {}", value, - ))) + )) + .into()) } }, bytes: asc_new(heap, self.bytes.as_slice(), gas)?, @@ -521,7 +525,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscSignatureArray(Array::new(&content, heap, gas)?)) @@ -533,7 +537,7 @@ impl ToAscObj for codec::PublicKey { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscPublicKey { kind: match self.r#type { 0 => 0, @@ -542,7 +546,8 @@ impl ToAscObj for codec::PublicKey { return Err(DeterministicHostError::from(anyhow!( "Invalid public key type {}", value, - ))) + )) + 
.into()) } }, bytes: asc_new(heap, self.bytes.as_slice(), gas)?, @@ -555,7 +560,7 @@ impl ToAscObj for codec::ValidatorStake { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscValidatorStake { account_id: asc_new(heap, &self.account_id, gas)?, public_key: asc_new(heap, self.public_key.as_ref().unwrap(), gas)?, @@ -569,7 +574,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscValidatorStakeArray(Array::new(&content, heap, gas)?)) @@ -581,7 +586,7 @@ impl ToAscObj for codec::SlashedValidator { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscSlashedValidator { account_id: asc_new(heap, &self.account_id, gas)?, is_double_sign: self.is_double_sign, @@ -594,7 +599,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscSlashedValidatorArray(Array::new(&content, heap, gas)?)) @@ -606,7 +611,7 @@ impl ToAscObj for codec::CryptoHash { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { self.bytes.to_asc_obj(heap, gas) } } @@ -616,7 +621,7 @@ impl ToAscObj for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Ok(AscCryptoHashArray(Array::new(&content, heap, gas)?)) @@ -628,7 +633,7 @@ impl ToAscObj for codec::BigInt { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { // Bytes are reversed to align with BigInt bytes endianess let reversed: Vec = self.bytes.iter().rev().copied().collect(); diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index 6fc31e8aefe..0fad89b2d63 100644 --- a/chain/near/src/trigger.rs +++ 
b/chain/near/src/trigger.rs @@ -4,7 +4,8 @@ use graph::cheap_clone::CheapClone; use graph::prelude::hex; use graph::prelude::web3::types::H256; use graph::prelude::BlockNumber; -use graph::runtime::{asc_new, gas::GasCounter, AscHeap, AscPtr, DeterministicHostError}; +use graph::runtime::HostExportError; +use graph::runtime::{asc_new, gas::GasCounter, AscHeap, AscPtr}; use graph_runtime_wasm::module::ToAscPtr; use std::{cmp::Ordering, sync::Arc}; @@ -40,7 +41,7 @@ impl ToAscPtr for NearTrigger { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(match self { NearTrigger::Block(block) => asc_new(heap, block.as_ref(), gas)?.erase(), NearTrigger::Receipt(receipt) => asc_new(heap, receipt.as_ref(), gas)?.erase(), @@ -150,7 +151,7 @@ mod tests { anyhow::anyhow, data::subgraph::API_VERSION_0_0_5, prelude::{hex, BigInt}, - runtime::gas::GasCounter, + runtime::{gas::GasCounter, DeterministicHostError, HostExportError}, util::mem::init_slice, }; @@ -495,7 +496,7 @@ mod tests { fn asc_type_id( &mut self, type_id_index: graph::runtime::IndexForAscTypeId, - ) -> Result { + ) -> Result { // Not totally clear what is the purpose of this method, why not a default implementation here? 
Ok(type_id_index as u32) } diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 334c5db30df..cbe73125bb7 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -38,7 +38,7 @@ impl ToAscPtr for TriggerData { self, _heap: &mut H, _gas: &graph::runtime::gas::GasCounter, - ) -> Result, graph::runtime::DeterministicHostError> { + ) -> Result, graph::runtime::HostExportError> { unimplemented!() } } diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index 9ef35254c18..0209fee5d73 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -11,8 +11,8 @@ use crate::data_source::{ DataSource, DataSourceTemplate, MappingTrigger, TriggerData, TriggerWithHandler, }; use crate::prelude::*; +use crate::runtime::HostExportError; use crate::{blockchain::Blockchain, components::subgraph::SharedProofOfIndexing}; -use crate::{components::metrics::HistogramVec, runtime::DeterministicHostError}; #[derive(Debug)] pub enum MappingError { @@ -27,9 +27,14 @@ impl From for MappingError { } } -impl From for MappingError { - fn from(value: DeterministicHostError) -> MappingError { - MappingError::Unknown(value.inner()) +impl From for MappingError { + fn from(value: HostExportError) -> MappingError { + match value { + HostExportError::PossibleReorg(e) => MappingError::PossibleReorg(e.into()), + HostExportError::Deterministic(e) | HostExportError::Unknown(e) => { + MappingError::Unknown(e.into()) + } + } } } diff --git a/graph/src/runtime/asc_heap.rs b/graph/src/runtime/asc_heap.rs index c39165461db..065af4f5200 100644 --- a/graph/src/runtime/asc_heap.rs +++ b/graph/src/runtime/asc_heap.rs @@ -3,7 +3,8 @@ use std::mem::MaybeUninit; use semver::Version; use super::{ - gas::GasCounter, AscIndexId, AscPtr, AscType, DeterministicHostError, IndexForAscTypeId, + gas::GasCounter, AscIndexId, AscPtr, AscType, DeterministicHostError, HostExportError, + 
IndexForAscTypeId, }; /// A type that can read and write to the Asc heap. Call `asc_new` and `asc_get` /// for reading and writing Rust structs from and to Asc. @@ -24,10 +25,7 @@ pub trait AscHeap { fn api_version(&self) -> Version; - fn asc_type_id( - &mut self, - type_id_index: IndexForAscTypeId, - ) -> Result; + fn asc_type_id(&mut self, type_id_index: IndexForAscTypeId) -> Result; } /// Instantiate `rust_obj` as an Asc object of class `C`. @@ -39,7 +37,7 @@ pub fn asc_new( heap: &mut H, rust_obj: &T, gas: &GasCounter, -) -> Result, DeterministicHostError> +) -> Result, HostExportError> where C: AscType + AscIndexId, T: ToAscObj, @@ -55,7 +53,7 @@ pub fn asc_new_or_missing( gas: &GasCounter, type_name: &str, field_name: &str, -) -> Result, DeterministicHostError> +) -> Result, HostExportError> where H: AscHeap + ?Sized, O: ToAscObj, @@ -72,7 +70,7 @@ pub fn asc_new_or_null( heap: &mut H, object: &Option, gas: &GasCounter, -) -> Result, DeterministicHostError> +) -> Result, HostExportError> where H: AscHeap + ?Sized, O: ToAscObj, @@ -85,8 +83,8 @@ where } /// Create an error for a missing field in a type. -fn missing_field_error(type_name: &str, field_name: &str) -> DeterministicHostError { - DeterministicHostError::from(anyhow::anyhow!("{} missing {}", type_name, field_name)) +fn missing_field_error(type_name: &str, field_name: &str) -> HostExportError { + DeterministicHostError::from(anyhow::anyhow!("{} missing {}", type_name, field_name)).into() } /// Read the rust representation of an Asc object of class `C`. 
@@ -111,7 +109,7 @@ pub trait ToAscObj { &self, heap: &mut H, gas: &GasCounter, - ) -> Result; + ) -> Result; } impl> ToAscObj for &T { @@ -119,7 +117,7 @@ impl> ToAscObj for &T { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { (*self).to_asc_obj(heap, gas) } } @@ -129,7 +127,7 @@ impl ToAscObj for bool { &self, _heap: &mut H, _gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(*self) } } diff --git a/graph/src/runtime/asc_ptr.rs b/graph/src/runtime/asc_ptr.rs index 59796ee60d6..890bde20e07 100644 --- a/graph/src/runtime/asc_ptr.rs +++ b/graph/src/runtime/asc_ptr.rs @@ -1,5 +1,5 @@ use super::gas::GasCounter; -use super::{padding_to_16, DeterministicHostError}; +use super::{padding_to_16, DeterministicHostError, HostExportError}; use super::{AscHeap, AscIndexId, AscType, IndexForAscTypeId}; use semver::Version; @@ -86,7 +86,7 @@ impl AscPtr { asc_obj: C, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> + ) -> Result, HostExportError> where C: AscIndexId, { @@ -143,7 +143,7 @@ impl AscPtr { type_id_index: IndexForAscTypeId, content_length: usize, full_length: usize, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let mut header: Vec = Vec::with_capacity(20); let gc_info: [u8; 4] = (0u32).to_le_bytes(); diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index 4b7109c6f8e..74007b96cef 100644 --- a/graph/src/runtime/mod.rs +++ b/graph/src/runtime/mod.rs @@ -385,7 +385,7 @@ impl ToAscObj for IndexForAscTypeId { &self, _heap: &mut H, _gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(*self as u32) } } diff --git a/runtime/derive/src/generate_array_type.rs b/runtime/derive/src/generate_array_type.rs index d01644fae55..f690b1ebd5a 100644 --- a/runtime/derive/src/generate_array_type.rs +++ b/runtime/derive/src/generate_array_type.rs @@ -50,7 +50,7 @@ pub fn generate_array_type(metadata: TokenStream, input: TokenStream) -> TokenSt &self, heap: &mut H, gas: 
&graph::runtime::gas::GasCounter, - ) -> Result<#asc_name_array, graph::runtime::DeterministicHostError> { + ) -> Result<#asc_name_array, graph::runtime::HostExportError> { let content: Result, _> = self.iter().map(|x| graph::runtime::asc_new(heap, x, gas)).collect(); Ok(#asc_name_array(graph_runtime_wasm::asc_abi::class::Array::new(&content?, heap, gas)?)) diff --git a/runtime/derive/src/generate_from_rust_type.rs b/runtime/derive/src/generate_from_rust_type.rs index 3d59fd08d46..6e24ad78c8c 100644 --- a/runtime/derive/src/generate_from_rust_type.rs +++ b/runtime/derive/src/generate_from_rust_type.rs @@ -56,7 +56,7 @@ pub fn generate_from_rust_type(metadata: TokenStream, input: TokenStream) -> Tok quote! { let #fld_name = self.#fld_name.as_ref() - .ok_or_else(|| graph::runtime::DeterministicHostError::from(anyhow::anyhow!("{} missing {}", #type_nm, #fld_nm)))?; + .ok_or_else(|| graph::runtime::HostExportError::from(graph::runtime::DeterministicHostError::from(anyhow::anyhow!("{} missing {}", #type_nm, #fld_nm))))?; } }); @@ -148,7 +148,7 @@ pub fn generate_from_rust_type(metadata: TokenStream, input: TokenStream) -> Tok &self, heap: &mut H, gas: &graph::runtime::gas::GasCounter, - ) -> Result<#asc_name, graph::runtime::DeterministicHostError> { + ) -> Result<#asc_name, graph::runtime::HostExportError> { #(#enum_validation)* diff --git a/runtime/test/src/test_padding.rs b/runtime/test/src/test_padding.rs index 85da8f3fa9d..968677bd087 100644 --- a/runtime/test/src/test_padding.rs +++ b/runtime/test/src/test_padding.rs @@ -109,6 +109,7 @@ pub mod data { IndexForAscTypeId::UnitTestNetworkUnitTestTypeBool; } + use graph::runtime::HostExportError; pub use graph::runtime::{ asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, AscValue, DeterministicHostError, IndexForAscTypeId, ToAscObj, @@ -119,7 +120,7 @@ pub mod data { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscBad { nonce: self.nonce, str_suff: asc_new(heap, 
&self.str_suff, gas)?, @@ -178,7 +179,7 @@ pub mod data { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscBadFixed { nonce: self.nonce, str_suff: asc_new(heap, &self.str_suff, gas)?, diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index fda9b6bda7e..0fdac204847 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -4,7 +4,8 @@ use semver::Version; use graph::{ data::store, runtime::{ - gas::GasCounter, AscHeap, AscIndexId, AscType, AscValue, IndexForAscTypeId, ToAscObj, + gas::GasCounter, AscHeap, AscIndexId, AscType, AscValue, HostExportError, + IndexForAscTypeId, ToAscObj, }, }; use graph::{prelude::serde_json, runtime::DeterministicHostError}; @@ -92,7 +93,7 @@ impl TypedArray { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { match heap.api_version() { version if version <= Version::new(0, 0, 4) => Ok(Self::ApiVersion0_0_4( v0_0_4::TypedArray::new(content, heap, gas)?, @@ -147,7 +148,7 @@ impl ToAscObj for Bytes<'_> { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { self.0.to_asc_obj(heap, gas) } } @@ -272,7 +273,7 @@ impl Array { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { match heap.api_version() { version if version <= Version::new(0, 0, 4) => Ok(Self::ApiVersion0_0_4( v0_0_4::Array::new(content, heap, gas)?, diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index ba37c063d25..39123f96efd 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -6,7 +6,7 @@ use std::mem::{size_of, size_of_val}; use anyhow::anyhow; use semver::Version; -use graph::runtime::{AscHeap, AscPtr, AscType, AscValue, DeterministicHostError}; +use graph::runtime::{AscHeap, AscPtr, AscType, AscValue, DeterministicHostError, HostExportError}; use graph_runtime_derive::AscType; use crate::asc_abi::class; @@ -153,7 +153,7 @@ impl 
TypedArray { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let buffer = class::ArrayBuffer::new(content, heap.api_version())?; let buffer_byte_length = if let class::ArrayBuffer::ApiVersion0_0_4(ref a) = buffer { a.byte_length @@ -307,7 +307,7 @@ impl Array { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let arr_buffer = class::ArrayBuffer::new(content, heap.api_version())?; let arr_buffer_ptr = AscPtr::alloc_obj(arr_buffer, heap, gas)?; Ok(Array { diff --git a/runtime/wasm/src/asc_abi/v0_0_5.rs b/runtime/wasm/src/asc_abi/v0_0_5.rs index 31503af0b5a..183aad0f2a6 100644 --- a/runtime/wasm/src/asc_abi/v0_0_5.rs +++ b/runtime/wasm/src/asc_abi/v0_0_5.rs @@ -5,7 +5,9 @@ use anyhow::anyhow; use semver::Version; use graph::runtime::gas::GasCounter; -use graph::runtime::{AscHeap, AscPtr, AscType, AscValue, DeterministicHostError, HEADER_SIZE}; +use graph::runtime::{ + AscHeap, AscPtr, AscType, AscValue, DeterministicHostError, HostExportError, HEADER_SIZE, +}; use graph_runtime_derive::AscType; use crate::asc_abi::class; @@ -116,7 +118,7 @@ impl TypedArray { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let buffer = class::ArrayBuffer::new(content, heap.api_version())?; let byte_length = content.len() as u32; let ptr = AscPtr::alloc_obj(buffer, heap, gas)?; @@ -266,7 +268,7 @@ impl Array { content: &[T], heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let arr_buffer = class::ArrayBuffer::new(content, heap.api_version())?; let buffer = AscPtr::alloc_obj(arr_buffer, heap, gas)?; let buffer_data_length = buffer.read_len(heap, gas)?; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index ae8288d191f..ded1d7193d6 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -601,7 +601,7 @@ impl HostExports { x: BigDecimal, y: BigDecimal, gas: &GasCounter, - ) -> Result { + ) -> Result { 
gas.consume_host_fn(gas::BIG_MATH_GAS_OP.with_args(complexity::Min, (&x, &y)))?; Ok(x == y) } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 5996773784f..c7ac94175ac 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -55,7 +55,7 @@ pub trait ToAscPtr { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError>; + ) -> Result, HostExportError>; } impl ToAscPtr for offchain::TriggerData { @@ -63,7 +63,7 @@ impl ToAscPtr for offchain::TriggerData { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { asc_new(heap, self.data.as_ref() as &[u8], gas).map(|ptr| ptr.erase()) } } @@ -76,7 +76,7 @@ where self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { match self { MappingTrigger::Onchain(trigger) => trigger.to_asc_ptr(heap, gas), MappingTrigger::Offchain(trigger) => trigger.to_asc_ptr(heap, gas), @@ -89,7 +89,7 @@ impl ToAscPtr for TriggerWithHandler { self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { self.trigger.to_asc_ptr(heap, gas) } } @@ -125,10 +125,7 @@ impl WasmInstance { asc_get(self.instance_ctx().deref(), asc_ptr, &self.gas) } - pub fn asc_new( - &mut self, - rust_obj: &T, - ) -> Result, DeterministicHostError> + pub fn asc_new(&mut self, rust_obj: &T) -> Result, HostExportError> where P: AscType + AscIndexId, T: ToAscObj

, @@ -137,6 +134,28 @@ impl WasmInstance { } } +fn is_trap_deterministic(trap: &Trap) -> bool { + use wasmtime::TrapCode::*; + + // We try to be exhaustive, even though `TrapCode` is non-exhaustive. + match trap.trap_code() { + Some(MemoryOutOfBounds) + | Some(HeapMisaligned) + | Some(TableOutOfBounds) + | Some(IndirectCallToNull) + | Some(BadSignature) + | Some(IntegerOverflow) + | Some(IntegerDivisionByZero) + | Some(BadConversionToInteger) + | Some(UnreachableCodeReached) => true, + + // `Interrupt`: Can be a timeout, at least as wasmtime currently implements it. + // `StackOverflow`: We may want to have a configurable stack size. + // `None`: A host trap, so we need to check the `deterministic_host_trap` flag in the context. + Some(Interrupt) | Some(StackOverflow) | None | _ => false, + } +} + impl WasmInstance { pub(crate) fn handle_json_callback( mut self, @@ -219,11 +238,17 @@ impl WasmInstance { // This `match` will return early if there was a non-deterministic trap. let deterministic_error: Option = match func.call(arg.wasm_ptr()) { - Ok(()) => None, + Ok(()) => { + assert!(self.instance_ctx().possible_reorg == false); + assert!(self.instance_ctx().deterministic_host_trap == false); + None + } Err(trap) if self.instance_ctx().possible_reorg => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::PossibleReorg(trap.into())); } + + // Treat as a special case to have a better error message. 
Err(trap) if trap.to_string().contains(TRAP_TIMEOUT) => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(Error::from(trap).context(format!( @@ -233,21 +258,12 @@ impl WasmInstance { )))); } Err(trap) => { - use wasmtime::TrapCode::*; - let trap_code = trap.trap_code(); + let trap_is_deterministic = + is_trap_deterministic(&trap) || self.instance_ctx().deterministic_host_trap; let e = Error::from(trap); - match trap_code { - Some(MemoryOutOfBounds) - | Some(HeapMisaligned) - | Some(TableOutOfBounds) - | Some(IndirectCallToNull) - | Some(BadSignature) - | Some(IntegerOverflow) - | Some(IntegerDivisionByZero) - | Some(BadConversionToInteger) - | Some(UnreachableCodeReached) => Some(e), - _ if self.instance_ctx().deterministic_host_trap => Some(e), - _ => { + match trap_is_deterministic { + true => Some(Error::from(e)), + false => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(e)); } @@ -658,6 +674,15 @@ impl WasmInstance { } } +fn host_export_error_from_trap(trap: Trap, context: String) -> HostExportError { + let trap_is_deterministic = is_trap_deterministic(&trap); + let e = Error::from(trap).context(context); + match trap_is_deterministic { + true => HostExportError::Deterministic(e), + false => HostExportError::Unknown(e), + } +} + impl AscHeap for WasmInstanceContext { fn raw_new(&mut self, bytes: &[u8], gas: &GasCounter) -> Result { // The cost of writing to wasm memory from the host is the same as of writing from wasm @@ -754,18 +779,17 @@ impl AscHeap for WasmInstanceContext { self.ctx.host_exports.api_version.clone() } - fn asc_type_id( - &mut self, - type_id_index: IndexForAscTypeId, - ) -> Result { - let type_id = self - .id_of_type + fn asc_type_id(&mut self, type_id_index: IndexForAscTypeId) -> Result { + self.id_of_type .as_ref() .unwrap() // Unwrap ok because it's only called on correct apiVersion, look for AscPtr::generate_header .call(type_id_index as u32) - .with_context(|| 
format!("Failed to call 'asc_type_id' with '{:?}'", type_id_index)) - .map_err(DeterministicHostError::from)?; - Ok(type_id) + .map_err(|trap| { + host_export_error_from_trap( + trap, + format!("Failed to call 'asc_type_id' with '{:?}'", type_id_index), + ) + }) } } @@ -1040,7 +1064,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, bytes_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let string = self.ctx.host_exports.bytes_to_string( &self.ctx.logger, asc_get(self, bytes_ptr, gas)?, @@ -1058,7 +1082,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, bytes_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let bytes: Vec = asc_get(self, bytes_ptr, gas)?; gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &bytes))?; @@ -1073,7 +1097,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, big_int_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let n: BigInt = asc_get(self, big_int_ptr, gas)?; gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &n))?; asc_new(self, &n.to_string(), gas) @@ -1084,7 +1108,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, string_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self .ctx .host_exports @@ -1097,7 +1121,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, big_int_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let n: BigInt = asc_get(self, big_int_ptr, gas)?; let hex = self.ctx.host_exports.big_int_to_hex(n, gas)?; asc_new(self, &hex, gas) @@ -1108,7 +1132,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, str_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let s: String = asc_get(self, str_ptr, gas)?; let h160 = self.ctx.host_exports.string_to_h160(&s, gas)?; asc_new(self, &h160, 
gas) @@ -1119,7 +1143,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, bytes_ptr: AscPtr, - ) -> Result>, DeterministicHostError> { + ) -> Result>, HostExportError> { let bytes: Vec = asc_get(self, bytes_ptr, gas)?; let result = self .ctx @@ -1140,8 +1164,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, bytes_ptr: AscPtr, - ) -> Result>, bool>>, DeterministicHostError> - { + ) -> Result>, bool>>, HostExportError> { let bytes: Vec = asc_get(self, bytes_ptr, gas)?; let result = self .ctx @@ -1319,7 +1342,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, json_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let big_int = self .ctx .host_exports @@ -1332,7 +1355,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, input_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let input = self .ctx .host_exports @@ -1346,7 +1369,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_plus( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1361,7 +1384,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_minus( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1376,7 +1399,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_times( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1391,7 +1414,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_divided_by( 
asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1406,7 +1429,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let x = BigDecimal::new(asc_get(self, x_ptr, gas)?, 0); let result = self.ctx @@ -1421,7 +1444,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_mod( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1436,7 +1459,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, exp: u32, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let exp = u8::try_from(exp).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self .ctx @@ -1451,7 +1474,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_bit_or( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1466,7 +1489,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_int_bit_and( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1481,7 +1504,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, bits: u32, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let bits = u8::try_from(bits).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self.ctx @@ -1496,7 +1519,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, bits: u32, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let bits = u8::try_from(bits).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self.ctx @@ -1510,7 +1533,7 
@@ impl WasmInstanceContext { &mut self, gas: &GasCounter, bytes_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self .ctx .host_exports @@ -1523,7 +1546,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, big_decimal_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self .ctx .host_exports @@ -1536,7 +1559,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, string_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self .ctx .host_exports @@ -1550,7 +1573,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_decimal_plus( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1565,7 +1588,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_decimal_minus( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1580,7 +1603,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_decimal_times( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1595,7 +1618,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let result = self.ctx.host_exports.big_decimal_divided_by( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1610,7 +1633,7 @@ impl WasmInstanceContext { gas: &GasCounter, x_ptr: AscPtr, y_ptr: AscPtr, - ) -> Result { + ) -> Result { self.ctx.host_exports.big_decimal_equals( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, @@ -1664,7 +1687,7 @@ 
impl WasmInstanceContext { pub fn data_source_address( &mut self, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { asc_new( self, self.ctx.host_exports.data_source_address(gas)?.as_slice(), @@ -1676,7 +1699,7 @@ impl WasmInstanceContext { pub fn data_source_network( &mut self, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { asc_new(self, &self.ctx.host_exports.data_source_network(gas)?, gas) } @@ -1684,7 +1707,7 @@ impl WasmInstanceContext { pub fn data_source_context( &mut self, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { asc_new( self, &self.ctx.host_exports.data_source_context(gas)?.sorted(), @@ -1737,7 +1760,7 @@ impl WasmInstanceContext { &mut self, gas: &GasCounter, token_ptr: AscPtr>, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { let data = self .ctx .host_exports @@ -1754,7 +1777,7 @@ impl WasmInstanceContext { gas: &GasCounter, types_ptr: AscPtr, data_ptr: AscPtr, - ) -> Result>, DeterministicHostError> { + ) -> Result>, HostExportError> { let result = self.ctx.host_exports.ethereum_decode( asc_get(self, types_ptr, gas)?, asc_get(self, data_ptr, gas)?, diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index 40ca4722e1c..69532fbf237 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -2,7 +2,9 @@ use ethabi; use graph::prelude::{BigDecimal, BigInt}; use graph::runtime::gas::GasCounter; -use graph::runtime::{asc_get, asc_new, AscIndexId, AscPtr, AscType, AscValue, ToAscObj}; +use graph::runtime::{ + asc_get, asc_new, AscIndexId, AscPtr, AscType, AscValue, HostExportError, ToAscObj, +}; use graph::{data::store, runtime::DeterministicHostError}; use graph::{prelude::serde_json, runtime::FromAscObj}; use graph::{prelude::web3::types as web3, runtime::AscHeap}; @@ -14,7 +16,7 @@ impl ToAscObj for web3::H160 { &self, 
heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { self.0.to_asc_obj(heap, gas) } } @@ -46,7 +48,7 @@ impl ToAscObj for web3::H256 { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { self.0.to_asc_obj(heap, gas) } } @@ -56,7 +58,7 @@ impl ToAscObj for web3::U128 { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let mut bytes: [u8; 16] = [0; 16]; self.to_little_endian(&mut bytes); bytes.to_asc_obj(heap, gas) @@ -68,7 +70,7 @@ impl ToAscObj for BigInt { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { let bytes = self.to_signed_bytes_le(); bytes.to_asc_obj(heap, gas) } @@ -90,7 +92,7 @@ impl ToAscObj for BigDecimal { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { // From the docs: "Note that a positive exponent indicates a negative power of 10", // so "exponent" is the opposite of what you'd expect. let (digits, negative_exp) = self.as_bigint_and_exponent(); @@ -137,7 +139,7 @@ impl ToAscObj>> for Vec { &self, heap: &mut H, gas: &GasCounter, - ) -> Result>, DeterministicHostError> { + ) -> Result>, HostExportError> { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Array::new(&content, heap, gas) @@ -149,7 +151,7 @@ impl ToAscObj> for ethabi::Token { &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { use ethabi::Token::*; let kind = EthereumValueKind::get_kind(self); @@ -277,7 +279,7 @@ impl ToAscObj> for store::Value { &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { use self::store::Value; let payload = match self { @@ -311,7 +313,7 @@ impl ToAscObj for serde_json::Map { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTypedMap { entries: asc_new(heap, &*self.iter().collect::>(), gas)?, }) @@ -324,7 +326,7 @@ impl ToAscObj for Vec<(String, store::Value)> { 
&self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { Ok(AscTypedMap { entries: asc_new(heap, self.as_slice(), gas)?, }) @@ -336,7 +338,7 @@ impl ToAscObj> for serde_json::Value { &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { use serde_json::Value; let payload = match self { @@ -374,7 +376,7 @@ impl ToAscObj> for AscWrapped { &self, _heap: &mut H, _gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(*self) } } @@ -389,7 +391,7 @@ where &self, heap: &mut H, gas: &GasCounter, - ) -> Result, bool>, DeterministicHostError> { + ) -> Result, bool>, HostExportError> { Ok(match self { Ok(value) => AscResult { value: { diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index ca4c20faa22..31713f282ea 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -5,7 +5,7 @@ use std::iter::FromIterator; use graph::runtime::{ asc_get, asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, AscValue, - DeterministicHostError, FromAscObj, ToAscObj, + DeterministicHostError, FromAscObj, HostExportError, ToAscObj, }; use crate::asc_abi::class::*; @@ -19,7 +19,7 @@ impl ToAscObj> for [T] { &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { TypedArray::new(self, heap, gas) } } @@ -52,8 +52,11 @@ impl ToAscObj for str { &self, heap: &mut H, _gas: &GasCounter, - ) -> Result { - AscString::new(&self.encode_utf16().collect::>(), heap.api_version()) + ) -> Result { + Ok(AscString::new( + &self.encode_utf16().collect::>(), + heap.api_version(), + )?) 
} } @@ -62,7 +65,7 @@ impl ToAscObj for String { &self, heap: &mut H, gas: &GasCounter, - ) -> Result { + ) -> Result { self.as_str().to_asc_obj(heap, gas) } } @@ -89,7 +92,7 @@ impl> ToAscObj>> for [T] &self, heap: &mut H, gas: &GasCounter, - ) -> Result>, DeterministicHostError> { + ) -> Result>, HostExportError> { let content: Result, _> = self.iter().map(|x| asc_new(heap, x, gas)).collect(); let content = content?; Array::new(&content, heap, gas) @@ -132,7 +135,7 @@ impl, U: ToAscO &self, heap: &mut H, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, HostExportError> { Ok(AscTypedMapEntry { key: asc_new(heap, &self.0, gas)?, value: asc_new(heap, &self.1, gas)?, From 07cfe797ff89fb0c90c7e70b717c8aae6a9d3ddb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 22 Mar 2023 11:28:25 -0700 Subject: [PATCH 0041/2104] store: Disable collecting write metrics for synced deployments --- store/postgres/src/writable.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 3ba9fad1dda..b6c5d0f56ab 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -842,6 +842,10 @@ impl Queue { fn poisoned(&self) -> bool { self.poisoned.load(Ordering::SeqCst) } + + fn deployment_synced(&self) { + self.stopwatch.disable() + } } /// A shim to allow bypassing any pipelined store handling if need be @@ -973,6 +977,13 @@ impl Writer { Writer::Async(queue) => queue.stop().await, } } + + fn deployment_synced(&self) { + match self { + Writer::Sync(_) => {} + Writer::Async(queue) => queue.deployment_synced(), + } + } } pub struct WritableStore { @@ -1136,6 +1147,7 @@ impl WritableStoreTrait for WritableStore { } fn deployment_synced(&self) -> Result<(), StoreError> { + self.writer.deployment_synced(); self.store.deployment_synced() } From c2c28e37c96e64074c2b57a25aacaef20570a19a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Mar 2023 12:05:58 +0200 Subject: [PATCH 0042/2104] build(deps): bump openssl from 0.10.47 to 0.10.48 (#4493) Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.47 to 0.10.48. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.47...openssl-v0.10.48) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- store/postgres/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efebe861661..75fce9b770c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2980,9 +2980,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.47" +version = "0.10.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b277f87dacc05a6b709965d1cbafac4649d6ce9f3ce9ceb88508b5666dfec9" +checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3012,9 +3012,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.82" +version = "0.9.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a95792af3c4e0153c3914df2261bedd30a98476f94dc892b67dfe1d89d433a04" +checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" dependencies = [ "autocfg", "cc", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 6b237c3b535..ee185f9f05f 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -22,7 +22,7 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" 
-openssl = "0.10.47" +openssl = "0.10.48" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" From 46bcd8096d0bbb682bcb682b1b5af2fe73ba9f96 Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Mon, 27 Mar 2023 15:49:48 +0200 Subject: [PATCH 0043/2104] github: fix monospace font in issue descriptions (#4495) --- .github/ISSUE_TEMPLATE/bug.yml | 1 - .github/ISSUE_TEMPLATE/feature.yml | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 4e024149821..944e74845ef 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -12,7 +12,6 @@ body: attributes: label: Bug report description: Please provide a detailed overview of the expected behavior, and what happens instead. The more details, the better. You can use Markdown. - render: Markdown - type: textarea id: graph-node-logs attributes: diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index 8581c970b91..47fa2619714 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -4,16 +4,14 @@ title: "[Feature] " labels: ["enhancement"] body: - type: textarea - id: bug-report + id: feature-description attributes: label: Description description: Please provide a detailed overview of the desired feature or improvement, along with any examples or useful information. You can use Markdown. - render: Markdown - type: textarea id: blockers attributes: label: Are you aware of any blockers that must be resolved before implementing this feature? If so, which? Link to any relevant GitHub issues. 
- render: Markdown validations: required: false - type: checkboxes From 43c35b94a2a58d33cd3a0113f8921e544141fc70 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Mon, 27 Mar 2023 15:24:12 +0100 Subject: [PATCH 0044/2104] graph: Add new counter to EndpointMetrics (#4490) - Add new metric with relevant labels to EndpointMetric - Wire the MetricInterceptor of firehose to produce request metrics --- chain/ethereum/examples/firehose.rs | 8 +- chain/substreams/examples/substreams.rs | 6 +- graph/src/components/metrics/registry.rs | 14 ++- graph/src/endpoint.rs | 140 +++++++++++++++++++---- graph/src/firehose/endpoints.rs | 27 ++++- graph/src/firehose/interceptors.rs | 10 +- node/src/main.rs | 1 + node/src/manager/commands/run.rs | 1 + 8 files changed, 170 insertions(+), 37 deletions(-) diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs index f6334c72387..fbdfaa1f141 100644 --- a/chain/ethereum/examples/firehose.rs +++ b/chain/ethereum/examples/firehose.rs @@ -4,7 +4,7 @@ use graph::{ env::env_var, firehose::SubgraphLimit, log::logger, - prelude::{prost, tokio, tonic}, + prelude::{prost, tokio, tonic, MetricsRegistry}, {firehose, firehose::FirehoseEndpoint}, }; use graph_chain_ethereum::codec; @@ -24,7 +24,11 @@ async fn main() -> Result<(), Error> { let logger = logger(false); let host = "https://api.streamingfast.io:443".to_string(); - let metrics = Arc::new(EndpointMetrics::new(logger, &[host.clone()])); + let metrics = Arc::new(EndpointMetrics::new( + logger, + &[host.clone()], + Arc::new(MetricsRegistry::mock()), + )); let firehose = Arc::new(FirehoseEndpoint::new( "firehose", diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index 55858825d18..e946fd957ef 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -41,7 +41,11 @@ async fn main() -> Result<(), Error> { prometheus_registry.clone(), )); - let endpoint_metrics = 
EndpointMetrics::new(logger.clone(), &[endpoint.clone()]); + let endpoint_metrics = EndpointMetrics::new( + logger.clone(), + &[endpoint.clone()], + Arc::new(MetricsRegistry::mock()), + ); let firehose = Arc::new(FirehoseEndpoint::new( "substreams", diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index bd9fe6fe0f6..7fa5b903b05 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; -use prometheus::{labels, Histogram}; +use prometheus::{labels, Histogram, IntCounterVec}; use crate::components::metrics::{counter_with_labels, gauge_with_labels}; use crate::prelude::Collector; @@ -386,6 +386,18 @@ impl MetricsRegistry { Ok(counter) } + pub fn new_int_counter_vec( + &self, + name: &str, + help: &str, + variable_labels: &[&str], + ) -> Result, PrometheusError> { + let opts = Opts::new(name, help); + let counters = Box::new(IntCounterVec::new(opts, &variable_labels)?); + self.register(name, counters.clone()); + Ok(counters) + } + pub fn new_counter_vec( &self, name: &str, diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index bb67c5f0643..368d9db4130 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -6,9 +6,10 @@ use std::{ }, }; +use prometheus::IntCounterVec; use slog::{warn, Logger}; -use crate::data::value::Word; +use crate::{components::metrics::MetricsRegistry, data::value::Word}; /// HostCount is the underlying structure to keep the count, /// we require that all the hosts are known ahead of time, this way we can @@ -20,50 +21,140 @@ type HostCount = Arc>; /// adapters if they share the same endpoint. pub type Host = Word; +/// This struct represents all the current labels except for the result +/// which is added separately. 
If any new labels are necessary they should +/// remain in the same order as added in [`EndpointMetrics::new`] +#[derive(Clone)] +pub struct RequestLabels { + pub host: Host, + pub req_type: Word, + pub conn_type: ConnectionType, +} + +/// The type of underlying connection we are reporting for. +#[derive(Clone)] +pub enum ConnectionType { + Firehose, + Substreams, + Rpc, +} + +impl Into<&str> for &ConnectionType { + fn into(self) -> &'static str { + match self { + ConnectionType::Firehose => "firehose", + ConnectionType::Substreams => "substreams", + ConnectionType::Rpc => "rpc", + } + } +} + +impl RequestLabels { + fn to_slice(&self, is_success: bool) -> Box<[&str]> { + Box::new([ + (&self.conn_type).into(), + self.req_type.as_str(), + self.host.as_str(), + match is_success { + true => "success", + false => "failure", + }, + ]) + } +} + /// EndpointMetrics keeps track of calls success rate for specific calls, /// a success call to a host will clear the error count. -#[derive(Debug)] pub struct EndpointMetrics { logger: Logger, hosts: HostCount, + counter: Box, +} + +impl std::fmt::Debug for EndpointMetrics { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("{:?}", self.hosts)) + } } impl EndpointMetrics { - pub fn new(logger: Logger, hosts: &[impl AsRef]) -> Self { + pub fn new(logger: Logger, hosts: &[impl AsRef], registry: Arc) -> Self { let hosts = Arc::new(HashMap::from_iter( hosts .iter() .map(|h| (Host::from(h.as_ref()), AtomicU64::new(0))), )); - Self { logger, hosts } + let counter = registry + .new_int_counter_vec( + "endpoint_request", + "successfull request", + &["conn_type", "req_type", "host", "result"], + ) + .expect("unable to create endpoint_request counter_vec"); + + Self { + logger, + hosts, + counter, + } } /// This should only be used for testing. 
pub fn mock() -> Self { use slog::{o, Discard}; - Self { - logger: Logger::root(Discard, o!()), - hosts: Arc::new(HashMap::default()), + let hosts: &[&str] = &[]; + Self::new( + Logger::root(Discard, o!()), + hosts, + Arc::new(MetricsRegistry::mock()), + ) + } + + #[cfg(debug_assertions)] + pub fn report_for_test(&self, host: &Host, success: bool) { + match success { + true => self.success(&RequestLabels { + host: host.clone(), + req_type: "".into(), + conn_type: ConnectionType::Firehose, + }), + false => self.failure(&RequestLabels { + host: host.clone(), + req_type: "".into(), + conn_type: ConnectionType::Firehose, + }), } } - pub fn success(&self, host: &Host) { - match self.hosts.get(host) { + pub fn success(&self, labels: &RequestLabels) { + match self.hosts.get(&labels.host) { Some(count) => { count.store(0, Ordering::Relaxed); } - None => warn!(&self.logger, "metrics not available for host {}", host), - } + None => warn!( + &self.logger, + "metrics not available for host {}", labels.host + ), + }; + + self.counter.with_label_values(&labels.to_slice(true)).inc(); } - pub fn failure(&self, host: &Host) { - match self.hosts.get(host) { + pub fn failure(&self, labels: &RequestLabels) { + match self.hosts.get(&labels.host) { Some(count) => { count.fetch_add(1, Ordering::Relaxed); } - None => warn!(&self.logger, "metrics not available for host {}", host), - } + None => warn!( + &self.logger, + "metrics not available for host {}", &labels.host + ), + }; + + self.counter + .with_label_values(&labels.to_slice(false)) + .inc(); } /// Returns the current error count of a host or 0 if the host @@ -78,9 +169,14 @@ impl EndpointMetrics { #[cfg(test)] mod test { + use std::sync::Arc; + use slog::{o, Discard, Logger}; - use crate::endpoint::{EndpointMetrics, Host}; + use crate::{ + components::metrics::MetricsRegistry, + endpoint::{EndpointMetrics, Host}, + }; #[tokio::test] async fn should_increment_and_reset() { @@ -88,13 +184,13 @@ mod test { let hosts: &[&str] = &[&a, 
&b, &c]; let logger = Logger::root(Discard, o!()); - let metrics = EndpointMetrics::new(logger, hosts); + let metrics = EndpointMetrics::new(logger, hosts, Arc::new(MetricsRegistry::mock())); - metrics.success(&a); - metrics.failure(&a); - metrics.failure(&b); - metrics.failure(&b); - metrics.success(&c); + metrics.report_for_test(&a, true); + metrics.report_for_test(&a, false); + metrics.report_for_test(&b, false); + metrics.report_for_test(&b, false); + metrics.report_for_test(&c, true); assert_eq!(metrics.get_count(&a), 1); assert_eq!(metrics.get_count(&b), 2); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 2302c28981f..2fc5ee2ae26 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -4,7 +4,7 @@ use crate::{ blockchain::BlockPtr, cheap_clone::CheapClone, components::store::BlockNumber, - endpoint::{EndpointMetrics, Host}, + endpoint::{ConnectionType, EndpointMetrics, Host, RequestLabels}, firehose::decode_firehose_block, prelude::{anyhow, debug, info}, substreams, @@ -196,7 +196,11 @@ impl FirehoseEndpoint { let metrics = MetricsInterceptor { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), - host: self.host.clone(), + labels: RequestLabels { + host: self.host.clone(), + req_type: "unknown".into(), + conn_type: ConnectionType::Firehose, + }, }; let mut client: FetchClient< @@ -219,7 +223,11 @@ impl FirehoseEndpoint { let metrics = MetricsInterceptor { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), - host: self.host.clone(), + labels: RequestLabels { + host: self.host.clone(), + req_type: "unknown".into(), + conn_type: ConnectionType::Firehose, + }, }; let mut client = StreamClient::with_interceptor(metrics, self.auth.clone()) @@ -240,7 +248,11 @@ impl FirehoseEndpoint { let metrics = MetricsInterceptor { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), - host: self.host.clone(), + labels: 
RequestLabels { + host: self.host.clone(), + req_type: "unknown".into(), + conn_type: ConnectionType::Substreams, + }, }; let mut client = @@ -505,7 +517,9 @@ mod test { use slog::{o, Discard, Logger}; - use crate::{endpoint::EndpointMetrics, firehose::SubgraphLimit}; + use crate::{ + components::metrics::MetricsRegistry, endpoint::EndpointMetrics, firehose::SubgraphLimit, + }; use super::{AvailableCapacity, FirehoseEndpoint, FirehoseEndpoints, SUBGRAPHS_PER_CONN}; @@ -607,6 +621,7 @@ mod test { "http://127.0.0.2/", "http://127.0.0.3/", ], + Arc::new(MetricsRegistry::mock()), )); let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( @@ -646,7 +661,7 @@ mod test { endpoint_metrics.clone(), )); - endpoint_metrics.failure(&high_error_adapter1.host); + endpoint_metrics.report_for_test(&high_error_adapter1.host, false); let mut endpoints = FirehoseEndpoints::from(vec![ high_error_adapter1.clone(), diff --git a/graph/src/firehose/interceptors.rs b/graph/src/firehose/interceptors.rs index 0c248188f41..8c86d034db4 100644 --- a/graph/src/firehose/interceptors.rs +++ b/graph/src/firehose/interceptors.rs @@ -8,7 +8,7 @@ use tonic::{ service::Interceptor, }; -use crate::endpoint::{EndpointMetrics, Host}; +use crate::endpoint::{EndpointMetrics, RequestLabels}; #[derive(Clone)] pub struct AuthInterceptor { @@ -37,7 +37,7 @@ impl Interceptor for AuthInterceptor { pub struct MetricsInterceptor { pub(crate) metrics: Arc, pub(crate) service: S, - pub(crate) host: Host, + pub(crate) labels: RequestLabels, } impl Service for MetricsInterceptor @@ -60,16 +60,16 @@ where } fn call(&mut self, req: Request) -> Self::Future { - let host = self.host.clone(); + let labels = self.labels.clone(); let metrics = self.metrics.clone(); let fut = self.service.call(req); let res = async move { let res = fut.await; if res.is_ok() { - metrics.success(&host); + metrics.success(&labels); } else { - metrics.failure(&host); + metrics.failure(&labels); } res }; diff --git a/node/src/main.rs 
b/node/src/main.rs index 577867f1c4a..59ca831812d 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -231,6 +231,7 @@ async fn main() { let endpoint_metrics = Arc::new(EndpointMetrics::new( logger.clone(), &config.chains.provider_urls(), + metrics_registry.cheap_clone(), )); // Ethereum clients; query nodes ignore all ethereum clients and never diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index 5ebc450832c..d9e326579ca 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -75,6 +75,7 @@ pub async fn run( let endpoint_metrics = Arc::new(EndpointMetrics::new( logger.clone(), &config.chains.provider_urls(), + metrics_registry.cheap_clone(), )); // Convert the clients into a link resolver. Since we want to get past From b1da5e5057fe6fc2eaaba128a63a5077452d825d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 26 Jan 2023 09:42:36 -0800 Subject: [PATCH 0045/2104] store: Factor getting `VersionStats` out from `prune_by_copying` --- graph/src/components/store/mod.rs | 6 +- node/src/manager/commands/prune.rs | 8 ++- store/postgres/src/catalog.rs | 31 ++++++++++ store/postgres/src/relational.rs | 16 +++++ store/postgres/src/relational/prune.rs | 83 +++++++++++++++++--------- 5 files changed, 112 insertions(+), 32 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 1b9a7c8e06e..e218bd4c669 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1149,7 +1149,11 @@ pub trait PruneReporter: Send + 'static { fn start_analyze(&mut self) {} fn start_analyze_table(&mut self, table: &str) {} fn finish_analyze_table(&mut self, table: &str) {} - fn finish_analyze(&mut self, stats: &[VersionStats]) {} + + /// Analyzing tables has finished. 
`stats` are the stats for all tables + /// in the deployment, `analyzed ` are the names of the tables that were + /// actually analyzed + fn finish_analyze(&mut self, stats: &[VersionStats], analyzed: &[&str]) {} fn copy_final_start(&mut self, earliest_block: BlockNumber, final_block: BlockNumber) {} fn copy_final_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) {} diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 41e6754188b..39cddea7270 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -67,10 +67,14 @@ impl PruneReporter for Progress { std::io::stdout().flush().ok(); } - fn finish_analyze(&mut self, stats: &[graph::components::store::VersionStats]) { + fn finish_analyze( + &mut self, + stats: &[graph::components::store::VersionStats], + analyzed: &[&str], + ) { println!( "\rAnalyzed {} tables in {}s", - stats.len(), + analyzed.len(), self.analyze_start.elapsed().as_secs() ); show_stats(stats, HashSet::new()).ok(); diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index eb289dbf48d..f9f43f873ae 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -801,3 +801,34 @@ pub(crate) fn set_stats_target( conn.batch_execute(&query)?; Ok(()) } + +/// Return the names of all tables in the `namespace` that need to be +/// analyzed. 
Whether a table needs to be analyzed is determined with the +/// same logic that Postgres' [autovacuum +/// daemon](https://www.postgresql.org/docs/current/routine-vacuuming.html#AUTOVACUUM) +/// uses +pub(crate) fn needs_autoanalyze( + conn: &PgConnection, + namespace: &Namespace, +) -> Result, StoreError> { + const QUERY: &str = "select relname \ + from pg_stat_user_tables \ + where (select setting::numeric from pg_settings where name = 'autovacuum_analyze_threshold') \ + + (select setting::numeric from pg_settings where name = 'autovacuum_analyze_scale_factor')*(n_live_tup + n_dead_tup) < n_mod_since_analyze + and schemaname = $1"; + + #[derive(Queryable, QueryableByName)] + struct TableName { + #[sql_type = "Text"] + name: SqlName, + } + + let tables = sql_query(QUERY) + .bind::(namespace.as_str()) + .get_results::(conn) + .optional()? + .map(|tables| tables.into_iter().map(|t| t.name).collect()) + .unwrap_or(vec![]); + + Ok(tables) +} diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 2c9211a583f..85dd87baf1a 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -17,6 +17,10 @@ mod query_tests; pub(crate) mod index; mod prune; +use diesel::pg::Pg; +use diesel::serialize::Output; +use diesel::sql_types::Text; +use diesel::types::{FromSql, ToSql}; use diesel::{connection::SimpleConnection, Connection}; use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; use graph::cheap_clone::CheapClone; @@ -168,6 +172,18 @@ impl Borrow for &SqlName { } } +impl FromSql for SqlName { + fn from_sql(bytes: Option<&[u8]>) -> diesel::deserialize::Result { + >::from_sql(bytes).map(|s| SqlName::verbatim(s)) + } +} + +impl ToSql for SqlName { + fn to_sql(&self, out: &mut Output) -> diesel::serialize::Result { + >::to_sql(&self.0, out) + } +} + /// The SQL type to use for GraphQL ID properties. 
We support /// strings and byte arrays #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index e95b5fc44d8..1c2dced8166 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -7,7 +7,7 @@ use diesel::{ Connection, PgConnection, RunQueryDsl, }; use graph::{ - components::store::PruneReporter, + components::store::{PruneReporter, VersionStats}, prelude::{BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError}, slog::{warn, Logger}, }; @@ -242,6 +242,56 @@ impl TablePair { } impl Layout { + /// Analyze the `tables` and return `VersionStats` for all tables in + /// this `Layout` + fn analyze_tables( + &self, + conn: &PgConnection, + reporter: &mut dyn PruneReporter, + mut tables: Vec<&Arc>, + cancel: &CancelHandle, + ) -> Result, CancelableError> { + reporter.start_analyze(); + tables.sort_by_key(|table| table.name.as_str()); + for table in &tables { + reporter.start_analyze_table(table.name.as_str()); + table.analyze(conn)?; + reporter.finish_analyze_table(table.name.as_str()); + cancel.check_cancel()?; + } + let stats = catalog::stats(conn, &self.site.namespace)?; + + let analyzed: Vec<_> = tables.iter().map(|table| table.name.as_str()).collect(); + reporter.finish_analyze(&stats, &analyzed); + + Ok(stats) + } + + /// Return statistics for the tables in this `Layout`. If `analyze_all` + /// is `true`, analyze all tables before getting statistics. If it is + /// `false`, only analyze tables that Postgres' autovacuum daemon would + /// consider needing analysis. + fn version_stats( + &self, + conn: &PgConnection, + reporter: &mut dyn PruneReporter, + analyze_all: bool, + cancel: &CancelHandle, + ) -> Result, CancelableError> { + let needs_analyze = if analyze_all { + vec![] + } else { + catalog::needs_autoanalyze(conn, &self.site.namespace)? 
+ }; + let tables: Vec<_> = self + .tables + .values() + .filter(|table| analyze_all || needs_analyze.contains(&table.name)) + .collect(); + + self.analyze_tables(conn, reporter, tables, cancel) + } + /// Remove all data from the underlying deployment that is not needed to /// respond to queries before block `earliest_block`. The strategy /// implemented here works well for situations in which pruning will @@ -287,18 +337,7 @@ impl Layout { prune_ratio: f64, cancel: &CancelHandle, ) -> Result<(), CancelableError> { - // Analyze all tables and get statistics for them - let mut tables: Vec<_> = self.tables.values().collect(); - reporter.start_analyze(); - tables.sort_by_key(|table| table.name.as_str()); - for table in tables { - reporter.start_analyze_table(table.name.as_str()); - table.analyze(conn)?; - reporter.finish_analyze_table(table.name.as_str()); - cancel.check_cancel()?; - } - let stats = catalog::stats(conn, &self.site.namespace)?; - reporter.finish_analyze(stats.as_slice()); + let stats = self.version_stats(conn, reporter, true, cancel)?; // Determine which tables are prunable and create a shadow table for // them via `TablePair::create` @@ -367,22 +406,8 @@ impl Layout { catalog::drop_schema(conn, dst_nsp.as_str())?; // Analyze the new tables - reporter.start_analyze(); - for table in &prunable_src { - reporter.start_analyze_table(table.name.as_str()); - table.analyze(conn)?; - reporter.finish_analyze_table(table.name.as_str()); - cancel.check_cancel()?; - } - let stats: Vec<_> = catalog::stats(conn, &self.site.namespace)? 
- .into_iter() - .filter(|s| { - prunable_src - .iter() - .any(|table| *table.name.as_str() == s.tablename) - }) - .collect(); - reporter.finish_analyze(stats.as_slice()); + let tables = prunable_src.iter().collect(); + self.analyze_tables(conn, reporter, tables, cancel)?; reporter.finish_prune(); From 04267c440a2e5fe3df0b6916a18ec86318125621 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 26 Jan 2023 10:08:52 -0800 Subject: [PATCH 0046/2104] store: Factor determining which tables are prunable into a helper --- store/postgres/src/relational/prune.rs | 36 ++++++++++++++++++-------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 1c2dced8166..35da4a9803a 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -292,6 +292,28 @@ impl Layout { self.analyze_tables(conn, reporter, tables, cancel) } + /// Return all tables and their stats whose ratio of distinct entities + /// to versions is less than `prune_ratio` + fn prunable_tables<'a>( + &self, + stats: &'a [VersionStats], + prune_ratio: f64, + ) -> Vec<(&Arc
, &'a VersionStats)> { + let mut prunable_tables = self + .tables + .values() + .filter_map(|table| { + stats + .iter() + .find(|stats| stats.tablename == table.name.as_str()) + .map(|stats| (table, stats)) + }) + .filter(|(_, stats)| stats.ratio <= prune_ratio) + .collect::>(); + prunable_tables.sort_by(|(a, _), (b, _)| a.name.as_str().cmp(b.name.as_str())); + prunable_tables + } + /// Remove all data from the underlying deployment that is not needed to /// respond to queries before block `earliest_block`. The strategy /// implemented here works well for situations in which pruning will @@ -345,16 +367,9 @@ impl Layout { let prunable_tables = conn.transaction(|| -> Result<_, StoreError> { catalog::recreate_schema(conn, dst_nsp.as_str())?; - let mut prunable_tables: Vec = self - .tables - .values() - .filter_map(|table| { - stats - .iter() - .find(|s| s.tablename == table.name.as_str()) - .map(|s| (table, s)) - }) - .filter(|(_, stats)| stats.ratio <= prune_ratio) + let prunable_tables: Vec = self + .prunable_tables(&stats, prune_ratio) + .into_iter() .map(|(table, _)| { TablePair::create( conn, @@ -364,7 +379,6 @@ impl Layout { ) }) .collect::>()?; - prunable_tables.sort_by(|a, b| a.src.name.as_str().cmp(b.src.name.as_str())); Ok(prunable_tables) })?; cancel.check_cancel()?; From d8ca8da720cea8097a6014c30061aa6d79120122 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 26 Jan 2023 16:38:29 -0800 Subject: [PATCH 0047/2104] store: Add a column subgraph_manifest.history_blocks --- docs/implementation/metadata.md | 1 + .../migrations/2023-03-06-002954_add_pruning/down.sql | 2 ++ .../postgres/migrations/2023-03-06-002954_add_pruning/up.sql | 4 ++++ store/postgres/src/deployment.rs | 3 +++ store/postgres/src/detail.rs | 1 + 5 files changed, 11 insertions(+) create mode 100644 store/postgres/migrations/2023-03-06-002954_add_pruning/down.sql create mode 100644 store/postgres/migrations/2023-03-06-002954_add_pruning/up.sql diff --git 
a/docs/implementation/metadata.md b/docs/implementation/metadata.md index ee54fb30361..a999c064a2a 100644 --- a/docs/implementation/metadata.md +++ b/docs/implementation/metadata.md @@ -106,6 +106,7 @@ shard alongside the deployment's data in `sgdNNN`. | `start_block_hash` | `bytea` | Parent of the smallest start block from the manifest | | `start_block_number` | `int4` | | | `on_sync` | `text` | Additional behavior when deployment becomes synced | +| `history_blocks` | `int4!` | How many blocks of history to keep | ### `subgraph_deployment_assignment` diff --git a/store/postgres/migrations/2023-03-06-002954_add_pruning/down.sql b/store/postgres/migrations/2023-03-06-002954_add_pruning/down.sql new file mode 100644 index 00000000000..270b6e55f27 --- /dev/null +++ b/store/postgres/migrations/2023-03-06-002954_add_pruning/down.sql @@ -0,0 +1,2 @@ +alter table subgraphs.subgraph_manifest + drop column history_blocks; diff --git a/store/postgres/migrations/2023-03-06-002954_add_pruning/up.sql b/store/postgres/migrations/2023-03-06-002954_add_pruning/up.sql new file mode 100644 index 00000000000..4e6b80254bc --- /dev/null +++ b/store/postgres/migrations/2023-03-06-002954_add_pruning/up.sql @@ -0,0 +1,4 @@ +alter table subgraphs.subgraph_manifest + add column history_blocks int4 + not null default 2147483647 + check (history_blocks > 0); diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 1ee969d85b3..5c1e7f30269 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -179,6 +179,9 @@ table! { // Names stored as present in the schema, not in snake case. 
entities_with_causality_region -> Array, on_sync -> Nullable, + // How many blocks of history to keep, defaults to `i32::max` for + // unlimited history + history_blocks -> Integer, } } diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 003422399b2..e59df301f6c 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -341,6 +341,7 @@ struct StoredSubgraphManifest { raw_yaml: Option, entities_with_causality_region: Vec, on_sync: Option, + history_blocks: i32, } impl From for SubgraphManifestEntity { From 0c72944fdb9c07084cc5e4b004a390a74a563e96 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 26 Jan 2023 16:57:01 -0800 Subject: [PATCH 0048/2104] store: Periodically refresh Layout.history_blocks --- store/postgres/src/deployment.rs | 24 ++++++++++++++++++++++++ store/postgres/src/relational.rs | 22 +++++++++++++++++----- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 5c1e7f30269..c9a725d199a 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -349,6 +349,30 @@ impl ManifestInfo { } } +// Return how many blocks of history this subgraph should keep +pub fn history_blocks(conn: &PgConnection, site: &Site) -> Result { + use subgraph_manifest as sm; + sm::table + .select(sm::history_blocks) + .filter(sm::id.eq(site.id)) + .first::(conn) + .map_err(StoreError::from) +} + +pub fn set_history_blocks( + conn: &PgConnection, + site: &Site, + history_blocks: BlockNumber, +) -> Result<(), StoreError> { + use subgraph_manifest as sm; + + update(sm::table.filter(sm::id.eq(site.id))) + .set(sm::history_blocks.eq(history_blocks)) + .execute(conn) + .map(|_| ()) + .map_err(StoreError::from) +} + #[allow(dead_code)] pub fn features(conn: &PgConnection, site: &Site) -> Result, StoreError> { use subgraph_manifest as sm; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs 
index 85dd87baf1a..26cc5ca304b 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -247,6 +247,8 @@ pub struct Layout { pub enums: EnumMap, /// The query to count all entities pub count_query: String, + /// How many blocks of history the subgraph should keep + pub history_blocks: BlockNumber, } impl Layout { @@ -374,6 +376,7 @@ impl Layout { tables, enums, count_query, + history_blocks: i32::MAX, }) } @@ -909,15 +912,22 @@ impl Layout { true } - /// Update the layout with the latest information from the database; for - /// now, an update only changes the `is_account_like` flag for tables or - /// the layout's site. If no update is needed, just return `self`. - pub fn refresh( + /// Update the layout with the latest information from the database; an + /// update can only change the `is_account_like` flag for tables, the + /// layout's site, or the `history_blocks`. If no update is needed, just + /// return `self`. + /// + /// This is tied closely to how the `LayoutCache` works and called from + /// it right after creating a `Layout`, and periodically to update the + /// `Layout` in case changes were made + fn refresh( self: Arc, conn: &PgConnection, site: Arc, ) -> Result, StoreError> { let account_like = crate::catalog::account_like(conn, &self.site)?; + let history_blocks = deployment::history_blocks(conn, &self.site)?; + let is_account_like = { |table: &Table| account_like.contains(table.name.as_str()) }; let changed_tables: Vec<_> = self @@ -925,9 +935,10 @@ impl Layout { .values() .filter(|table| table.is_account_like != is_account_like(table.as_ref())) .collect(); - if changed_tables.is_empty() && site == self.site { + if changed_tables.is_empty() && site == self.site && history_blocks == self.history_blocks { return Ok(self); } + let mut layout = (*self).clone(); for table in changed_tables.into_iter() { let mut table = (*table.as_ref()).clone(); @@ -935,6 +946,7 @@ impl Layout { layout.tables.insert(table.object.clone(), 
Arc::new(table)); } layout.site = site; + layout.history_blocks = history_blocks; Ok(Arc::new(layout)) } } From b409b858fbfba5c6b90348d0c8230d463351faa3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 1 Feb 2023 10:22:09 -0800 Subject: [PATCH 0049/2104] node, store: Set history_blocks from `graphman prune` --- node/src/bin/manager.rs | 16 ++++++++++++-- node/src/manager/commands/prune.rs | 8 +++++++ store/postgres/src/deployment_store.rs | 29 ++++++++++++++++++++++++++ store/postgres/src/subgraph_store.rs | 12 +++++++++++ 4 files changed, 63 insertions(+), 2 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 78044b4cdf5..ad280bba9fa 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -240,7 +240,15 @@ pub enum Command { #[clap(subcommand)] Index(IndexCommand), - /// Prune deployments + /// Prune a deployment + /// + /// Keep only entity versions that are needed to respond to queries at + /// block heights that are within `history` blocks of the subgraph head; + /// all other entity versions are removed. + /// + /// Unless `--once` is given, this setting is permanent and the subgraph + /// will periodically be pruned to remove history as the subgraph head + /// moves forward. 
Prune { /// The deployment to prune (see `help info`) deployment: DeploymentSearch, @@ -250,6 +258,9 @@ pub enum Command { /// How much history to keep in blocks #[clap(long, short = 'y', default_value = "10000")] history: usize, + /// Prune only this once + #[clap(long, short)] + once: bool, }, /// General database management @@ -1372,9 +1383,10 @@ async fn main() -> anyhow::Result<()> { deployment, history, prune_ratio, + once, } => { let (store, primary_pool) = ctx.store_and_primary(); - commands::prune::run(store, primary_pool, deployment, history, prune_ratio).await + commands::prune::run(store, primary_pool, deployment, history, prune_ratio, once).await } Drop { deployment, diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 39cddea7270..641272fb4cc 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -149,6 +149,7 @@ pub async fn run( search: DeploymentSearch, history: usize, prune_ratio: f64, + once: bool, ) -> Result<(), anyhow::Error> { let history = history as BlockNumber; let deployment = search.locate_unique(&primary_pool)?; @@ -192,5 +193,12 @@ pub async fn run( ) .await?; + // Only after everything worked out, make the history setting permanent + if !once { + store + .subgraph_store() + .set_history_blocks(&deployment, history, ETH_ENV.reorg_threshold)?; + } + Ok(()) } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index f18fbeb71d8..7af94df4b08 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -863,6 +863,35 @@ impl DeploymentStore { .await } + pub(crate) fn set_history_blocks( + &self, + site: &Site, + history_blocks: BlockNumber, + reorg_threshold: BlockNumber, + ) -> Result<(), StoreError> { + if history_blocks <= reorg_threshold { + return Err(constraint_violation!( + "the amount of history to keep for sgd{} can not be set to \ + {history_blocks} since it must be more than 
the \ + reorg threshold {reorg_threshold}", + site.id + )); + } + + if history_blocks <= 0 { + return Err(constraint_violation!( + "history_blocks must be a positive number" + )); + } + + // Invalidate the layout cache for this site so that the next access + // will use the updated value + self.layout_cache.remove(site); + + let conn = self.get_conn()?; + deployment::set_history_blocks(&conn, site, history_blocks) + } + pub(crate) async fn prune( self: &Arc, mut reporter: Box, diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index ca4a46608b2..9838f8f6d51 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1151,6 +1151,18 @@ impl SubgraphStoreInner { .await } + pub fn set_history_blocks( + &self, + deployment: &DeploymentLocator, + history_blocks: BlockNumber, + reorg_threshold: BlockNumber, + ) -> Result<(), StoreError> { + let site = self.find_site(deployment.id.into())?; + let store = self.for_site(&site)?; + + store.set_history_blocks(&site, history_blocks, reorg_threshold) + } + pub fn load_deployment(&self, site: &Site) -> Result { let src_store = self.for_site(site)?; src_store.load_deployment(site) From f80abc87f5ded2b1a0078383aee19198301971a9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 1 Feb 2023 12:39:04 -0800 Subject: [PATCH 0050/2104] node, store: Change the calling convention for SubgraphStore.prune Pass in how many blocks of history to keep instead of the earliest block; use the subgraph's setting if the caller doesn't specify history --- node/src/manager/commands/prune.rs | 2 +- store/postgres/src/deployment_store.rs | 18 ++++++------ store/postgres/src/subgraph_store.rs | 11 +++++--- store/postgres/tests/graft.rs | 38 ++++++++++---------------- 4 files changed, 32 insertions(+), 37 deletions(-) diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 641272fb4cc..b21a72183e7 100644 --- 
a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -183,7 +183,7 @@ pub async fn run( .prune( reporter, &deployment, - latest - history, + Some(history), // Using the setting for eth chains is a bit lazy; the value // should really depend on the chain, but we don't have a // convenient way to figure out how each chain deals with diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 7af94df4b08..33fb6ad99f5 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -896,7 +896,7 @@ impl DeploymentStore { self: &Arc, mut reporter: Box, site: Arc, - earliest_block: BlockNumber, + history_blocks: Option, reorg_threshold: BlockNumber, prune_ratio: f64, ) -> Result, StoreError> { @@ -905,13 +905,19 @@ impl DeploymentStore { let layout = store.layout(conn, site.clone())?; cancel.check_cancel()?; let state = deployment::state(conn, site.deployment.clone())?; + let history_blocks = history_blocks.unwrap_or(layout.history_blocks); - if state.latest_block.number <= reorg_threshold { + if state.latest_block.number <= history_blocks { + // We haven't accumulated enough history yet, nothing to prune return Ok(reporter); } + let earliest_block = state.latest_block.number - history_blocks; + if state.earliest_block_number > earliest_block { - return Err(constraint_violation!("earliest block can not move back from {} to {}", state.earliest_block_number, earliest_block).into()); + // We already have less history than we need (e.g., because + // of a manual onetime prune), nothing to prune + return Ok(reporter) } let final_block = state.latest_block.number - reorg_threshold; @@ -919,12 +925,6 @@ impl DeploymentStore { return Err(constraint_violation!("the earliest block {} must be at least {} blocks before the current latest block {}", earliest_block, reorg_threshold, state.latest_block.number).into()); } - if let Some((_, graft)) = deployment::graft_point(conn, 
&site.deployment)? { - if graft.block_number() >= earliest_block { - return Err(constraint_violation!("the earliest block {} must be after the graft point {}", earliest_block, graft.block_number()).into()); - } - } - cancel.check_cancel()?; conn.transaction(|| { diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 9838f8f6d51..46e95406c92 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1113,8 +1113,11 @@ impl SubgraphStoreInner { store.set_account_like(site, table, is_account_like).await } - /// Remove the history that is only needed to respond to queries before - /// block number `earliest_block` from the given deployment + /// Remove the history exceeding `history_blocks` blocks setting. Only + /// entity versions needed for queries at block heights within + /// `history_blocks` blocks of the current subgraph head will be kept. + /// If `history_blocks` is `None`, use the subgraph's `history_blocks` + /// setting. 
/// /// Only tables with a ratio of entities to entity versions below /// `prune_ratio` will be pruned; that ratio is determined by looking at @@ -1137,7 +1140,7 @@ impl SubgraphStoreInner { &self, reporter: Box, deployment: &DeploymentLocator, - earliest_block: BlockNumber, + history_blocks: Option, reorg_threshold: BlockNumber, prune_ratio: f64, ) -> Result, StoreError> { @@ -1147,7 +1150,7 @@ impl SubgraphStoreInner { let store = self.for_site(&site)?; store - .prune(reporter, site, earliest_block, reorg_threshold, prune_ratio) + .prune(reporter, site, history_blocks, reorg_threshold, prune_ratio) .await } diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 208a06be8aa..34f791100fd 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -558,37 +558,25 @@ fn prune() { .into_iter() .map(|entity| entity.id().unwrap()) .collect(); - assert_eq!(act, exp); + assert_eq!(act, exp, "different users visible at block {block}"); } - async fn prune( - store: &DieselSubgraphStore, - src: &DeploymentLocator, - earliest_block: BlockNumber, - ) -> Result<(), StoreError> { + async fn prune(store: &DieselSubgraphStore, src: &DeploymentLocator) -> Result<(), StoreError> { struct Progress; impl PruneReporter for Progress {} let reporter = Box::new(Progress); - store - .prune(reporter, src, earliest_block, 1, 1.1) - .await - .map(|_| ()) + store.prune(reporter, src, None, 1, 1.1).await.map(|_| ()) } run_test(|store, src| async move { - // The setup sets the subgraph pointer to block 2, we try to set - // earliest block to 5 - prune(&store, &src, 5) - .await - .expect_err("setting earliest block later than latest does not work"); + store + .set_history_blocks(&src, -3, 10) + .expect_err("history_blocks can not be set to a negative number"); - // Latest block 2 minus reorg threshold 1 means we need to copy - // final blocks from block 1, but want earliest as block 2, i.e. 
no - // final blocks which won't work - prune(&store, &src, 2) - .await - .expect_err("setting earliest block after last final block fails"); + store + .set_history_blocks(&src, 10, 10) + .expect_err("history_blocks must be bigger than reorg_threshold"); // Add another version for user 2 at block 4 let user2 = create_test_entity( @@ -612,12 +600,16 @@ fn prune() { // 2 | [1,5) [5,) // 3 | [1,2) [2,) - // Forward block ptr to block 5 + // Forward block ptr to block 6 transact_and_wait(&store, &src, BLOCKS[6].clone(), vec![]) .await .unwrap(); + + // Keep 3 blocks of history, i.e. blocks 4..6 + store.set_history_blocks(&src, 3, 0).unwrap(); + // Pruning only removes the [1,2) version of user 3 - prune(&store, &src, 3).await.expect("pruning works"); + prune(&store, &src).await.expect("pruning works"); // Check which versions exist at every block, even if they are // before the new earliest block, since we don't have a convenient From 51707ec0db338a48142cb4a8cfc958e83403a2c5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 1 Feb 2023 17:00:49 -0800 Subject: [PATCH 0051/2104] all: Move reorg threshold env var to garph::env The store also needs access to it, and this avoids making the store dependent on graph::chain::ethereum --- chain/ethereum/src/chain.rs | 2 +- chain/ethereum/src/env.rs | 7 ------- graph/src/env/mod.rs | 10 +++++++++- node/src/main.rs | 2 +- node/src/manager/commands/prune.rs | 14 ++++++++------ node/src/manager/commands/run.rs | 2 +- tests/src/fixture/ethereum.rs | 6 +++--- 7 files changed, 23 insertions(+), 20 deletions(-) diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index eb29003b2da..c3ad5ca6861 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -479,7 +479,7 @@ impl Blockchain for Chain { // present in the DB. 
Box::new(PollingBlockIngestor::new( logger, - crate::ENV_VARS.reorg_threshold, + graph::env::ENV_VARS.reorg_threshold, eth_adapter, self.chain_store().cheap_clone(), self.polling_ingestor_interval, diff --git a/chain/ethereum/src/env.rs b/chain/ethereum/src/env.rs index 47dfcf74e16..af7f6a97eb0 100644 --- a/chain/ethereum/src/env.rs +++ b/chain/ethereum/src/env.rs @@ -20,9 +20,6 @@ pub struct EnvVars { /// default value is 2000. pub get_logs_max_contracts: usize, - /// Set by the environment variable `ETHEREUM_REORG_THRESHOLD`. The default - /// value is 250 blocks. - pub reorg_threshold: BlockNumber, /// Set by the environment variable `ETHEREUM_TRACE_STREAM_STEP_SIZE`. The /// default value is 50 blocks. pub trace_stream_step_size: BlockNumber, @@ -111,7 +108,6 @@ impl From for EnvVars { .filter(|s| !s.is_empty()) .map(str::to_string) .collect(), - reorg_threshold: x.reorg_threshold, trace_stream_step_size: x.trace_stream_step_size, max_event_only_range: x.max_event_only_range, block_batch_size: x.block_batch_size, @@ -145,9 +141,6 @@ struct Inner { #[envconfig(from = "GRAPH_ETH_GET_LOGS_MAX_CONTRACTS", default = "2000")] get_logs_max_contracts: usize, - // JSON-RPC specific. - #[envconfig(from = "ETHEREUM_REORG_THRESHOLD", default = "250")] - reorg_threshold: BlockNumber, #[envconfig(from = "ETHEREUM_TRACE_STREAM_STEP_SIZE", default = "50")] trace_stream_step_size: BlockNumber, #[envconfig(from = "GRAPH_ETHEREUM_MAX_EVENT_ONLY_RANGE", default = "500")] diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index c538b8227aa..f02c35aa3b2 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -11,7 +11,8 @@ use self::graphql::*; use self::mappings::*; use self::store::*; use crate::{ - components::subgraph::SubgraphVersionSwitchingMode, runtime::gas::CONST_MAX_GAS_PER_HANDLER, + components::{store::BlockNumber, subgraph::SubgraphVersionSwitchingMode}, + runtime::gas::CONST_MAX_GAS_PER_HANDLER, }; lazy_static! 
{ @@ -168,6 +169,9 @@ pub struct EnvVars { /// Maximum number of Dynamic Data Sources after which a Subgraph will /// switch to using static filter. pub static_filters_threshold: usize, + /// Set by the environment variable `ETHEREUM_REORG_THRESHOLD`. The default + /// value is 250 blocks. + pub reorg_threshold: BlockNumber, } impl EnvVars { @@ -224,6 +228,7 @@ impl EnvVars { external_http_base_url: inner.external_http_base_url, external_ws_base_url: inner.external_ws_base_url, static_filters_threshold: inner.static_filters_threshold, + reorg_threshold: inner.reorg_threshold, }) } @@ -340,6 +345,9 @@ struct Inner { // Setting this to be unrealistically high so it doesn't get triggered. #[envconfig(from = "GRAPH_STATIC_FILTERS_THRESHOLD", default = "100000000")] static_filters_threshold: usize, + // JSON-RPC specific. + #[envconfig(from = "ETHEREUM_REORG_THRESHOLD", default = "250")] + reorg_threshold: BlockNumber, } #[derive(Clone, Debug)] diff --git a/node/src/main.rs b/node/src/main.rs index 59ca831812d..9e7aff96883 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -718,7 +718,7 @@ fn ethereum_networks_as_chains( Arc::new(EthereumBlockRefetcher {}), Arc::new(adapter_selector), runtime_adapter, - ethereum::ENV_VARS.reorg_threshold, + ENV_VARS.reorg_threshold, ethereum::ENV_VARS.ingestor_polling_interval, is_ingestible, ); diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index b21a72183e7..e0c1a028f07 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -5,12 +5,12 @@ use std::{ time::{Duration, Instant}, }; +use graph::env::ENV_VARS; use graph::{ components::store::{PruneReporter, StatusStore}, data::subgraph::status, prelude::{anyhow, BlockNumber}, }; -use graph_chain_ethereum::ENV_VARS as ETH_ENV; use graph_store_postgres::{connection_pool::ConnectionPool, Store}; use crate::manager::{ @@ -174,7 +174,7 @@ pub async fn run( println!("prune {deployment}"); println!(" latest: 
{latest}"); - println!(" final: {}", latest - ETH_ENV.reorg_threshold); + println!(" final: {}", latest - ENV_VARS.reorg_threshold); println!(" earliest: {}\n", latest - history); let reporter = Box::new(Progress::new()); @@ -188,16 +188,18 @@ pub async fn run( // should really depend on the chain, but we don't have a // convenient way to figure out how each chain deals with // finality - ETH_ENV.reorg_threshold, + ENV_VARS.reorg_threshold, prune_ratio, ) .await?; // Only after everything worked out, make the history setting permanent if !once { - store - .subgraph_store() - .set_history_blocks(&deployment, history, ETH_ENV.reorg_threshold)?; + store.subgraph_store().set_history_blocks( + &deployment, + history, + ENV_VARS.reorg_threshold, + )?; } Ok(()) diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index d9e326579ca..f623345e496 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -147,7 +147,7 @@ pub async fn run( call_cache: chain_store.cheap_clone(), eth_adapters: Arc::new(eth_adapters2), }), - ethereum::ENV_VARS.reorg_threshold, + graph::env::ENV_VARS.reorg_threshold, ethereum::ENV_VARS.ingestor_polling_interval, // We assume the tested chain is always ingestible for now true, diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index 645e43fd457..34d94e52abe 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -14,14 +14,14 @@ use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::web3::types::{Address, Log, Transaction, H160}; use graph::prelude::{ - ethabi, tiny_keccak, LightEthereumBlock, LoggerFactory, MetricsRegistry, NodeId, + ethabi, tiny_keccak, LightEthereumBlock, LoggerFactory, MetricsRegistry, NodeId, ENV_VARS, }; use graph::{blockchain::block_stream::BlockWithTriggers, prelude::ethabi::ethereum_types::U64}; +use graph_chain_ethereum::Chain; 
use graph_chain_ethereum::{ chain::BlockFinality, trigger::{EthereumBlockTriggerType, EthereumTrigger}, }; -use graph_chain_ethereum::{Chain, ENV_VARS}; pub async fn chain( blocks: Vec>, @@ -71,7 +71,7 @@ pub async fn chain( triggers_adapter, Arc::new(NoopRuntimeAdapter { x: PhantomData }), ENV_VARS.reorg_threshold, - ENV_VARS.ingestor_polling_interval, + graph_chain_ethereum::ENV_VARS.ingestor_polling_interval, // We assume the tested chain is always ingestible for now true, ); From b001636faf958a3fbcdaa170477b649d813b5ef3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 9 Feb 2023 15:24:48 -0800 Subject: [PATCH 0052/2104] store: Factor vid_range into a helper --- store/postgres/src/relational/prune.rs | 74 +++++++++++++++----------- 1 file changed, 44 insertions(+), 30 deletions(-) diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 35da4a9803a..df48251a9c7 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -8,7 +8,10 @@ use diesel::{ }; use graph::{ components::store::{PruneReporter, VersionStats}, - prelude::{BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError}, + prelude::{ + BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError, + BLOCK_NUMBER_MAX, + }, slog::{warn, Logger}, }; use itertools::Itertools; @@ -22,6 +25,42 @@ use crate::{ use super::{Layout, Namespace}; +// Additions to `Table` that are useful for pruning +impl Table { + /// Return the first and last vid of any entity that is visible in the + /// block range from `first_block` (inclusive) to `last_block` + /// (exclusive) + fn vid_range( + &self, + conn: &PgConnection, + first_block: BlockNumber, + last_block: BlockNumber, + ) -> Result<(i64, i64), StoreError> { + #[derive(QueryableByName)] + struct VidRange { + #[sql_type = "BigInt"] + min_vid: i64, + #[sql_type = "BigInt"] + max_vid: i64, + } + + // Determine the last vid that we need to 
copy + let VidRange { min_vid, max_vid } = sql_query(format!( + "select coalesce(min(vid), 0) as min_vid, \ + coalesce(max(vid), -1) as max_vid from {src} \ + where lower(block_range) <= $2 \ + and coalesce(upper(block_range), 2147483647) > $1 \ + and coalesce(upper(block_range), 2147483647) <= $2 \ + and block_range && int4range($1, $2)", + src = self.qualified_name, + )) + .bind::(first_block) + .bind::(last_block) + .get_result::(conn)?; + Ok((min_vid, max_vid)) + } +} + /// Utility to copy relevant data out of a source table and into a new /// destination table and replace the source table with the destination /// table @@ -78,18 +117,7 @@ impl TablePair { let column_list = self.column_list(); // Determine the last vid that we need to copy - let VidRange { min_vid, max_vid } = sql_query(format!( - "select coalesce(min(vid), 0) as min_vid, \ - coalesce(max(vid), -1) as max_vid from {src} \ - where lower(block_range) <= $2 \ - and coalesce(upper(block_range), 2147483647) > $1 \ - and coalesce(upper(block_range), 2147483647) <= $2 \ - and block_range && int4range($1, $2, '[]')", - src = self.src.qualified_name, - )) - .bind::(earliest_block) - .bind::(final_block) - .get_result::(conn)?; + let (min_vid, max_vid) = self.src.vid_range(conn, earliest_block, final_block)?; let mut batch_size = AdaptiveBatchSize::new(&self.src); // The first vid we still need to copy @@ -147,15 +175,9 @@ impl TablePair { let column_list = self.column_list(); // Determine the last vid that we need to copy - let VidRange { min_vid, max_vid } = sql_query(format!( - "select coalesce(min(vid), 0) as min_vid, \ - coalesce(max(vid), -1) as max_vid from {src} \ - where coalesce(upper(block_range), 2147483647) > $1 \ - and block_range && int4range($1, null)", - src = self.src.qualified_name, - )) - .bind::(final_block) - .get_result::(conn)?; + let (min_vid, max_vid) = self + .src + .vid_range(conn, final_block + 1, BLOCK_NUMBER_MAX)?; let mut batch_size = AdaptiveBatchSize::new(&self.src); // 
The first vid we still need to copy @@ -428,11 +450,3 @@ impl Layout { Ok(()) } } - -#[derive(QueryableByName)] -struct VidRange { - #[sql_type = "BigInt"] - min_vid: i64, - #[sql_type = "BigInt"] - max_vid: i64, -} From 6424bcf98694415b6ce6c9f518bbe2bbe41c8aeb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 22 Feb 2023 17:14:53 -0800 Subject: [PATCH 0053/2104] graph, store: Restructure how pruning is done We used to perform pruning in two stages, by first copying final entities for all tables, then copying nonfinal entities and switching for all tables. It is better to do this loop the other way around: we now go table-by-table, and for each of them do the nonfinal copy, then the final copy. This makes an ongoing prune operation less visible, since the subgraph writer can write in between the final copying for each table. --- graph/src/components/store/mod.rs | 4 + store/postgres/src/relational/prune.rs | 104 +++++++++++-------------- 2 files changed, 50 insertions(+), 58 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index e218bd4c669..e9135454659 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1166,6 +1166,10 @@ pub trait PruneReporter: Send + 'static { fn finish_switch(&mut self) {} fn finish_prune(&mut self) {} + + fn start_table(&mut self, table: &str) {} + + fn finish_table(&mut self, table: &str) {} } /// Represents an item retrieved from an diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index df48251a9c7..7116e9f58e4 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -350,14 +350,15 @@ impl Layout { /// needed to respond to queries at block heights at or after /// `earliest_block` to a new table and then to replace the existing /// tables with these new tables atomically in a transaction. 
Copying - /// happens in two stages: we first copy data for final blocks without - /// blocking writes, and then copy data for nonfinal blocks. The latter - /// blocks writes by taking a lock on the row for the deployment in - /// `subgraph_deployment` (via `deployment::lock`) The process for - /// switching to the new tables needs to take the naming of various - /// database objects that Postgres creates automatically into account so - /// that they all have the same names as the original objects to ensure - /// that pruning can be done again without risking name clashes. + /// happens in two stages that are performed for each table in turn: we + /// first copy data for final blocks without blocking writes, and then + /// copy data for nonfinal blocks. The latter blocks writes by taking a + /// lock on the row for the deployment in `subgraph_deployment` (via + /// `deployment::lock`) The process for switching to the new tables + /// needs to take the naming of various database objects that Postgres + /// creates automatically into account so that they all have the same + /// names as the original objects to ensure that pruning can be done + /// again without risking name clashes. 
/// /// The reason this strategy works well when a lot (or even the /// majority) of the data needs to be removed is that in the more @@ -383,66 +384,53 @@ impl Layout { ) -> Result<(), CancelableError> { let stats = self.version_stats(conn, reporter, true, cancel)?; - // Determine which tables are prunable and create a shadow table for - // them via `TablePair::create` - let dst_nsp = Namespace::prune(self.site.id); - let prunable_tables = conn.transaction(|| -> Result<_, StoreError> { - catalog::recreate_schema(conn, dst_nsp.as_str())?; - - let prunable_tables: Vec = self - .prunable_tables(&stats, prune_ratio) - .into_iter() - .map(|(table, _)| { - TablePair::create( - conn, - table.cheap_clone(), - self.site.namespace.clone(), - dst_nsp.clone(), - ) - }) - .collect::>()?; - Ok(prunable_tables) - })?; - cancel.check_cancel()?; - - // Copy final entities. This can happen in parallel to indexing as - // that part of the table will not change - reporter.copy_final_start(earliest_block, final_block); - for table in &prunable_tables { - table.copy_final_entities(conn, reporter, earliest_block, final_block, cancel)?; - } - reporter.copy_final_finish(); - - let prunable_src: Vec<_> = prunable_tables - .iter() - .map(|table| table.src.clone()) + let prunable_tables: Vec<_> = self + .prunable_tables(&stats, prune_ratio) + .into_iter() .collect(); - // Copy nonfinal entities, and replace the original `src` table with - // the smaller `dst` table - reporter.start_switch(); - // see also: deployment-lock-for-update - deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> { - for table in &prunable_tables { - reporter.copy_nonfinal_start(table.src.name.as_str()); - table.copy_nonfinal_entities(conn, reporter, final_block)?; + // create a shadow namespace where we will put the copies of our tables + let dst_nsp = Namespace::prune(self.site.id); + catalog::recreate_schema(conn, dst_nsp.as_str())?; + + // Go table by table; note that the subgraph writer can write 
in + // between the execution of the `with_lock` block below, and might + // therefore work with tables where some are pruned and some are not + // pruned yet. That does not affect correctness since we make no + // assumption about where the subgraph head is. If the subgraph + // advances during this loop, we might have an unnecessarily + // pessimistic but still safe value for `final_block`. We do assume + // that `final_block` is far enough from the subgraph head that it + // stays final even if a revert happens during this loop, but that + // is the definition of 'final' + for (table, _) in &prunable_tables { + let pair = TablePair::create( + conn, + table.cheap_clone(), + self.site.namespace.clone(), + dst_nsp.clone(), + )?; + // Copy final entities. This can happen in parallel to indexing as + // that part of the table will not change + pair.copy_final_entities(conn, reporter, earliest_block, final_block, cancel)?; + // Copy nonfinal entities, and replace the original `src` table with + // the smaller `dst` table + // see also: deployment-lock-for-update + deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> { + pair.copy_nonfinal_entities(conn, reporter, final_block)?; cancel.check_cancel().map_err(CancelableError::from)?; - } - for table in prunable_tables { - conn.transaction(|| table.switch(logger, conn))?; + conn.transaction(|| pair.switch(logger, conn))?; cancel.check_cancel().map_err(CancelableError::from)?; - } - - Ok(()) - })?; - reporter.finish_switch(); + Ok(()) + })?; + } // Get rid of the temporary prune schema catalog::drop_schema(conn, dst_nsp.as_str())?; // Analyze the new tables - let tables = prunable_src.iter().collect(); + let tables = prunable_tables.iter().map(|(table, _)| *table).collect(); self.analyze_tables(conn, reporter, tables, cancel)?; reporter.finish_prune(); From b5f36718d46e9199a12a036d84f3bfe6fcf49f4a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Feb 2023 10:44:44 -0800 Subject: [PATCH 
0054/2104] graph, node, store: Update the PruneReporter Change the PruneReporter trait to produce reasonable output for the new pruning flow --- graph/src/components/store/mod.rs | 22 ++--- node/src/manager/commands/prune.rs | 106 +++++++++++++------------ store/postgres/src/relational/prune.rs | 30 ++++--- 3 files changed, 84 insertions(+), 74 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index e9135454659..9d0ad931ecb 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1142,6 +1142,13 @@ pub struct VersionStats { pub ratio: f64, } +/// What phase of pruning we are working on +pub enum PrunePhase { + /// Handling final entities + CopyFinal, + /// Handling nonfinal entities + CopyNonfinal, +} /// Callbacks for `SubgraphStore.prune` so that callers can report progress /// of the pruning procedure to users #[allow(unused_variables)] @@ -1155,21 +1162,14 @@ pub trait PruneReporter: Send + 'static { /// actually analyzed fn finish_analyze(&mut self, stats: &[VersionStats], analyzed: &[&str]) {} - fn copy_final_start(&mut self, earliest_block: BlockNumber, final_block: BlockNumber) {} - fn copy_final_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) {} - fn copy_final_finish(&mut self) {} - + fn start_copy(&mut self) {} + fn start_table(&mut self, table: &str) {} + fn prune_batch(&mut self, table: &str, rows: usize, phase: PrunePhase, finished: bool) {} fn start_switch(&mut self) {} - fn copy_nonfinal_start(&mut self, table: &str) {} - fn copy_nonfinal_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) { - } fn finish_switch(&mut self) {} + fn finish_table(&mut self, table: &str) {} fn finish_prune(&mut self) {} - - fn start_table(&mut self, table: &str) {} - - fn finish_table(&mut self, table: &str) {} } /// Represents an item retrieved from an diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs 
index e0c1a028f07..dffebd7faf1 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -5,7 +5,7 @@ use std::{ time::{Duration, Instant}, }; -use graph::env::ENV_VARS; +use graph::{components::store::PrunePhase, env::ENV_VARS}; use graph::{ components::store::{PruneReporter, StatusStore}, data::subgraph::status, @@ -22,9 +22,10 @@ struct Progress { start: Instant, analyze_start: Instant, switch_start: Instant, + switch_time: Duration, table_start: Instant, - final_start: Instant, - nonfinal_start: Instant, + table_rows: usize, + initial_analyze: bool, } impl Progress { @@ -33,9 +34,10 @@ impl Progress { start: Instant::now(), analyze_start: Instant::now(), switch_start: Instant::now(), - final_start: Instant::now(), + switch_time: Duration::from_secs(0), table_start: Instant::now(), - nonfinal_start: Instant::now(), + table_rows: 0, + initial_analyze: true, } } } @@ -46,9 +48,20 @@ fn print_copy_header() { std::io::stdout().flush().ok(); } -fn print_copy_row(table: &str, total_rows: usize, elapsed: Duration) { +fn print_copy_row( + table: &str, + total_rows: usize, + elapsed: Duration, + phase: PrunePhase, + finished: bool, +) { + let phase = match (finished, phase) { + (true, _) => " ", + (false, PrunePhase::CopyFinal) => "(final)", + (false, PrunePhase::CopyNonfinal) => "(nonfinal)", + }; print!( - "\r{:<30} | {:>10} | {:>9}s", + "\r{:<30} | {:>10} | {:>9}s {phase}", abbreviate_table_name(table, 30), total_rows, elapsed.as_secs() @@ -58,6 +71,9 @@ fn print_copy_row(table: &str, total_rows: usize, elapsed: Duration) { impl PruneReporter for Progress { fn start_analyze(&mut self) { + if !self.initial_analyze { + println!(""); + } print!("Analyze tables"); self.analyze_start = Instant::now(); } @@ -72,74 +88,62 @@ impl PruneReporter for Progress { stats: &[graph::components::store::VersionStats], analyzed: &[&str], ) { + let stats: Vec<_> = stats + .iter() + .filter(|stat| self.initial_analyze || 
analyzed.contains(&stat.tablename.as_str())) + .map(|stats| stats.clone()) + .collect(); println!( - "\rAnalyzed {} tables in {}s", + "\rAnalyzed {} tables in {}s{: ^30}", analyzed.len(), - self.analyze_start.elapsed().as_secs() + self.analyze_start.elapsed().as_secs(), + "" ); - show_stats(stats, HashSet::new()).ok(); + show_stats(stats.as_slice(), HashSet::new()).ok(); println!(); + self.initial_analyze = false; } - fn copy_final_start(&mut self, earliest_block: BlockNumber, final_block: BlockNumber) { - println!("Copy final entities (versions live between {earliest_block} and {final_block})"); + fn start_copy(&mut self) { + println!("Copying data to new tables and replacing existing tables with them"); print_copy_header(); - - self.final_start = Instant::now(); - self.table_start = self.final_start; } - fn copy_final_batch(&mut self, table: &str, _rows: usize, total_rows: usize, finished: bool) { - print_copy_row(table, total_rows, self.table_start.elapsed()); - if finished { - println!(); - self.table_start = Instant::now(); - } - std::io::stdout().flush().ok(); + fn start_table(&mut self, _table: &str) { + self.table_start = Instant::now(); + self.table_rows = 0 } - fn copy_final_finish(&mut self) { - println!( - "Finished copying final entity versions in {}s\n", - self.final_start.elapsed().as_secs() + fn prune_batch(&mut self, table: &str, rows: usize, phase: PrunePhase, finished: bool) { + self.table_rows += rows; + print_copy_row( + table, + self.table_rows, + self.table_start.elapsed(), + phase, + finished, ); + std::io::stdout().flush().ok(); } fn start_switch(&mut self) { - println!("Blocking writes and switching tables"); - print_copy_header(); self.switch_start = Instant::now(); } fn finish_switch(&mut self) { - println!( - "Enabling writes. 
Switching took {}s\n", - self.switch_start.elapsed().as_secs() - ); + self.switch_time += self.switch_start.elapsed(); } - fn copy_nonfinal_start(&mut self, table: &str) { - print_copy_row(table, 0, Duration::from_secs(0)); - self.nonfinal_start = Instant::now(); - } - - fn copy_nonfinal_batch( - &mut self, - table: &str, - _rows: usize, - total_rows: usize, - finished: bool, - ) { - print_copy_row(table, total_rows, self.table_start.elapsed()); - if finished { - println!(); - self.table_start = Instant::now(); - } - std::io::stdout().flush().ok(); + fn finish_table(&mut self, _table: &str) { + println!(); } fn finish_prune(&mut self) { - println!("Finished pruning in {}s", self.start.elapsed().as_secs()); + println!( + "Finished pruning in {}s. Writing was blocked for {}s", + self.start.elapsed().as_secs(), + self.switch_time.as_secs() + ); } } diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 7116e9f58e4..7f59d0ea03f 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -7,7 +7,7 @@ use diesel::{ Connection, PgConnection, RunQueryDsl, }; use graph::{ - components::store::{PruneReporter, VersionStats}, + components::store::{PrunePhase, PruneReporter, VersionStats}, prelude::{ BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError, BLOCK_NUMBER_MAX, @@ -113,7 +113,7 @@ impl TablePair { earliest_block: BlockNumber, final_block: BlockNumber, cancel: &CancelHandle, - ) -> Result> { + ) -> Result<(), CancelableError> { let column_list = self.column_list(); // Determine the last vid that we need to copy @@ -122,7 +122,6 @@ impl TablePair { let mut batch_size = AdaptiveBatchSize::new(&self.src); // The first vid we still need to copy let mut next_vid = min_vid; - let mut total_rows: usize = 0; while next_vid <= max_vid { let start = Instant::now(); let rows = conn.transaction(|| { @@ -153,14 +152,18 @@ impl TablePair { })?; cancel.check_cancel()?; - 
total_rows += rows; next_vid += batch_size.size; batch_size.adapt(start.elapsed()); - reporter.copy_final_batch(self.src.name.as_str(), rows, total_rows, next_vid > max_vid); + reporter.prune_batch( + self.src.name.as_str(), + rows, + PrunePhase::CopyFinal, + next_vid > max_vid, + ); } - Ok(total_rows) + Ok(()) } /// Copy all entity versions visible after `final_block` in batches, @@ -171,7 +174,7 @@ impl TablePair { conn: &PgConnection, reporter: &mut dyn PruneReporter, final_block: BlockNumber, - ) -> Result { + ) -> Result<(), StoreError> { let column_list = self.column_list(); // Determine the last vid that we need to copy @@ -182,7 +185,6 @@ impl TablePair { let mut batch_size = AdaptiveBatchSize::new(&self.src); // The first vid we still need to copy let mut next_vid = min_vid; - let mut total_rows = 0; while next_vid <= max_vid { let start = Instant::now(); let rows = conn.transaction(|| { @@ -208,19 +210,18 @@ impl TablePair { .map_err(StoreError::from) })?; - total_rows += rows; next_vid += batch_size.size; batch_size.adapt(start.elapsed()); - reporter.copy_nonfinal_batch( + reporter.prune_batch( self.src.name.as_str(), rows, - total_rows, + PrunePhase::CopyNonfinal, next_vid > max_vid, ); } - Ok(total_rows) + Ok(()) } /// Replace the `src` table with the `dst` table @@ -403,7 +404,9 @@ impl Layout { // that `final_block` is far enough from the subgraph head that it // stays final even if a revert happens during this loop, but that // is the definition of 'final' + reporter.start_copy(); for (table, _) in &prunable_tables { + reporter.start_table(table.name.as_str()); let pair = TablePair::create( conn, table.cheap_clone(), @@ -416,6 +419,7 @@ impl Layout { // Copy nonfinal entities, and replace the original `src` table with // the smaller `dst` table // see also: deployment-lock-for-update + reporter.start_switch(); deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> { pair.copy_nonfinal_entities(conn, reporter, final_block)?; 
cancel.check_cancel().map_err(CancelableError::from)?; @@ -425,6 +429,8 @@ impl Layout { Ok(()) })?; + reporter.finish_switch(); + reporter.finish_table(table.name.as_str()); } // Get rid of the temporary prune schema catalog::drop_schema(conn, dst_nsp.as_str())?; From 620e63608d452fc90cef60c311666e6f8f3091c6 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 6 Feb 2023 17:39:10 -0800 Subject: [PATCH 0055/2104] node: Print earliest block in `graphman info -s` --- node/src/manager/deployment.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/node/src/manager/deployment.rs b/node/src/manager/deployment.rs index 9720b546a1b..a35c1f670b7 100644 --- a/node/src/manager/deployment.rs +++ b/node/src/manager/deployment.rs @@ -199,7 +199,13 @@ impl Deployment { "node_id", ]; if !statuses.is_empty() { - rows.extend(vec!["synced", "health", "latest block", "chain head block"]); + rows.extend(vec![ + "synced", + "health", + "earliest block", + "latest block", + "chain head block", + ]); } let mut list = List::new(rows); @@ -224,6 +230,7 @@ impl Deployment { rows.extend(vec![ status.synced.to_string(), status.health.as_str().to_string(), + chain.earliest_block_number.to_string(), chain .latest_block .as_ref() From d19182ab3ae717b0e71e65f2a74b8b92acbec295 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Feb 2023 16:18:40 -0800 Subject: [PATCH 0056/2104] all: Make choosing the pruning strategy more explicit --- docs/environment-variables.md | 11 +++ graph/src/components/store/mod.rs | 130 ++++++++++++++++++++++++- graph/src/env/store.rs | 33 +++++++ node/src/bin/manager.rs | 26 ++++- node/src/manager/commands/prune.rs | 35 ++++--- store/postgres/src/deployment_store.rs | 36 ++----- store/postgres/src/relational/prune.rs | 116 ++++++++++++---------- store/postgres/src/subgraph_store.rs | 28 +----- store/postgres/tests/graft.rs | 26 +++-- 9 files changed, 307 insertions(+), 134 deletions(-) diff --git a/docs/environment-variables.md 
b/docs/environment-variables.md index 83ad7782424..3e6ae1614cd 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -222,3 +222,14 @@ those. - `GRAPH_FORK_BASE`: api url for where the graph node will fork from, use `https://api.thegraph.com/subgraphs/id/` for the hosted service. - `GRAPH_DEBUG_FORK`: the IPFS hash id of the subgraph to fork. +- `GRAPH_STORE_HISTORY_COPY_THRESHOLD`, + `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: when pruning, prune by copying the + entities we will keep to new tables if we estimate that we will remove + more than a factor of `COPY_THRESHOLD` of the deployment's history. If we + estimate to remove a factor between `COPY_THRESHOLD` and + `DELETE_THRESHOLD`, prune by deleting from the existing tables of the + deployment. If we estimate to remove less than `DELETE_THRESHOLD` + entities, do not change the table. Both settings are floats, and default + to 0.5 for the `COPY_THRESHOLD` and 0.05 for the `DELETE_THRESHOLD`; they + must be between 0 and 1, and `COPY_THRESHOLD` must be bigger than + `DELETE_THRESHOLD`. diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 9d0ad931ecb..e893bc6ca0f 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -27,7 +27,7 @@ use crate::data::store::scalar::Bytes; use crate::data::store::*; use crate::data::value::Word; use crate::data_source::CausalityRegion; -use crate::prelude::*; +use crate::{constraint_violation, prelude::*}; /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. 
}` @@ -1172,6 +1172,134 @@ pub trait PruneReporter: Send + 'static { fn finish_prune(&mut self) {} } +/// Select how pruning should be done +#[derive(Clone, Copy)] +pub enum PruningStrategy { + /// Copy the data we want to keep to new tables and swap them out for + /// the existing tables + Copy, + /// Delete unneeded data from the existing tables + Delete, +} + +/// A request to prune a deployment. This struct encapsulates decision +/// making around the best strategy for pruning (deleting historical +/// entities or copying current ones) It needs to be filled with accurate +/// information about the deployment that should be pruned. +pub struct PruneRequest { + /// How many blocks of history to keep + pub history_blocks: BlockNumber, + /// The reorg threshold for the chain the deployment is on + pub reorg_threshold: BlockNumber, + /// The earliest block pruning should preserve + pub earliest_block: BlockNumber, + /// The last block that contains final entities not subject to a reorg + pub final_block: BlockNumber, + /// The latest block, i.e., the subgraph head + pub latest_block: BlockNumber, + /// An estimate of how much of the deployment we will remove + pub history_pct: f64, + /// Use the copy strategy when removing more than this fraction of + /// history. Initialized from `ENV_VARS.store.copy_threshold`, but can + /// be modified after construction + pub copy_threshold: f64, + /// Use the delete strategy when removing more than this fraction of + /// history but less than `copy_threshold`. Initialized from + /// `ENV_VARS.store.delete_threshold`, but can be modified after + /// construction + pub delete_threshold: f64, +} + +impl PruneRequest { + /// Create a `PruneRequest` for a deployment that currently contains + /// entities for blocks from `first_block` to `latest_block` that should + /// retain only `history_blocks` blocks of history and is subject to a + /// reorg threshold of `reorg_threshold`. 
+ pub fn new( + deployment: &DeploymentLocator, + history_blocks: BlockNumber, + reorg_threshold: BlockNumber, + first_block: BlockNumber, + latest_block: BlockNumber, + ) -> Result { + let copy_threshold = ENV_VARS.store.copy_threshold; + let delete_threshold = ENV_VARS.store.delete_threshold; + if copy_threshold < 0.0 || copy_threshold > 1.0 { + return Err(constraint_violation!( + "the copy threshold must be between 0 and 1 but is {copy_threshold}" + )); + } + if delete_threshold < 0.0 || delete_threshold > 1.0 { + return Err(constraint_violation!( + "the delete threshold must be between 0 and 1 but is {delete_threshold}" + )); + } + if history_blocks < reorg_threshold { + return Err(constraint_violation!( + "the deployment {} needs to keep at least {} blocks \ + of history and can't be pruned to only {} blocks of history", + deployment, + reorg_threshold, + history_blocks + )); + } + if first_block >= latest_block { + return Err(constraint_violation!( + "the earliest block {} must be before the latest block {}", + first_block, + latest_block + )); + } + + let earliest_block = latest_block - history_blocks; + let final_block = latest_block - reorg_threshold; + let total_blocks = latest_block - first_block + 1; + + let history_pct = 1.0 - history_blocks as f64 / total_blocks as f64; + + Ok(Self { + history_blocks, + reorg_threshold, + earliest_block, + final_block, + latest_block, + history_pct, + copy_threshold, + delete_threshold, + }) + } + + /// Determine what strategy to use for pruning + /// + /// We are pruning `history_pct` of the blocks from a table that has a ratio + /// of `version_ratio` entities to versions. If we are removing more than + /// `copy_threshold` percent of the versions, we prune by copying, and if we + /// are removing more than `delete_threshold` percent of the versions, we + /// prune by deleting. If we would remove less than `delete_threshold` + /// percent of the versions, we don't prune. 
+ pub fn strategy(&self, version_ratio: f64) -> Option { + // If the deployment doesn't have enough history to cover the reorg + // threshold, do not prune + if self.earliest_block >= self.final_block { + return None; + } + + // Estimate how much data we will throw away; we assume that + // entity versions are distributed evenly across all blocks so + // that `history_pct` will tell us how much of that data pruning + // will remove. + let removal_ratio = self.history_pct * (1.0 - version_ratio); + if removal_ratio >= self.copy_threshold { + Some(PruningStrategy::Copy) + } else if removal_ratio >= self.delete_threshold { + // We will return 'Delete' here when that's implemented + Some(PruningStrategy::Copy) + } else { + None + } + } +} + /// Represents an item retrieved from an /// [`EthereumCallCache`](super::EthereumCallCache) implementor. pub struct CachedEthereumCall { diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 78655f64f19..c7887183187 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -1,5 +1,7 @@ use std::fmt; +use crate::bail; + use super::*; #[derive(Clone)] @@ -81,6 +83,15 @@ pub struct EnvVarsStore { /// Set by `GRAPH_STORE_BATCH_TARGET_DURATION` (expressed in seconds). /// The default is 180s. pub batch_target_duration: Duration, + + /// Prune tables where we will remove at least this fraction of entity + /// versions by copying. Set by `GRAPH_STORE_HISTORY_COPY_THRESHOLD`. + /// The default is 0.5 + pub copy_threshold: f64, + /// Prune tables where we will remove at least this fraction of entity + /// versions, but fewer than `copy_threshold`, by deleting. Set by + /// `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`. 
The default is 0.05 + pub delete_threshold: f64, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -117,6 +128,8 @@ impl From for EnvVarsStore { connection_idle_timeout: Duration::from_secs(x.connection_idle_timeout_in_secs), write_queue_size: x.write_queue_size, batch_target_duration: Duration::from_secs(x.batch_target_duration_in_secs), + copy_threshold: x.copy_threshold.0, + delete_threshold: x.delete_threshold.0, } } } @@ -160,4 +173,24 @@ pub struct InnerStore { write_queue_size: usize, #[envconfig(from = "GRAPH_STORE_BATCH_TARGET_DURATION", default = "180")] batch_target_duration_in_secs: u64, + #[envconfig(from = "GRAPH_STORE_HISTORY_COPY_THRESHOLD", default = "0.5")] + copy_threshold: ZeroToOneF64, + #[envconfig(from = "GRAPH_STORE_HISTORY_DELETE_THRESHOLD", default = "0.05")] + delete_threshold: ZeroToOneF64, +} + +#[derive(Clone, Copy, Debug)] +struct ZeroToOneF64(f64); + +impl FromStr for ZeroToOneF64 { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let f = s.parse::()?; + if f < 0.0 || f > 1.0 { + bail!("invalid value: {s} must be between 0 and 1"); + } else { + Ok(ZeroToOneF64(f)) + } + } } diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index ad280bba9fa..236432ad1fe 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -252,9 +252,15 @@ pub enum Command { Prune { /// The deployment to prune (see `help info`) deployment: DeploymentSearch, - /// Prune tables with a ratio of entities to entity versions lower than this - #[clap(long, short, default_value = "0.20")] - prune_ratio: f64, + /// Prune by copying when removing more than this fraction of + /// history. Defaults to GRAPH_STORE_HISTORY_COPY_THRESHOLD + #[clap(long, short)] + copy_threshold: Option, + /// Prune by deleting when removing more than this fraction of + /// history but less than copy_threshold.
Defaults to + /// GRAPH_STORE_HISTORY_DELETE_THRESHOLD + #[clap(long, short)] + delete_threshold: Option, /// How much history to keep in blocks #[clap(long, short = 'y', default_value = "10000")] history: usize, @@ -1382,11 +1388,21 @@ async fn main() -> anyhow::Result<()> { Prune { deployment, history, - prune_ratio, + copy_threshold, + delete_threshold, once, } => { let (store, primary_pool) = ctx.store_and_primary(); - commands::prune::run(store, primary_pool, deployment, history, prune_ratio, once).await + commands::prune::run( + store, + primary_pool, + deployment, + history, + copy_threshold, + delete_threshold, + once, + ) + .await } Drop { deployment, diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index dffebd7faf1..06a1ed8781b 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -5,7 +5,10 @@ use std::{ time::{Duration, Instant}, }; -use graph::{components::store::PrunePhase, env::ENV_VARS}; +use graph::{ + components::store::{PrunePhase, PruneRequest}, + env::ENV_VARS, +}; use graph::{ components::store::{PruneReporter, StatusStore}, data::subgraph::status, @@ -152,7 +155,8 @@ pub async fn run( primary_pool: ConnectionPool, search: DeploymentSearch, history: usize, - prune_ratio: f64, + copy_threshold: Option, + delete_threshold: Option, once: bool, ) -> Result<(), anyhow::Error> { let history = history as BlockNumber; @@ -181,20 +185,25 @@ pub async fn run( println!(" final: {}", latest - ENV_VARS.reorg_threshold); println!(" earliest: {}\n", latest - history); + let mut req = PruneRequest::new( + &deployment, + history, + ENV_VARS.reorg_threshold, + status.earliest_block_number, + latest, + )?; + if let Some(copy_threshold) = copy_threshold { + req.copy_threshold = copy_threshold; + } + if let Some(delete_threshold) = delete_threshold { + req.delete_threshold = delete_threshold; + } + let reporter = Box::new(Progress::new()); + store .subgraph_store() - .prune( - reporter, - 
&deployment, - Some(history), - // Using the setting for eth chains is a bit lazy; the value - // should really depend on the chain, but we don't have a - // convenient way to figure out how each chain deals with - // finality - ENV_VARS.reorg_threshold, - prune_ratio, - ) + .prune(reporter, &deployment, req) .await?; // Only after everything worked out, make the history setting permanent diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 33fb6ad99f5..2fe261f8f10 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -5,7 +5,9 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::{EntityKey, EntityType, PruneReporter, StoredDynamicDataSource}; +use graph::components::store::{ + EntityKey, EntityType, PruneReporter, PruneRequest, StoredDynamicDataSource, +}; use graph::components::versions::VERSIONS; use graph::data::query::Trace; use graph::data::subgraph::{status, SPEC_VERSION_0_0_6}; @@ -896,52 +898,32 @@ impl DeploymentStore { self: &Arc, mut reporter: Box, site: Arc, - history_blocks: Option, - reorg_threshold: BlockNumber, - prune_ratio: f64, + req: PruneRequest, ) -> Result, StoreError> { let store = self.clone(); self.with_conn(move |conn, cancel| { let layout = store.layout(conn, site.clone())?; cancel.check_cancel()?; let state = deployment::state(conn, site.deployment.clone())?; - let history_blocks = history_blocks.unwrap_or(layout.history_blocks); - if state.latest_block.number <= history_blocks { + if state.latest_block.number <= req.history_blocks { // We haven't accumulated enough history yet, nothing to prune return Ok(reporter); } - let earliest_block = state.latest_block.number - history_blocks; - - if state.earliest_block_number > earliest_block { + if state.earliest_block_number > req.earliest_block { // We already 
have less history than we need (e.g., because // of a manual onetime prune), nothing to prune - return Ok(reporter) - } - - let final_block = state.latest_block.number - reorg_threshold; - if final_block <= earliest_block { - return Err(constraint_violation!("the earliest block {} must be at least {} blocks before the current latest block {}", earliest_block, reorg_threshold, state.latest_block.number).into()); + return Ok(reporter); } - cancel.check_cancel()?; - conn.transaction(|| { - deployment::set_earliest_block(conn, site.as_ref(), earliest_block) + deployment::set_earliest_block(conn, site.as_ref(), req.earliest_block) })?; cancel.check_cancel()?; - layout.prune_by_copying( - &store.logger, - reporter.as_mut(), - conn, - earliest_block, - final_block, - prune_ratio, - cancel, - )?; + layout.prune(&store.logger, reporter.as_mut(), conn, &req, cancel)?; Ok(reporter) }) .await diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 7f59d0ea03f..635563b5279 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -7,7 +7,7 @@ use diesel::{ Connection, PgConnection, RunQueryDsl, }; use graph::{ - components::store::{PrunePhase, PruneReporter, VersionStats}, + components::store::{PrunePhase, PruneReporter, PruneRequest, PruningStrategy, VersionStats}, prelude::{ BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError, BLOCK_NUMBER_MAX, @@ -315,13 +315,13 @@ impl Layout { self.analyze_tables(conn, reporter, tables, cancel) } - /// Return all tables and their stats whose ratio of distinct entities + /// Return all tables and the strategy to prune them withir stats whose ratio of distinct entities /// to versions is less than `prune_ratio` - fn prunable_tables<'a>( + fn prunable_tables( &self, - stats: &'a [VersionStats], - prune_ratio: f64, - ) -> Vec<(&Arc
, &'a VersionStats)> { + stats: &[VersionStats], + req: &PruneRequest, + ) -> Vec<(&Arc
, PruningStrategy)> { let mut prunable_tables = self .tables .values() @@ -331,21 +331,24 @@ impl Layout { .find(|stats| stats.tablename == table.name.as_str()) .map(|stats| (table, stats)) }) - .filter(|(_, stats)| stats.ratio <= prune_ratio) + .filter_map(|(table, stats)| req.strategy(stats.ratio).map(|strat| (table, strat))) .collect::>(); prunable_tables.sort_by(|(a, _), (b, _)| a.name.as_str().cmp(b.name.as_str())); prunable_tables } /// Remove all data from the underlying deployment that is not needed to - /// respond to queries before block `earliest_block`. The strategy - /// implemented here works well for situations in which pruning will - /// remove a large amount of data from the subgraph (at least 50%) + /// respond to queries before block `earliest_block`. The `req` is used + /// to determine which strategy should be used for pruning, copy or + /// delete. /// - /// Blocks before `final_block` are considered final and it is assumed - /// that they will not be modified in any way while pruning is running. - /// Only tables where the ratio of entities to entity versions is below - /// `prune_ratio` will actually be pruned. + /// Blocks before `req.final_block` are considered final and it is + /// assumed that they will not be modified in any way while pruning is + /// running. + /// + /// The copy strategy implemented here works well for situations in + /// which pruning will remove a large amount of data from the subgraph + /// (say, at least 50%) /// /// The strategy for `prune_by_copying` is to copy all data that is /// needed to respond to queries at block heights at or after @@ -373,26 +376,22 @@ impl Layout { /// also block queries to the deployment, often for extended periods of /// time. The `prune_by_copying` strategy never blocks reads, it only /// ever blocks writes. 
- pub fn prune_by_copying( + pub fn prune( &self, logger: &Logger, reporter: &mut dyn PruneReporter, conn: &PgConnection, - earliest_block: BlockNumber, - final_block: BlockNumber, - prune_ratio: f64, + req: &PruneRequest, cancel: &CancelHandle, ) -> Result<(), CancelableError> { let stats = self.version_stats(conn, reporter, true, cancel)?; - let prunable_tables: Vec<_> = self - .prunable_tables(&stats, prune_ratio) - .into_iter() - .collect(); + let prunable_tables: Vec<_> = self.prunable_tables(&stats, req).into_iter().collect(); - // create a shadow namespace where we will put the copies of our tables + // create a shadow namespace where we will put the copies of our + // tables, but only create it in the database if we really need it let dst_nsp = Namespace::prune(self.site.id); - catalog::recreate_schema(conn, dst_nsp.as_str())?; + let mut recreate_dst_nsp = true; // Go table by table; note that the subgraph writer can write in // between the execution of the `with_lock` block below, and might @@ -405,35 +404,52 @@ impl Layout { // stays final even if a revert happens during this loop, but that // is the definition of 'final' reporter.start_copy(); - for (table, _) in &prunable_tables { + for (table, strat) in &prunable_tables { reporter.start_table(table.name.as_str()); - let pair = TablePair::create( - conn, - table.cheap_clone(), - self.site.namespace.clone(), - dst_nsp.clone(), - )?; - // Copy final entities. 
This can happen in parallel to indexing as - // that part of the table will not change - pair.copy_final_entities(conn, reporter, earliest_block, final_block, cancel)?; - // Copy nonfinal entities, and replace the original `src` table with - // the smaller `dst` table - // see also: deployment-lock-for-update - reporter.start_switch(); - deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> { - pair.copy_nonfinal_entities(conn, reporter, final_block)?; - cancel.check_cancel().map_err(CancelableError::from)?; - - conn.transaction(|| pair.switch(logger, conn))?; - cancel.check_cancel().map_err(CancelableError::from)?; - - Ok(()) - })?; - reporter.finish_switch(); + match strat { + PruningStrategy::Copy => { + if recreate_dst_nsp { + catalog::recreate_schema(conn, dst_nsp.as_str())?; + recreate_dst_nsp = false; + } + let pair = TablePair::create( + conn, + table.cheap_clone(), + self.site.namespace.clone(), + dst_nsp.clone(), + )?; + // Copy final entities. This can happen in parallel to indexing as + // that part of the table will not change + pair.copy_final_entities( + conn, + reporter, + req.earliest_block, + req.final_block, + cancel, + )?; + // Copy nonfinal entities, and replace the original `src` table with + // the smaller `dst` table + // see also: deployment-lock-for-update + reporter.start_switch(); + deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> { + pair.copy_nonfinal_entities(conn, reporter, req.final_block)?; + cancel.check_cancel().map_err(CancelableError::from)?; + + conn.transaction(|| pair.switch(logger, conn))?; + cancel.check_cancel().map_err(CancelableError::from)?; + + Ok(()) + })?; + reporter.finish_switch(); + } + PruningStrategy::Delete => unimplemented!("Coming soon"), + } reporter.finish_table(table.name.as_str()); } - // Get rid of the temporary prune schema - catalog::drop_schema(conn, dst_nsp.as_str())?; + // Get rid of the temporary prune schema if we actually created it + if !recreate_dst_nsp { + 
catalog::drop_schema(conn, dst_nsp.as_str())?; + } // Analyze the new tables let tables = prunable_tables.iter().map(|(table, _)| *table).collect(); diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 46e95406c92..7c27c6eace8 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -17,7 +17,7 @@ use graph::{ server::index_node::VersionInfo, store::{ self, BlockStore, DeploymentLocator, DeploymentSchemaVersion, - EnsLookup as EnsLookupTrait, PruneReporter, SubgraphFork, + EnsLookup as EnsLookupTrait, PruneReporter, PruneRequest, SubgraphFork, }, }, constraint_violation, @@ -1113,23 +1113,7 @@ impl SubgraphStoreInner { store.set_account_like(site, table, is_account_like).await } - /// Remove the history exceeding `history_blocks` blocks setting. Only - /// entity versions needed for queries at block heights within - /// `history_blocks` blocks of the current subgraph head will be kept. - /// If `history_blocks` is `None`, use the subgraph's `history_blocks` - /// setting. - /// - /// Only tables with a ratio of entities to entity versions below - /// `prune_ratio` will be pruned; that ratio is determined by looking at - /// Postgres planner stats to avoid lengthy counting queries. It is - /// assumed that if the ratio is higher than `prune_ratio` that pruning - /// won't make much of a difference and will just cause unnecessary - /// work. - /// - /// The `reorg_threshold` is used to determine which blocks will not be - /// modified any more by the subgraph writer that may be running - /// concurrently to reduce the amount of time that the writer needs to - /// be locked out while pruning is happening. + /// Prune the history according to the parameters in `req`. /// /// Pruning can take a long time, and is structured into multiple /// transactions such that none of them takes an excessively long time. 
@@ -1140,18 +1124,14 @@ impl SubgraphStoreInner { &self, reporter: Box, deployment: &DeploymentLocator, - history_blocks: Option, - reorg_threshold: BlockNumber, - prune_ratio: f64, + req: PruneRequest, ) -> Result, StoreError> { // Find the store by the deployment id; otherwise, we could only // prune the active copy of the deployment with `deployment.hash` let site = self.find_site(deployment.id.into())?; let store = self.for_site(&site)?; - store - .prune(reporter, site, history_blocks, reorg_threshold, prune_ratio) - .await + store.prune(reporter, site, req).await } pub fn set_history_blocks( diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 34f791100fd..8d3e7f74e27 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -5,7 +5,7 @@ use std::{marker::PhantomData, str::FromStr}; use test_store::*; use graph::components::store::{ - DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, + DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, PruneRequest, }; use graph::data::store::scalar; use graph::data::subgraph::schema::*; @@ -537,6 +537,9 @@ fn on_sync() { #[test] fn prune() { + struct Progress; + impl PruneReporter for Progress {} + fn check_at_block( store: &DieselSubgraphStore, src: &DeploymentLocator, @@ -561,14 +564,6 @@ fn prune() { assert_eq!(act, exp, "different users visible at block {block}"); } - async fn prune(store: &DieselSubgraphStore, src: &DeploymentLocator) -> Result<(), StoreError> { - struct Progress; - impl PruneReporter for Progress {} - let reporter = Box::new(Progress); - - store.prune(reporter, src, None, 1, 1.1).await.map(|_| ()) - } - run_test(|store, src| async move { store .set_history_blocks(&src, -3, 10) @@ -605,11 +600,14 @@ fn prune() { .await .unwrap(); - // Keep 3 blocks of history, i.e. 
blocks 4..6 - store.set_history_blocks(&src, 3, 0).unwrap(); - - // Pruning only removes the [1,2) version of user 3 - prune(&store, &src).await.expect("pruning works"); + // Prune to 3 blocks of history, with a reorg threshold of 1 where + // we have blocks from [0, 6]. That should only remove the [1,2) + // version of user 3 + let req = PruneRequest::new(&src, 3, 1, 0, 6)?; + store + .prune(Box::new(Progress), &src, req) + .await + .expect("pruning works"); // Check which versions exist at every block, even if they are // before the new earliest block, since we don't have a convenient From 8ba6003a3a922bf8c27f158b826062a9feb25db8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 26 Jan 2023 16:29:30 -0800 Subject: [PATCH 0057/2104] all: Ongoing pruning Prune the subgraph periodically while transacting blocks. Select the right strategy (copying or deleting) depending on how much history we are removing. --- docs/environment-variables.md | 5 + graph/src/components/store/mod.rs | 29 +++++- graph/src/env/store.rs | 25 +++++ node/src/manager/commands/prune.rs | 23 +++-- store/postgres/src/deployment.rs | 11 +- store/postgres/src/deployment_store.rs | 126 ++++++++++++++++++++--- store/postgres/src/relational/prune.rs | 37 ++++++- store/postgres/src/writable.rs | 1 + store/postgres/tests/graft.rs | 136 +++++++++++++++---------- 9 files changed, 304 insertions(+), 89 deletions(-) diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 3e6ae1614cd..04433d4d0e3 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -222,6 +222,11 @@ those. - `GRAPH_FORK_BASE`: api url for where the graph node will fork from, use `https://api.thegraph.com/subgraphs/id/` for the hosted service. - `GRAPH_DEBUG_FORK`: the IPFS hash id of the subgraph to fork. +- `GRAPH_STORE_HISTORY_SLACK_FACTOR`: How much history a subgraph with + limited history can accumulate before it will be pruned. 
Setting this to + 1.1 means that the subgraph will be pruned every time it contains 10% + more history (in blocks) than its history limit. The default value is 1.2 + and the value must be at least 1.01 - `GRAPH_STORE_HISTORY_COPY_THRESHOLD`, `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: when pruning, prune by copying the entities we will keep to new tables if we estimate that we will remove diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index e893bc6ca0f..feba1856a29 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -7,6 +7,7 @@ pub use entity_cache::{EntityCache, ModificationsAndCache}; use diesel::types::{FromSql, ToSql}; pub use err::StoreError; use itertools::Itertools; +use strum_macros::Display; pub use traits::*; use futures::stream::poll_fn; @@ -1148,11 +1149,31 @@ pub enum PrunePhase { CopyFinal, /// Handling nonfinal entities CopyNonfinal, + /// Delete unneeded entity versions + Delete, +} + +impl PrunePhase { + pub fn strategy(&self) -> PruningStrategy { + match self { + PrunePhase::CopyFinal | PrunePhase::CopyNonfinal => PruningStrategy::Copy, + PrunePhase::Delete => PruningStrategy::Delete, + } + } } + /// Callbacks for `SubgraphStore.prune` so that callers can report progress /// of the pruning procedure to users #[allow(unused_variables)] pub trait PruneReporter: Send + 'static { + /// A pruning run has started. It will use the given `strategy` and + /// remove `history_frac` part of the blocks of the deployment, which + /// amounts to `history_blocks` many blocks. 
+ /// + /// Before pruning, the subgraph has data for blocks from + /// `earliest_block` to `latest_block` + fn start(&mut self, req: &PruneRequest) {} + fn start_analyze(&mut self) {} fn start_analyze_table(&mut self, table: &str) {} fn finish_analyze_table(&mut self, table: &str) {} @@ -1162,18 +1183,17 @@ pub trait PruneReporter: Send + 'static { /// actually analyzed fn finish_analyze(&mut self, stats: &[VersionStats], analyzed: &[&str]) {} - fn start_copy(&mut self) {} fn start_table(&mut self, table: &str) {} fn prune_batch(&mut self, table: &str, rows: usize, phase: PrunePhase, finished: bool) {} fn start_switch(&mut self) {} fn finish_switch(&mut self) {} fn finish_table(&mut self, table: &str) {} - fn finish_prune(&mut self) {} + fn finish(&mut self) {} } /// Select how pruning should be done -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, Display, PartialEq)] pub enum PruningStrategy { /// Copy the data we want to keep to new tables and swap them out for /// the existing tables @@ -1292,8 +1312,7 @@ impl PruneRequest { if removal_ratio >= self.copy_threshold { Some(PruningStrategy::Copy) } else if removal_ratio >= self.delete_threshold { - // We will return 'Delete' here when that's implemented - Some(PruningStrategy::Copy) + Some(PruningStrategy::Delete) } else { None } diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index c7887183187..113755259bc 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -92,6 +92,12 @@ pub struct EnvVarsStore { /// versions, but fewer than `copy_threshold`, by deleting. Set by /// `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`. The default is 0.05 pub delete_threshold: f64, + /// How much history a subgraph with limited history can accumulate + /// before it will be pruned. Setting this to 1.1 means that the + /// subgraph will be pruned every time it contains 10% more history (in + /// blocks) than its history limit. 
The default value is 1.2 and the + /// value must be at least 1.01 + pub history_slack_factor: f64, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -130,6 +136,7 @@ impl From for EnvVarsStore { batch_target_duration: Duration::from_secs(x.batch_target_duration_in_secs), copy_threshold: x.copy_threshold.0, delete_threshold: x.delete_threshold.0, + history_slack_factor: x.history_slack_factor.0, } } } @@ -177,6 +184,8 @@ pub struct InnerStore { copy_threshold: ZeroToOneF64, #[envconfig(from = "GRAPH_STORE_HISTORY_COPY_THRESHOLD", default = "0.05")] delete_threshold: ZeroToOneF64, + #[envconfig(from = "GRAPH_STORE_HISTORY_SLACK_FACTOR", default = "1.2")] + history_slack_factor: HistorySlackF64, } #[derive(Clone, Copy, Debug)] @@ -194,3 +203,19 @@ impl FromStr for ZeroToOneF64 { } } } + +#[derive(Clone, Copy, Debug)] +struct HistorySlackF64(f64); + +impl FromStr for HistorySlackF64 { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let f = s.parse::()?; + if f < 1.01 { + bail!("invalid value: {s} must be bigger than 1.01"); + } else { + Ok(HistorySlackF64(f)) + } + } +} diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 06a1ed8781b..4334cc1fbc1 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -51,7 +51,7 @@ fn print_copy_header() { std::io::stdout().flush().ok(); } -fn print_copy_row( +fn print_batch( table: &str, total_rows: usize, elapsed: Duration, @@ -62,6 +62,7 @@ fn print_copy_row( (true, _) => " ", (false, PrunePhase::CopyFinal) => "(final)", (false, PrunePhase::CopyNonfinal) => "(nonfinal)", + (false, PrunePhase::Delete) => "(delete)", }; print!( "\r{:<30} | {:>10} | {:>9}s {phase}", @@ -73,6 +74,11 @@ fn print_copy_row( } impl PruneReporter for Progress { + fn start(&mut self, req: &PruneRequest) { + let history_pct = req.history_pct * 100.0; + println!("Remove {history_pct:.2}% of historical blocks"); + } + fn 
start_analyze(&mut self) { if !self.initial_analyze { println!(""); @@ -104,12 +110,13 @@ impl PruneReporter for Progress { ); show_stats(stats.as_slice(), HashSet::new()).ok(); println!(); - self.initial_analyze = false; - } - fn start_copy(&mut self) { - println!("Copying data to new tables and replacing existing tables with them"); - print_copy_header(); + if self.initial_analyze { + // After analyzing, we start the actual work + println!("Pruning tables"); + print_copy_header(); + } + self.initial_analyze = false; } fn start_table(&mut self, _table: &str) { @@ -119,7 +126,7 @@ impl PruneReporter for Progress { fn prune_batch(&mut self, table: &str, rows: usize, phase: PrunePhase, finished: bool) { self.table_rows += rows; - print_copy_row( + print_batch( table, self.table_rows, self.table_start.elapsed(), @@ -141,7 +148,7 @@ impl PruneReporter for Progress { println!(); } - fn finish_prune(&mut self) { + fn finish(&mut self) { println!( "Finished pruning in {}s. Writing was blocked for {}s", self.start.elapsed().as_secs(), diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index c9a725d199a..8a19cd21bf4 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -410,7 +410,7 @@ pub fn transact_block( ptr: &BlockPtr, firehose_cursor: &FirehoseCursor, count: i32, -) -> Result<(), StoreError> { +) -> Result { use crate::diesel::BoolExpressionMethods; use subgraph_deployment as d; @@ -419,7 +419,7 @@ pub fn transact_block( let count_sql = entity_count_sql(count); - let row_count = update( + let rows = update( d::table.filter(d::id.eq(site.id)).filter( // Asserts that the processing direction is forward. 
d::latest_ethereum_block_number @@ -434,12 +434,13 @@ pub fn transact_block( d::entity_count.eq(sql(&count_sql)), d::current_reorg_depth.eq(0), )) - .execute(conn) + .returning(d::earliest_block_number) + .get_results::(conn) .map_err(StoreError::from)?; - match row_count { + match rows.len() { // Common case: A single row was updated. - 1 => Ok(()), + 1 => Ok(rows[0]), // No matching rows were found. This is an error. By the filter conditions, this can only be // due to a missing deployment (which `block_ptr` catches) or duplicate block processing. diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 2fe261f8f10..fd217944e9a 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -6,7 +6,8 @@ use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - EntityKey, EntityType, PruneReporter, PruneRequest, StoredDynamicDataSource, + EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, + StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; use graph::data::query::Trace; @@ -17,6 +18,7 @@ use graph::prelude::{ SubgraphDeploymentEntity, }; use graph::semver::Version; +use itertools::Itertools; use lru_time_cache::LruCache; use rand::{seq::SliceRandom, thread_rng}; use std::borrow::Cow; @@ -27,7 +29,7 @@ use std::ops::Bound; use std::ops::Deref; use std::str::FromStr; use std::sync::{atomic::AtomicUsize, Arc, Mutex}; -use std::time::Instant; +use std::time::{Duration, Instant}; use graph::components::store::EntityCollection; use graph::components::subgraph::{ProofOfIndexingFinisher, ProofOfIndexingVersion}; @@ -1121,7 +1123,8 @@ impl DeploymentStore { } pub(crate) fn transact_block_operations( - &self, + self: &Arc, + logger: &Logger, site: Arc, block_ptr_to: &BlockPtr, firehose_cursor: &FirehoseCursor, @@ -1137,14 
+1140,14 @@ impl DeploymentStore { self.get_conn()? }; - let event = deployment::with_lock(&conn, &site, || { - conn.transaction(|| -> Result<_, StoreError> { - // Emit a store event for the changes we are about to make. We - // wait with sending it until we have done all our other work - // so that we do not hold a lock on the notification queue - // for longer than we have to - let event: StoreEvent = StoreEvent::from_mods(&site.deployment, mods); + // Emit a store event for the changes we are about to make. We + // wait with sending it until we have done all our other work + // so that we do not hold a lock on the notification queue + // for longer than we have to + let event: StoreEvent = StoreEvent::from_mods(&site.deployment, mods); + let (layout, earliest_block) = deployment::with_lock(&conn, &site, || { + conn.transaction(|| -> Result<_, StoreError> { // Make the changes let layout = self.layout(&conn, site.clone())?; @@ -1177,12 +1180,33 @@ impl DeploymentStore { )?; } - deployment::transact_block(&conn, &site, block_ptr_to, firehose_cursor, count)?; + let earliest_block = + deployment::transact_block(&conn, &site, block_ptr_to, firehose_cursor, count)?; - Ok(event) + Ok((layout, earliest_block)) }) })?; + if block_ptr_to.number as f64 + > earliest_block as f64 + + layout.history_blocks as f64 * ENV_VARS.store.history_slack_factor + { + let _section = stopwatch.start_section("transact_blocks_prune"); + + let this = self.clone(); + let reporter = OngoingPruneReporter::new(logger.cheap_clone()); + + let req = PruneRequest::new( + &site.as_ref().into(), + layout.history_blocks, + ENV_VARS.reorg_threshold, + earliest_block, + block_ptr_to.number, + )?; + + graph::block_on(this.prune(reporter, site, req))?; + } + Ok(event) } @@ -1766,3 +1790,81 @@ fn resolve_column_names<'a, T: AsRef>( }) .collect() } + +/// A helper to log progress during pruning that is kicked off from +/// `transact_block_operations` +struct OngoingPruneReporter { + logger: Logger, + start: 
Instant, + analyze_start: Instant, + analyze_duration: Duration, + rows_copied: usize, + rows_deleted: usize, + tables: Vec, +} + +impl OngoingPruneReporter { + fn new(logger: Logger) -> Box { + Box::new(Self { + logger, + start: Instant::now(), + analyze_start: Instant::now(), + analyze_duration: Duration::from_secs(0), + rows_copied: 0, + rows_deleted: 0, + tables: Vec::new(), + }) + } +} + +impl OngoingPruneReporter { + fn tables_as_string(&self) -> String { + if self.tables.is_empty() { + "ø".to_string() + } else { + format!("[{}]", self.tables.iter().join(",")) + } + } +} + +impl PruneReporter for OngoingPruneReporter { + fn start(&mut self, req: &PruneRequest) { + self.start = Instant::now(); + info!(&self.logger, "Start pruning historical entities"; + "history_pct" => format!("{:.2}", req.history_pct * 100.0), + "history_blocks" => req.history_blocks, + "earliest_block" => req.earliest_block, + "latest_block" => req.latest_block); + } + + fn start_analyze(&mut self) { + self.analyze_start = Instant::now() + } + + fn finish_analyze(&mut self, _stats: &[VersionStats], analyzed: &[&str]) { + self.analyze_duration += self.analyze_start.elapsed(); + debug!(&self.logger, "Analyzed {} tables", analyzed.len(); "time_s" => self.analyze_start.elapsed().as_secs()); + } + + fn start_table(&mut self, table: &str) { + self.tables.push(table.to_string()); + } + + fn prune_batch(&mut self, _table: &str, rows: usize, phase: PrunePhase, _finished: bool) { + match phase.strategy() { + PruningStrategy::Copy => self.rows_copied += rows, + PruningStrategy::Delete => self.rows_deleted += rows, + } + } + fn finish(&mut self) { + info!( + &self.logger, + "Finished pruning entities"; + "tables" => self.tables_as_string(), + "rows_deleted" => self.rows_deleted, + "rows_copied" => self.rows_copied, + "time_s" => self.start.elapsed().as_secs(), + "analyze_time_s" => self.analyze_duration.as_secs() + ) + } +} diff --git a/store/postgres/src/relational/prune.rs 
b/store/postgres/src/relational/prune.rs index 635563b5279..149c3215379 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -384,6 +384,8 @@ impl Layout { req: &PruneRequest, cancel: &CancelHandle, ) -> Result<(), CancelableError> { + reporter.start(req); + let stats = self.version_stats(conn, reporter, true, cancel)?; let prunable_tables: Vec<_> = self.prunable_tables(&stats, req).into_iter().collect(); @@ -403,7 +405,6 @@ impl Layout { // that `final_block` is far enough from the subgraph head that it // stays final even if a revert happens during this loop, but that // is the definition of 'final' - reporter.start_copy(); for (table, strat) in &prunable_tables { reporter.start_table(table.name.as_str()); match strat { @@ -442,7 +443,37 @@ impl Layout { })?; reporter.finish_switch(); } - PruningStrategy::Delete => unimplemented!("Coming soon"), + PruningStrategy::Delete => { + // Delete all entity versions whose range was closed + // before `req.earliest_block` + let (min_vid, max_vid) = table.vid_range(conn, 0, req.earliest_block)?; + let mut batch_size = AdaptiveBatchSize::new(&table); + let mut next_vid = min_vid; + while next_vid <= max_vid { + let start = Instant::now(); + let rows = sql_query(format!( + "delete from {} \ + where coalesce(upper(block_range), 2147483647) <= $1 \ + and vid >= $2 and vid < $2 + $3", + table.qualified_name + )) + .bind::(req.earliest_block) + .bind::(next_vid) + .bind::(&batch_size) + .execute(conn)?; + + next_vid += batch_size.size; + + batch_size.adapt(start.elapsed()); + + reporter.prune_batch( + table.name.as_str(), + rows as usize, + PrunePhase::Delete, + next_vid > max_vid, + ); + } + } } reporter.finish_table(table.name.as_str()); } @@ -455,7 +486,7 @@ impl Layout { let tables = prunable_tables.iter().map(|(table, _)| *table).collect(); self.analyze_tables(conn, reporter, tables, cancel)?; - reporter.finish_prune(); + reporter.finish(); Ok(()) } diff --git 
a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index b6c5d0f56ab..226b99bdf0e 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -261,6 +261,7 @@ impl SyncStore { ) -> Result<(), StoreError> { self.retry("transact_block_operations", move || { let event = self.writable.transact_block_operations( + &self.logger, self.site.clone(), block_ptr_to, firehose_cursor, diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 8d3e7f74e27..505f010e137 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -5,7 +5,8 @@ use std::{marker::PhantomData, str::FromStr}; use test_store::*; use graph::components::store::{ - DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, PruneRequest, + DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, + PruneRequest, PruningStrategy, }; use graph::data::store::scalar; use graph::data::subgraph::schema::*; @@ -543,6 +544,7 @@ fn prune() { fn check_at_block( store: &DieselSubgraphStore, src: &DeploymentLocator, + strategy: PruningStrategy, block: BlockNumber, exp: Vec<&str>, ) { @@ -561,62 +563,84 @@ fn prune() { .into_iter() .map(|entity| entity.id().unwrap()) .collect(); - assert_eq!(act, exp, "different users visible at block {block}"); - } - - run_test(|store, src| async move { - store - .set_history_blocks(&src, -3, 10) - .expect_err("history_blocks can not be set to a negative number"); - - store - .set_history_blocks(&src, 10, 10) - .expect_err("history_blocks must be bigger than reorg_threshold"); - - // Add another version for user 2 at block 4 - let user2 = create_test_entity( - "2", - USER, - "Cindini", - "dinici@email.com", - 44_i32, - 157.1, - true, - Some("red"), + assert_eq!( + act, exp, + "different users visible at block {block} with {strategy}" ); - transact_and_wait(&store, &src, BLOCKS[5].clone(), vec![user2]) - .await - .unwrap(); - - // Setup and the above 
addition create these user versions: - // id | versions - // ---+--------- - // 1 | [0,) - // 2 | [1,5) [5,) - // 3 | [1,2) [2,) + } - // Forward block ptr to block 6 - transact_and_wait(&store, &src, BLOCKS[6].clone(), vec![]) - .await - .unwrap(); + for strategy in [PruningStrategy::Copy, PruningStrategy::Delete] { + run_test(move |store, src| async move { + store + .set_history_blocks(&src, -3, 10) + .expect_err("history_blocks can not be set to a negative number"); - // Prune to 3 blocks of history, with a reorg threshold of 1 where - // we have blocks from [0, 6]. That should only remove the [1,2) - // version of user 3 - let req = PruneRequest::new(&src, 3, 1, 0, 6)?; - store - .prune(Box::new(Progress), &src, req) - .await - .expect("pruning works"); - - // Check which versions exist at every block, even if they are - // before the new earliest block, since we don't have a convenient - // way to load all entity versions with their block range - check_at_block(&store, &src, 0, vec!["1"]); - check_at_block(&store, &src, 1, vec!["1", "2"]); - for block in 2..=5 { - check_at_block(&store, &src, block, vec!["1", "2", "3"]); - } - Ok(()) - }) + store + .set_history_blocks(&src, 10, 10) + .expect_err("history_blocks must be bigger than reorg_threshold"); + + // Add another version for user 2 at block 4 + let user2 = create_test_entity( + "2", + USER, + "Cindini", + "dinici@email.com", + 44_i32, + 157.1, + true, + Some("red"), + ); + transact_and_wait(&store, &src, BLOCKS[5].clone(), vec![user2]) + .await + .unwrap(); + + // Setup and the above addition create these user versions: + // id | versions + // ---+--------- + // 1 | [0,) + // 2 | [1,5) [5,) + // 3 | [1,2) [2,) + + // Forward block ptr to block 6 + transact_and_wait(&store, &src, BLOCKS[6].clone(), vec![]) + .await + .unwrap(); + + // Prune to 3 blocks of history, with a reorg threshold of 1 where + // we have blocks from [0, 6]. 
That should only remove the [1,2) + // version of user 3 + let mut req = PruneRequest::new(&src, 3, 1, 0, 6)?; + // Change the thresholds so that we select the desired strategy + match strategy { + PruningStrategy::Copy => { + req.copy_threshold = 0.0; + req.delete_threshold = 0.0; + } + PruningStrategy::Delete => { + req.copy_threshold = 1.0; + req.delete_threshold = 0.0; + } + } + // We have 5 versions for 3 entities + assert_eq!( + Some(strategy), + req.strategy(3.0 / 5.0), + "changing thresholds didn't yield desired strategy" + ); + store + .prune(Box::new(Progress), &src, req) + .await + .expect("pruning works"); + + // Check which versions exist at every block, even if they are + // before the new earliest block, since we don't have a convenient + // way to load all entity versions with their block range + check_at_block(&store, &src, strategy, 0, vec!["1"]); + check_at_block(&store, &src, strategy, 1, vec!["1", "2"]); + for block in 2..=5 { + check_at_block(&store, &src, strategy, block, vec!["1", "2", "3"]); + } + Ok(()) + }) + } } From 83ee05ec450bde95416f5c16158570047e2cb273 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 4 Mar 2023 11:54:07 -0800 Subject: [PATCH 0058/2104] store: Annotate pruning queries to enhance observability --- store/postgres/src/relational/prune.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 149c3215379..b998eb0ec1d 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -46,7 +46,8 @@ impl Table { // Determine the last vid that we need to copy let VidRange { min_vid, max_vid } = sql_query(format!( - "select coalesce(min(vid), 0) as min_vid, \ + "/* controller=prune,first={first_block},last={last_block} */ \ + select coalesce(min(vid), 0) as min_vid, \ coalesce(max(vid), -1) as max_vid from {src} \ where lower(block_range) <= $2 \ and 
coalesce(upper(block_range), 2147483647) > $1 \ @@ -133,7 +134,8 @@ impl TablePair { // The conditions on `block_range` are expressed redundantly // to make more indexes useable sql_query(format!( - "insert into {dst}({column_list}) \ + "/* controller=prune,phase=final,start_vid={next_vid},next_vid={batch_size} */ \ + insert into {dst}({column_list}) \ select {column_list} from {src} \ where lower(block_range) <= $2 \ and coalesce(upper(block_range), 2147483647) > $1 \ @@ -142,7 +144,8 @@ impl TablePair { and vid >= $3 and vid < $3 + $4 \ order by vid", src = self.src.qualified_name, - dst = self.dst.qualified_name + dst = self.dst.qualified_name, + batch_size = batch_size.size, )) .bind::(earliest_block) .bind::(final_block) @@ -194,7 +197,8 @@ impl TablePair { // The conditions on `block_range` are expressed redundantly // to make more indexes useable sql_query(format!( - "insert into {dst}({column_list}) \ + "/* controller=prune,phase=nonfinal,start_vid={next_vid},next_vid={batch_size} */ \ + insert into {dst}({column_list}) \ select {column_list} from {src} \ where coalesce(upper(block_range), 2147483647) > $1 \ and block_range && int4range($1, null) \ @@ -202,6 +206,7 @@ impl TablePair { order by vid", dst = self.dst.qualified_name, src = self.src.qualified_name, + batch_size = batch_size.size )) .bind::(final_block) .bind::(next_vid) @@ -452,10 +457,12 @@ impl Layout { while next_vid <= max_vid { let start = Instant::now(); let rows = sql_query(format!( - "delete from {} \ + "/* controller=prune,phase=delete,next_vid={next_vid},batch_size={batch_size} */ \ + delete from {qname} \ where coalesce(upper(block_range), 2147483647) <= $1 \ and vid >= $2 and vid < $2 + $3", - table.qualified_name + qname = table.qualified_name, + batch_size = batch_size.size )) .bind::(req.earliest_block) .bind::(next_vid) From 5940398c01df0254e00c1e44b5e438619a7260c5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 6 Mar 2023 15:31:55 -0800 Subject: [PATCH 0059/2104] 
store: Estimate amount of history based on actual amount of history We used to determine the total number of blocks in a subgraph based on its latest and earliest blocks. With ongoing pruning, the earliest block is updated every time we prune, even though the logic in PruneRequest.strategy might have us actually not do anything. That leads to a situation where we think the subgraph contains much fewer blocks than it really does, and we therefore underestimate how much of its data is historical. We now remember for each table the block at which we actually pruned, which might be long before the subgraph's earliest block, and use that to determine how many blocks are present. As an example, assume we want to keep 100 blocks of history, in a subgraph that is at block 1000 and earliest block 800 and a table that was last pruned at block 500. Previously, we would have estimated that 50% of the table is historical, when in reality 80% is historical. --- graph/src/components/store/mod.rs | 28 ++++++++---- node/src/manager/commands/prune.rs | 3 +- node/src/manager/commands/stats.rs | 2 +- .../down.sql | 2 + .../up.sql | 2 + store/postgres/src/catalog.rs | 43 ++++++++++++++++--- store/postgres/src/deployment_store.rs | 1 - store/postgres/src/relational/prune.rs | 9 +++- store/postgres/tests/graft.rs | 11 ++++- 9 files changed, 79 insertions(+), 22 deletions(-) create mode 100644 store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/down.sql create mode 100644 store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/up.sql diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index feba1856a29..b19601f604d 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1141,6 +1141,8 @@ pub struct VersionStats { pub tablename: String, /// The ratio `entities / versions` pub ratio: f64, + /// The last block to which this table was pruned + pub last_pruned_block: Option, } /// What phase of pruning we 
are working on @@ -1217,8 +1219,6 @@ pub struct PruneRequest { pub final_block: BlockNumber, /// The latest block, i.e., the subgraph head pub latest_block: BlockNumber, - /// An estimate of how much of the deployment we will remove - pub history_pct: f64, /// Use the copy strategy when removing more than this fraction of /// history. Initialized from `ENV_VARS.store.copy_threshold`, but can /// be modified after construction @@ -1273,9 +1273,6 @@ impl PruneRequest { let earliest_block = latest_block - history_blocks; let final_block = latest_block - reorg_threshold; - let total_blocks = latest_block - first_block + 1; - - let history_pct = 1.0 - history_blocks as f64 / total_blocks as f64; Ok(Self { history_blocks, @@ -1283,7 +1280,6 @@ impl PruneRequest { earliest_block, final_block, latest_block, - history_pct, copy_threshold, delete_threshold, }) @@ -1297,7 +1293,7 @@ impl PruneRequest { /// are removing more than `delete_threshold` percent of the versions, we /// prune by deleting. If we would remove less than `delete_threshold` /// percent of the versions, we don't prune. - pub fn strategy(&self, version_ratio: f64) -> Option { + pub fn strategy(&self, stats: &VersionStats) -> Option { // If the deployment doesn't have enough history to cover the reorg // threshold, do not prune if self.earliest_block >= self.final_block { @@ -1308,7 +1304,7 @@ impl PruneRequest { // entity versions are distributed evenly across all blocks so // that `history_pct` will tell us how much of that data pruning // will remove. 
- let removal_ratio = self.history_pct * (1.0 - version_ratio); + let removal_ratio = self.history_pct(stats) * (1.0 - stats.ratio); if removal_ratio >= self.copy_threshold { Some(PruningStrategy::Copy) } else if removal_ratio >= self.delete_threshold { @@ -1317,6 +1313,22 @@ impl PruneRequest { None } } + + /// Return an estimate of the fraction of the entities that are + /// historical in the table whose `stats` we are given + fn history_pct(&self, stats: &VersionStats) -> f64 { + let total_blocks = self.latest_block - stats.last_pruned_block.unwrap_or(0); + if total_blocks <= 0 || total_blocks < self.history_blocks { + // Something has gone very wrong; this could happen if the + // subgraph is ever rewound to before the last_pruned_block or + // if this is called when the subgraph has fewer blocks than + // history_blocks. In both cases, which should be transient, + // pretend that we would not delete any history + 0.0 + } else { + 1.0 - self.history_blocks as f64 / total_blocks as f64 + } + } } /// Represents an item retrieved from an diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 4334cc1fbc1..52288dcab09 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -75,8 +75,7 @@ fn print_batch( impl PruneReporter for Progress { fn start(&mut self, req: &PruneRequest) { - let history_pct = req.history_pct * 100.0; - println!("Remove {history_pct:.2}% of historical blocks"); + println!("Prune to {} historical blocks", req.history_blocks); } fn start_analyze(&mut self) { diff --git a/node/src/manager/commands/stats.rs b/node/src/manager/commands/stats.rs index 1f8d7474df5..c7f768c436b 100644 --- a/node/src/manager/commands/stats.rs +++ b/node/src/manager/commands/stats.rs @@ -108,7 +108,7 @@ pub fn show( ) -> Result<(), anyhow::Error> { let (site, conn) = site_and_conn(pools, search)?; - let stats = store_catalog::stats(&conn, &site.namespace)?; + let stats = store_catalog::stats(&conn, 
&site)?; let account_like = store_catalog::account_like(&conn, &site)?; diff --git a/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/down.sql b/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/down.sql new file mode 100644 index 00000000000..25195963d0b --- /dev/null +++ b/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/down.sql @@ -0,0 +1,2 @@ +alter table subgraphs.table_stats + drop column last_pruned_block; diff --git a/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/up.sql b/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/up.sql new file mode 100644 index 00000000000..3cb31928a1a --- /dev/null +++ b/store/postgres/migrations/2023-03-06-233030_add_last_pruned_block/up.sql @@ -0,0 +1,2 @@ +alter table subgraphs.table_stats + add column last_pruned_block int4; diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index f9f43f873ae..de477cab182 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -8,6 +8,7 @@ use diesel::{ }; use graph::components::store::EntityType; use graph::components::store::VersionStats; +use graph::prelude::BlockNumber; use itertools::Itertools; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fmt::Write; @@ -100,6 +101,7 @@ table! 
{ deployment -> Integer, table_name -> Text, is_account_like -> Nullable, + last_pruned_block -> Nullable, } } @@ -445,8 +447,8 @@ pub fn copy_account_like(conn: &PgConnection, src: &Site, dst: &Site) -> Result< ForeignServer::metadata_schema(&src.shard) }; let query = format!( - "insert into subgraphs.table_stats(deployment, table_name, is_account_like) - select $2 as deployment, ts.table_name, ts.is_account_like + "insert into subgraphs.table_stats(deployment, table_name, is_account_like, last_pruned_block) + select $2 as deployment, ts.table_name, ts.is_account_like, ts.last_pruned_block from {src_nsp}.table_stats ts where ts.deployment = $1", src_nsp = src_nsp @@ -457,6 +459,27 @@ pub fn copy_account_like(conn: &PgConnection, src: &Site, dst: &Site) -> Result< .execute(conn)?) } +pub fn set_last_pruned_block( + conn: &PgConnection, + site: &Site, + table_name: &SqlName, + last_pruned_block: BlockNumber, +) -> Result<(), StoreError> { + use table_stats as ts; + + insert_into(ts::table) + .values(( + ts::deployment.eq(site.id), + ts::table_name.eq(table_name.as_str()), + ts::last_pruned_block.eq(last_pruned_block), + )) + .on_conflict((ts::deployment, ts::table_name)) + .do_update() + .set(ts::last_pruned_block.eq(last_pruned_block)) + .execute(conn)?; + Ok(()) +} + pub(crate) mod table_schema { use super::*; @@ -649,7 +672,7 @@ pub(crate) fn drop_index( Ok(()) } -pub fn stats(conn: &PgConnection, namespace: &Namespace) -> Result, StoreError> { +pub fn stats(conn: &PgConnection, site: &Site) -> Result, StoreError> { #[derive(Queryable, QueryableByName)] pub struct DbStats { #[sql_type = "Integer"] @@ -661,6 +684,8 @@ pub fn stats(conn: &PgConnection, namespace: &Namespace) -> Result, } impl From for VersionStats { @@ -670,6 +695,7 @@ pub fn stats(conn: &PgConnection, namespace: &Namespace) -> Result Result Result(namespace.as_str()) + .bind::(site.id) + .bind::(site.namespace.as_str()) .load::(conn) .map_err(StoreError::from)?; diff --git 
a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index fd217944e9a..9bc42dae19e 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1831,7 +1831,6 @@ impl PruneReporter for OngoingPruneReporter { fn start(&mut self, req: &PruneRequest) { self.start = Instant::now(); info!(&self.logger, "Start pruning historical entities"; - "history_pct" => format!("{:.2}", req.history_pct * 100.0), "history_blocks" => req.history_blocks, "earliest_block" => req.earliest_block, "latest_block" => req.latest_block); diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index b998eb0ec1d..b6c68cc6493 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -287,7 +287,7 @@ impl Layout { reporter.finish_analyze_table(table.name.as_str()); cancel.check_cancel()?; } - let stats = catalog::stats(conn, &self.site.namespace)?; + let stats = catalog::stats(conn, &self.site)?; let analyzed: Vec<_> = tables.iter().map(|table| table.name.as_str()).collect(); reporter.finish_analyze(&stats, &analyzed); @@ -330,13 +330,14 @@ impl Layout { let mut prunable_tables = self .tables .values() + .filter(|table| !table.immutable) .filter_map(|table| { stats .iter() .find(|stats| stats.tablename == table.name.as_str()) .map(|stats| (table, stats)) }) - .filter_map(|(table, stats)| req.strategy(stats.ratio).map(|strat| (table, strat))) + .filter_map(|(table, stats)| req.strategy(stats).map(|strat| (table, strat))) .collect::>(); prunable_tables.sort_by(|(a, _), (b, _)| a.name.as_str().cmp(b.name.as_str())); prunable_tables @@ -489,6 +490,10 @@ impl Layout { catalog::drop_schema(conn, dst_nsp.as_str())?; } + for (table, _) in &prunable_tables { + catalog::set_last_pruned_block(conn, &self.site, &table.name, req.earliest_block)?; + } + // Analyze the new tables let tables = prunable_tables.iter().map(|(table, _)| *table).collect(); 
self.analyze_tables(conn, reporter, tables, cancel)?; diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 505f010e137..5fdb48dd03e 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -6,7 +6,7 @@ use test_store::*; use graph::components::store::{ DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, - PruneRequest, PruningStrategy, + PruneRequest, PruningStrategy, VersionStats, }; use graph::data::store::scalar; use graph::data::subgraph::schema::*; @@ -622,9 +622,16 @@ fn prune() { } } // We have 5 versions for 3 entities + let stats = VersionStats { + entities: 3, + versions: 5, + tablename: USER.to_ascii_lowercase(), + ratio: 3.0 / 5.0, + last_pruned_block: None, + }; assert_eq!( Some(strategy), - req.strategy(3.0 / 5.0), + req.strategy(&stats), "changing thresholds didn't yield desired strategy" ); store From 57778968dd60f4cc940b91d1e579462b01a4e79b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 17 Mar 2023 11:36:41 -0700 Subject: [PATCH 0060/2104] graph: Update tokio to 1.26.0 --- Cargo.lock | 138 ++++++++++++++++++++++++++++++++--------------- graph/Cargo.toml | 2 +- 2 files changed, 97 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75fce9b770c..53b79938f85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,9 +163,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" @@ -1468,7 +1468,7 @@ checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -2604,9 +2604,9 @@ checksum = 
"3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.131" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "linked-hash-map" @@ -2755,24 +2755,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] @@ -2897,15 +2887,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -3115,7 +3096,7 @@ dependencies = [ "libc", "redox_syscall 0.2.10", "smallvec", - "windows-sys", + "windows-sys 0.32.0", ] [[package]] @@ -4170,9 +4151,9 @@ checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -4431,7 
+4412,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] @@ -4497,21 +4478,22 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.16.1" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", - "winapi", + "windows-sys 0.45.0", ] [[package]] @@ -5093,6 +5075,12 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.82" @@ -5526,43 +5514,109 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.32.0", + "windows_i686_gnu 0.32.0", + "windows_i686_msvc 0.32.0", + "windows_x86_64_gnu 0.32.0", + "windows_x86_64_msvc 0.32.0", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_msvc" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_i686_gnu" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_msvc" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + 
[[package]] name = "windows_x86_64_gnu" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_msvc" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "winreg" version = "0.10.1" diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 3856c5a9e8c..c71c64c94f0 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -44,7 +44,7 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" -tokio = { version = "1.16.1", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.26.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.12", features = ["sync"] } tokio-retry = "0.3.0" url = "2.3.1" From 40fbae62290ed2daae5b3c28b139aad3d4e05701 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 17 Mar 2023 12:09:05 -0700 Subject: [PATCH 0061/2104] store: Move retry logic into its own module --- store/postgres/src/lib.rs | 1 + store/postgres/src/retry.rs | 68 ++++++++++++++++++++++++++++ store/postgres/src/writable.rs | 83 
++++++++-------------------------- 3 files changed, 87 insertions(+), 65 deletions(-) create mode 100644 store/postgres/src/retry.rs diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index 73b081e8ca9..32b673258ef 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -32,6 +32,7 @@ mod primary; pub mod query_store; mod relational; mod relational_queries; +mod retry; mod sql_value; mod store; mod store_events; diff --git a/store/postgres/src/retry.rs b/store/postgres/src/retry.rs new file mode 100644 index 00000000000..d19df52a69b --- /dev/null +++ b/store/postgres/src/retry.rs @@ -0,0 +1,68 @@ +//! Helpers to retry an operation indefinitely with exponential backoff +//! while the database is not available +use std::time::Duration; + +use graph::{ + prelude::StoreError, + slog::{warn, Logger}, + util::backoff::ExponentialBackoff, +}; + +const BACKOFF_BASE: Duration = Duration::from_millis(100); +const BACKOFF_CEIL: Duration = Duration::from_secs(10); + +fn log_backoff_warning(logger: &Logger, op: &str, backoff: &ExponentialBackoff) { + warn!(logger, + "database unavailable, will retry"; + "operation" => op, + "attempt" => backoff.attempt, + "delay_ms" => backoff.delay().as_millis()); +} + +/// Run `f` with exponential backoff until it succeeds or it produces an +/// error other than `DatabaseUnavailable`. In other words, keep retrying +/// `f` until the database is available. +/// +/// Do not use this from an async context since it will block the current +/// thread. 
Use `forever_async` instead +pub(crate) fn forever(logger: &Logger, op: &str, f: F) -> Result +where + F: Fn() -> Result, +{ + let mut backoff = ExponentialBackoff::new(BACKOFF_BASE, BACKOFF_CEIL); + loop { + match f() { + Ok(v) => return Ok(v), + Err(StoreError::DatabaseUnavailable) => { + log_backoff_warning(logger, op, &backoff); + } + Err(e) => return Err(e), + } + backoff.sleep(); + } +} + +/// Run `f` with exponential backoff until it succeeds or it produces an +/// error other than `DatabaseUnavailable`. In other words, keep retrying +/// `f` until the database is available. +pub(crate) async fn forever_async( + logger: &Logger, + op: &str, + f: F, +) -> Result +where + F: Fn() -> Fut, + Fut: std::future::Future>, +{ + let mut backoff = ExponentialBackoff::new(BACKOFF_BASE, BACKOFF_CEIL); + loop { + match f().await { + Ok(v) => return Ok(v), + Err(StoreError::DatabaseUnavailable) => { + log_backoff_warning(logger, op, &backoff); + } + Err(e) => return Err(e), + } + backoff.sleep_async().await; + } +} diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 226b99bdf0e..62b47b57097 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,7 +1,6 @@ use std::collections::BTreeSet; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Mutex; -use std::time::Duration; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; @@ -23,13 +22,13 @@ use graph::{ BlockPtr, DeploymentHash, EntityModification, Error, Logger, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, ENV_VARS, }, - slog::{error, warn}, - util::backoff::ExponentialBackoff, + slog::error, }; use store::StoredDynamicDataSource; use crate::deployment_store::DeploymentStore; use crate::primary::DeploymentId; +use crate::retry; use crate::{primary, primary::Site, relational::Layout, SubgraphStore}; /// A wrapper around `SubgraphStore` that only exposes functions that are @@ -73,9 +72,6 @@ 
struct SyncStore { } impl SyncStore { - const BACKOFF_BASE: Duration = Duration::from_millis(100); - const BACKOFF_CEIL: Duration = Duration::from_secs(10); - fn new( subgraph_store: SubgraphStore, logger: Logger, @@ -93,49 +89,6 @@ impl SyncStore { }) } - fn log_backoff_warning(&self, op: &str, backoff: &ExponentialBackoff) { - warn!(self.logger, - "database unavailable, will retry"; - "operation" => op, - "attempt" => backoff.attempt, - "delay_ms" => backoff.delay().as_millis()); - } - - fn retry(&self, op: &str, f: F) -> Result - where - F: Fn() -> Result, - { - let mut backoff = ExponentialBackoff::new(Self::BACKOFF_BASE, Self::BACKOFF_CEIL); - loop { - match f() { - Ok(v) => return Ok(v), - Err(StoreError::DatabaseUnavailable) => { - self.log_backoff_warning(op, &backoff); - } - Err(e) => return Err(e), - } - backoff.sleep(); - } - } - - async fn retry_async(&self, op: &str, f: F) -> Result - where - F: Fn() -> Fut, - Fut: std::future::Future>, - { - let mut backoff = ExponentialBackoff::new(Self::BACKOFF_BASE, Self::BACKOFF_CEIL); - loop { - match f().await { - Ok(v) => return Ok(v), - Err(StoreError::DatabaseUnavailable) => { - self.log_backoff_warning(op, &backoff); - } - Err(e) => return Err(e), - } - backoff.sleep_async().await; - } - } - /// Try to send a `StoreEvent`; if sending fails, log the error but /// return `Ok(())` fn try_send_store_event(&self, event: StoreEvent) -> Result<(), StoreError> { @@ -153,7 +106,7 @@ impl SyncStore { // Methods that mirror `WritableStoreTrait` impl SyncStore { async fn block_ptr(&self) -> Result, StoreError> { - self.retry_async("block_ptr", || { + retry::forever_async(&self.logger, "block_ptr", || { let site = self.site.clone(); async move { self.writable.block_ptr(site).await } }) @@ -168,7 +121,7 @@ impl SyncStore { } fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { - self.retry("start_subgraph_deployment", || { + retry::forever(&self.logger, "start_subgraph_deployment", || { let 
graft_base = match self.writable.graft_pending(&self.site.deployment)? { Some((base_id, base_ptr)) => { let src = self.store.layout(&base_id)?; @@ -188,7 +141,7 @@ impl SyncStore { block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, ) -> Result<(), StoreError> { - self.retry("revert_block_operations", || { + retry::forever(&self.logger, "revert_block_operations", || { let event = self.writable.revert_block_operations( self.site.clone(), block_ptr_to.clone(), @@ -204,7 +157,7 @@ impl SyncStore { current_ptr: &BlockPtr, parent_ptr: &BlockPtr, ) -> Result { - self.retry("unfail_deterministic_error", || { + retry::forever(&self.logger, "unfail_deterministic_error", || { self.writable .unfail_deterministic_error(self.site.clone(), current_ptr, parent_ptr) }) @@ -214,14 +167,14 @@ impl SyncStore { &self, current_ptr: &BlockPtr, ) -> Result { - self.retry("unfail_non_deterministic_error", || { + retry::forever(&self.logger, "unfail_non_deterministic_error", || { self.writable .unfail_non_deterministic_error(self.site.clone(), current_ptr) }) } async fn fail_subgraph(&self, error: SubgraphError) -> Result<(), StoreError> { - self.retry_async("fail_subgraph", || { + retry::forever_async(&self.logger, "fail_subgraph", || { let error = error.clone(); async { self.writable @@ -234,7 +187,7 @@ impl SyncStore { } async fn supports_proof_of_indexing(&self) -> Result { - self.retry_async("supports_proof_of_indexing", || async { + retry::forever_async(&self.logger, "supports_proof_of_indexing", || async { self.writable .supports_proof_of_indexing(self.site.clone()) .await @@ -243,7 +196,7 @@ impl SyncStore { } fn get(&self, key: &EntityKey, block: BlockNumber) -> Result, StoreError> { - self.retry("get", || { + retry::forever(&self.logger, "get", || { self.writable.get(self.site.cheap_clone(), key, block) }) } @@ -259,7 +212,7 @@ impl SyncStore { manifest_idx_and_name: &[(u32, String)], processed_data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { - 
self.retry("transact_block_operations", move || { + retry::forever(&self.logger, "transact_block_operations", move || { let event = self.writable.transact_block_operations( &self.logger, self.site.clone(), @@ -292,14 +245,14 @@ impl SyncStore { .push(key.entity_id.into()); } - self.retry("get_many", || { + retry::forever(&self.logger, "get_many", || { self.writable .get_many(self.site.cheap_clone(), &by_type, block) }) } async fn is_deployment_synced(&self) -> Result { - self.retry_async("is_deployment_synced", || async { + retry::forever_async(&self.logger, "is_deployment_synced", || async { self.writable .exists_and_synced(self.site.deployment.cheap_clone()) .await @@ -308,7 +261,7 @@ impl SyncStore { } fn unassign_subgraph(&self, site: &Site) -> Result<(), StoreError> { - self.retry("unassign_subgraph", || { + retry::forever(&self.logger, "unassign_subgraph", || { let pconn = self.store.primary_conn()?; pconn.transaction(|| -> Result<_, StoreError> { let changes = pconn.unassign_subgraph(site)?; @@ -322,7 +275,7 @@ impl SyncStore { block: BlockNumber, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError> { - self.retry_async("load_dynamic_data_sources", || async { + retry::forever_async(&self.logger, "load_dynamic_data_sources", || async { self.writable .load_dynamic_data_sources( self.site.cheap_clone(), @@ -337,7 +290,7 @@ impl SyncStore { pub(crate) async fn causality_region_curr_val( &self, ) -> Result, StoreError> { - self.retry_async("causality_region_curr_val", || async { + retry::forever_async(&self.logger, "causality_region_curr_val", || async { self.writable .causality_region_curr_val(self.site.cheap_clone()) .await @@ -354,7 +307,7 @@ impl SyncStore { } fn deployment_synced(&self) -> Result<(), StoreError> { - self.retry("deployment_synced", || { + retry::forever(&self.logger, "deployment_synced", || { let event = { // Make sure we drop `pconn` before we call into the deployment // store so that we do not hold two database connections 
which @@ -395,7 +348,7 @@ impl SyncStore { } async fn health(&self) -> Result { - self.retry_async("health", || async { + retry::forever_async(&self.logger, "health", || async { self.writable.health(&self.site).await.map(Into::into) }) .await From 8f9457654c8cd7136b3ea429d12dc4873258dd39 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 16 Mar 2023 19:18:17 -0700 Subject: [PATCH 0062/2104] graph, store: Perform pruning in a separate task --- graph/src/components/store/err.rs | 2 + graph/src/components/store/mod.rs | 1 + store/postgres/src/deployment_store.rs | 99 +++++++++++++++++++++++--- 3 files changed, 94 insertions(+), 8 deletions(-) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index 7187e6a687a..e20c1f9915b 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -58,6 +58,8 @@ pub enum StoreError { DeploymentSchemaVersion::LATEST )] UnsupportedDeploymentSchemaVersion(i32), + #[error("pruning failed: {0}")] + PruneFailure(String), } // Convenience to report a constraint violation diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index b19601f604d..82482a03cab 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1204,6 +1204,7 @@ pub enum PruningStrategy { Delete, } +#[derive(Copy, Clone)] /// A request to prune a deployment. 
This struct encapsulates decision /// making around the best strategy for pruning (deleting historical /// entities or copying current ones) It needs to be filled with accurate diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 9bc42dae19e..5acc04f00c7 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -18,6 +18,7 @@ use graph::prelude::{ SubgraphDeploymentEntity, }; use graph::semver::Version; +use graph::tokio::task::{JoinError, JoinHandle}; use itertools::Itertools; use lru_time_cache::LruCache; use rand::{seq::SliceRandom, thread_rng}; @@ -45,7 +46,6 @@ use graph_graphql::prelude::api_schema; use web3::types::Address; use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; -use crate::catalog; use crate::deployment::{self, OnSync}; use crate::detail::ErrorDetail; use crate::dynds::DataSourcesTable; @@ -53,6 +53,7 @@ use crate::primary::DeploymentId; use crate::relational::index::{CreateIndex, Method}; use crate::relational::{Layout, LayoutCache, SqlName, Table}; use crate::relational_queries::FromEntityData; +use crate::{catalog, retry}; use crate::{connection_pool::ConnectionPool, detail}; use crate::{dynds, primary::Site}; @@ -87,6 +88,8 @@ pub(crate) struct SubgraphInfo { pub(crate) instrument: bool, } +type PruneHandle = JoinHandle>; + pub struct StoreInner { logger: Logger, @@ -108,6 +111,8 @@ pub struct StoreInner { /// hosts this because it lives long enough, but it is managed from /// the entities module pub(crate) layout_cache: LayoutCache, + + prune_handles: Mutex>, } /// Storage of the data for individual deployments. 
Each `DeploymentStore` @@ -163,6 +168,7 @@ impl DeploymentStore { conn_round_robin_counter: AtomicUsize::new(0), subgraph_cache: Mutex::new(LruCache::with_capacity(100)), layout_cache: LayoutCache::new(ENV_VARS.store.query_stats_refresh_interval), + prune_handles: Mutex::new(HashMap::new()), }; DeploymentStore(Arc::new(store)) @@ -1191,23 +1197,100 @@ impl DeploymentStore { > earliest_block as f64 + layout.history_blocks as f64 * ENV_VARS.store.history_slack_factor { + // This only measures how long it takes to spawn pruning, not + // how long pruning itself takes let _section = stopwatch.start_section("transact_blocks_prune"); - let this = self.clone(); - let reporter = OngoingPruneReporter::new(logger.cheap_clone()); + self.spawn_prune( + logger, + site, + layout.history_blocks, + earliest_block, + block_ptr_to.number, + )?; + } + + Ok(event) + } + + fn spawn_prune( + self: &Arc, + logger: &Logger, + site: Arc, + history_blocks: BlockNumber, + earliest_block: BlockNumber, + latest_block: BlockNumber, + ) -> Result<(), StoreError> { + fn prune_in_progress(store: &DeploymentStore, site: &Site) -> Result { + async fn reap(handle: PruneHandle) -> Result, JoinError> { + handle.await.map(|join| join.map(|_| ())) + } + let finished = store + .prune_handles + .lock() + .unwrap() + .get(&site.id) + .map(|handle| handle.is_finished()); + match finished { + Some(true) => { + // A previous prune has finished + let handle = store + .prune_handles + .lock() + .unwrap() + .remove(&site.id) + .unwrap(); + match graph::block_on(reap(handle)) { + Ok(Ok(())) => Ok(false), + Ok(Err(err)) => Err(StoreError::PruneFailure(err.to_string())), + Err(join_err) => Err(StoreError::PruneFailure(join_err.to_string())), + } + } + Some(false) => { + // A previous prune is still in progress + Ok(true) + } + None => { + // There is no prune in progress + Ok(false) + } + } + } + + async fn run( + logger: Logger, + store: Arc, + site: Arc, + req: PruneRequest, + ) -> Result<(), StoreError> { + let 
logger = logger.cheap_clone(); + retry::forever_async(&logger, "prune", move || { + let store = store.cheap_clone(); + let reporter = OngoingPruneReporter::new(store.logger.cheap_clone()); + let site = site.cheap_clone(); + async move { store.prune(reporter, site, req).await.map(|_| ()) } + }) + .await + } + + if !prune_in_progress(&self, &site)? { let req = PruneRequest::new( &site.as_ref().into(), - layout.history_blocks, + history_blocks, ENV_VARS.reorg_threshold, earliest_block, - block_ptr_to.number, + latest_block, )?; - graph::block_on(this.prune(reporter, site, req))?; + let deployment_id = site.id; + let handle = graph::spawn(run(logger.cheap_clone(), self.clone(), site, req)); + self.prune_handles + .lock() + .unwrap() + .insert(deployment_id, handle); } - - Ok(event) + Ok(()) } fn rewind_with_conn( From d725a0797d62bdab0d973412bbc2d31fd2004214 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 17 Mar 2023 13:07:07 -0700 Subject: [PATCH 0063/2104] store: Refactor advisory_lock to reduce code duplication --- store/postgres/src/advisory_lock.rs | 90 +++++++++++++++++++---------- 1 file changed, 60 insertions(+), 30 deletions(-) diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index ead57b35532..f588865442d 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -19,9 +19,56 @@ use diesel::{sql_query, PgConnection, RunQueryDsl}; use graph::prelude::StoreError; use crate::command_support::catalog::Site; +use crate::primary::DeploymentId; -/// Get a lock for running migrations. Blocks until we get -/// the lock. +/// A locking scope for a particular deployment. We use different scopes for +/// different purposes, and in each scope we use an advisory lock for each +/// deployment. +struct Scope { + id: i32, +} + +impl Scope { + /// Try to lock the deployment in this scope with the given id. Return + /// `true` if we got the lock, and `false` if it is already locked. 
+ fn try_lock(&self, conn: &PgConnection, id: DeploymentId) -> Result { + #[derive(QueryableByName)] + struct Locked { + #[sql_type = "Bool"] + locked: bool, + } + + sql_query(format!( + "select pg_try_advisory_lock({}, {id}) as locked", + self.id + )) + .get_result::(conn) + .map(|res| res.locked) + .map_err(StoreError::from) + } + + /// Lock the deployment in this scope with the given id. Blocks until we + /// can get the lock + fn lock(&self, conn: &PgConnection, id: DeploymentId) -> Result<(), StoreError> { + sql_query(format!("select pg_advisory_lock({}, {id})", self.id)) + .execute(conn) + .map(|_| ()) + .map_err(StoreError::from) + } + + /// Unlock the deployment in this scope with the given id. + fn unlock(&self, conn: &PgConnection, id: DeploymentId) -> Result<(), StoreError> { + sql_query(format!("select pg_advisory_unlock({}, {id})", self.id)) + .execute(conn) + .map(|_| ()) + .map_err(StoreError::from) + } +} + +const COPY: Scope = Scope { id: 1 }; +const WRITE: Scope = Scope { id: 2 }; + +/// Get a lock for running migrations. Blocks until we get the lock. pub(crate) fn lock_migration(conn: &PgConnection) -> Result<(), StoreError> { sql_query("select pg_advisory_lock(1)").execute(conn)?; @@ -34,40 +81,26 @@ pub(crate) fn unlock_migration(conn: &PgConnection) -> Result<(), StoreError> { Ok(()) } +/// Take the lock used to keep two copy operations from running simultaneously on +the same deployment. Block until we can get the lock pub(crate) fn lock_copying(conn: &PgConnection, dst: &Site) -> Result<(), StoreError> { - sql_query(format!("select pg_advisory_lock(1, {})", dst.id)) - .execute(conn) - .map(|_| ()) - .map_err(StoreError::from) + COPY.lock(conn, dst.id) } +/// Release the lock acquired with `lock_copying`. 
pub(crate) fn unlock_copying(conn: &PgConnection, dst: &Site) -> Result<(), StoreError> { - sql_query(format!("select pg_advisory_unlock(1, {})", dst.id)) - .execute(conn) - .map(|_| ()) - .map_err(StoreError::from) + COPY.unlock(conn, dst.id) } -/// Try to lock deployment `site` with a session lock. Return `true` if we -/// got the lock, and `false` if we did not. You don't want to use this -/// directly. Instead, use `deployment::with_lock` +/// Take the lock used to keep two operations from writing to the deployment +/// simultaneously. Return `true` if we got the lock, and `false` if we did +/// not. You don't want to use this directly. Instead, use +/// `deployment::with_lock` pub(crate) fn lock_deployment_session( conn: &PgConnection, site: &Site, ) -> Result { - #[derive(QueryableByName)] - struct Locked { - #[sql_type = "Bool"] - locked: bool, - } - - sql_query(format!( - "select pg_try_advisory_lock(2, {}) as locked", - site.id - )) - .get_result::(conn) - .map(|res| res.locked) - .map_err(StoreError::from) + WRITE.try_lock(conn, site.id) } /// Release the lock acquired with `lock_deployment_session`. 
@@ -75,8 +108,5 @@ pub(crate) fn unlock_deployment_session( conn: &PgConnection, site: &Site, ) -> Result<(), StoreError> { - sql_query(format!("select pg_advisory_unlock(2, {})", site.id)) - .execute(conn) - .map(|_| ()) - .map_err(StoreError::from) + WRITE.unlock(conn, site.id) } From 9e6be9577838b133d696214af4e2e3593c46f736 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 17 Mar 2023 13:24:40 -0700 Subject: [PATCH 0064/2104] store: Lock pruning at the database level --- store/postgres/src/advisory_lock.rs | 11 ++++++++++ store/postgres/src/deployment_store.rs | 28 ++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index f588865442d..9a1091b782c 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -67,6 +67,7 @@ impl Scope { const COPY: Scope = Scope { id: 1 }; const WRITE: Scope = Scope { id: 2 }; +const PRUNE: Scope = Scope { id: 3 }; /// Get a lock for running migrations. Blocks until we get the lock. pub(crate) fn lock_migration(conn: &PgConnection) -> Result<(), StoreError> { @@ -110,3 +111,13 @@ pub(crate) fn unlock_deployment_session( ) -> Result<(), StoreError> { WRITE.unlock(conn, site.id) } + +/// Try to take the lock used to prevent two prune operations from running at the +/// same time. Return `true` if we got the lock, and `false` otherwise. 
+pub(crate) fn try_lock_pruning(conn: &PgConnection, site: &Site) -> Result { + PRUNE.try_lock(conn, site.id) +} + +pub(crate) fn unlock_pruning(conn: &PgConnection, site: &Site) -> Result<(), StoreError> { + PRUNE.unlock(conn, site.id) +} diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 5acc04f00c7..0ac03b228ef 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -53,7 +53,7 @@ use crate::primary::DeploymentId; use crate::relational::index::{CreateIndex, Method}; use crate::relational::{Layout, LayoutCache, SqlName, Table}; use crate::relational_queries::FromEntityData; -use crate::{catalog, retry}; +use crate::{advisory_lock, catalog, retry}; use crate::{connection_pool::ConnectionPool, detail}; use crate::{dynds, primary::Site}; @@ -904,12 +904,18 @@ impl DeploymentStore { pub(crate) async fn prune( self: &Arc, - mut reporter: Box, + reporter: Box, site: Arc, req: PruneRequest, ) -> Result, StoreError> { - let store = self.clone(); - self.with_conn(move |conn, cancel| { + fn do_prune( + store: Arc, + conn: &PooledConnection>, + site: Arc, + cancel: &CancelHandle, + req: PruneRequest, + mut reporter: Box, + ) -> Result, CancelableError> { let layout = store.layout(conn, site.clone())?; cancel.check_cancel()?; let state = deployment::state(conn, site.deployment.clone())?; @@ -933,6 +939,20 @@ impl DeploymentStore { layout.prune(&store.logger, reporter.as_mut(), conn, &req, cancel)?; Ok(reporter) + } + + let store = self.clone(); + self.with_conn(move |conn, cancel| { + // We lock pruning for this deployment to make sure that if the + // deployment is reassigned to another node, that node won't + // kick off a pruning run while this node might still be pruning + if advisory_lock::try_lock_pruning(conn, &site)? 
{ + let res = do_prune(store, conn, site.cheap_clone(), cancel, req, reporter); + advisory_lock::unlock_pruning(conn, &site)?; + res + } else { + Ok(reporter) + } }) .await } From aeb7a761412c97629c084bd2c65e8cf31b251691 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 10:42:24 -0700 Subject: [PATCH 0065/2104] graph: Fix name of GRAPH_STORE_HISTORY_DELETE_THRESHOLD env var --- graph/src/env/store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 113755259bc..f89f394bf17 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -182,7 +182,7 @@ pub struct InnerStore { batch_target_duration_in_secs: u64, #[envconfig(from = "GRAPH_STORE_HISTORY_COPY_THRESHOLD", default = "0.5")] copy_threshold: ZeroToOneF64, - #[envconfig(from = "GRAPH_STORE_HISTORY_COPY_THRESHOLD", default = "0.05")] + #[envconfig(from = "GRAPH_STORE_HISTORY_DELETE_THRESHOLD", default = "0.05")] delete_threshold: ZeroToOneF64, #[envconfig(from = "GRAPH_STORE_HISTORY_SLACK_FACTOR", default = "1.2")] history_slack_factor: HistorySlackF64, From 78c22a408d2598c976aad9cc7d960c69a517cdcb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 10:45:59 -0700 Subject: [PATCH 0066/2104] store: Fix names of debug comments in prune queries --- store/postgres/src/relational/prune.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index b6c68cc6493..80b06b9af93 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -134,7 +134,7 @@ impl TablePair { // The conditions on `block_range` are expressed redundantly // to make more indexes useable sql_query(format!( - "/* controller=prune,phase=final,start_vid={next_vid},next_vid={batch_size} */ \ + "/* controller=prune,phase=final,start_vid={next_vid},batch_size={batch_size} */ \ insert into 
{dst}({column_list}) \ select {column_list} from {src} \ where lower(block_range) <= $2 \ @@ -197,7 +197,7 @@ impl TablePair { // The conditions on `block_range` are expressed redundantly // to make more indexes useable sql_query(format!( - "/* controller=prune,phase=nonfinal,start_vid={next_vid},next_vid={batch_size} */ \ + "/* controller=prune,phase=nonfinal,start_vid={next_vid},batch_size={batch_size} */ \ insert into {dst}({column_list}) \ select {column_list} from {src} \ where coalesce(upper(block_range), 2147483647) > $1 \ @@ -458,7 +458,7 @@ impl Layout { while next_vid <= max_vid { let start = Instant::now(); let rows = sql_query(format!( - "/* controller=prune,phase=delete,next_vid={next_vid},batch_size={batch_size} */ \ + "/* controller=prune,phase=delete,start_vid={next_vid},batch_size={batch_size} */ \ delete from {qname} \ where coalesce(upper(block_range), 2147483647) <= $1 \ and vid >= $2 and vid < $2 + $3", From 577a350041a6ee2b7a349d4e28415075a745eb26 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 10:49:06 -0700 Subject: [PATCH 0067/2104] graph: Require that we keep strictly more than reorg_threshold history Also, remove a check that could never fail --- graph/src/components/store/mod.rs | 4 ++-- store/postgres/src/deployment_store.rs | 6 ------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 82482a03cab..a1c42d5f1bf 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1255,12 +1255,12 @@ impl PruneRequest { "the delete threshold must be between 0 and 1 but is {delete_threshold}" )); } - if history_blocks < reorg_threshold { + if history_blocks <= reorg_threshold { return Err(constraint_violation!( "the deployment {} needs to keep at least {} blocks \ of history and can't be pruned to only {} blocks of history", deployment, - reorg_threshold, + reorg_threshold + 1, history_blocks )); } diff --git 
a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 0ac03b228ef..577218f329e 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -888,12 +888,6 @@ impl DeploymentStore { )); } - if history_blocks <= 0 { - return Err(constraint_violation!( - "history_blocks must be a positive number" - )); - } - // Invalidate the layout cache for this site so that the next access // will use the updated value self.layout_cache.remove(site); From 33557076e2317ab43825c45e952b27d1545e481c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 11:12:01 -0700 Subject: [PATCH 0068/2104] store: Use now_or_never to reap join handle of prune task --- store/postgres/src/deployment_store.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 577218f329e..d9d13e94ebb 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -13,12 +13,13 @@ use graph::components::versions::VERSIONS; use graph::data::query::Trace; use graph::data::subgraph::{status, SPEC_VERSION_0_0_6}; use graph::data_source::CausalityRegion; +use graph::prelude::futures03::FutureExt; use graph::prelude::{ tokio, ApiVersion, CancelHandle, CancelToken, CancelableError, EntityOperation, PoolWaitStats, SubgraphDeploymentEntity, }; use graph::semver::Version; -use graph::tokio::task::{JoinError, JoinHandle}; +use graph::tokio::task::JoinHandle; use itertools::Itertools; use lru_time_cache::LruCache; use rand::{seq::SliceRandom, thread_rng}; @@ -1236,10 +1237,6 @@ impl DeploymentStore { latest_block: BlockNumber, ) -> Result<(), StoreError> { fn prune_in_progress(store: &DeploymentStore, site: &Site) -> Result { - async fn reap(handle: PruneHandle) -> Result, JoinError> { - handle.await.map(|join| join.map(|_| ())) - } - let finished = store .prune_handles .lock() @@ -1255,10 
+1252,13 @@ impl DeploymentStore { .unwrap() .remove(&site.id) .unwrap(); - match graph::block_on(reap(handle)) { - Ok(Ok(())) => Ok(false), - Ok(Err(err)) => Err(StoreError::PruneFailure(err.to_string())), - Err(join_err) => Err(StoreError::PruneFailure(join_err.to_string())), + match FutureExt::now_or_never(handle) { + Some(Ok(Ok(()))) => Ok(false), + Some(Ok(Err(err))) => Err(StoreError::PruneFailure(err.to_string())), + Some(Err(join_err)) => Err(StoreError::PruneFailure(join_err.to_string())), + None => Err(constraint_violation!( + "prune handle is finished but not ready" + )), } } Some(false) => { From 6f4485695d258cb19dd607fe9e4b74bf9a799a52 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 11:35:31 -0700 Subject: [PATCH 0069/2104] store: Never move `earliest_block` backwards --- store/postgres/src/deployment.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 8a19cd21bf4..17f172002e6 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -1146,6 +1146,11 @@ pub fn set_entity_count( Ok(()) } +/// Set the earliest block of `site` to the larger of `earliest_block` and +/// the current value. This means that the `earliest_block_number` can never +/// go backwards, only forward. This is important so that copying into +/// `site` can not move the earliest block backwards if `site` was also +/// pruned while the copy was running. 
pub fn set_earliest_block( conn: &PgConnection, site: &Site, @@ -1155,6 +1160,7 @@ pub fn set_earliest_block( update(d::table.filter(d::id.eq(site.id))) .set(d::earliest_block_number.eq(earliest_block)) + .filter(d::earliest_block_number.lt(earliest_block)) .execute(conn)?; Ok(()) } From 9dd482812f153d8d59ce75504ee000fb3ed71761 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 23 Mar 2023 11:49:27 -0700 Subject: [PATCH 0070/2104] graph, store: Retain history_blocks setting when copying --- graph/src/data/subgraph/schema.rs | 2 ++ store/postgres/src/deployment.rs | 2 ++ store/postgres/src/deployment_store.rs | 6 ++++++ store/postgres/src/detail.rs | 1 + 4 files changed, 11 insertions(+) diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index 67717c03964..e877ecb5c6d 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -180,6 +180,7 @@ pub struct SubgraphManifestEntity { pub schema: String, pub raw_yaml: Option, pub entities_with_causality_region: Vec, + pub history_blocks: BlockNumber, } impl SubgraphManifestEntity { @@ -196,6 +197,7 @@ impl SubgraphManifestEntity { schema: manifest.schema.document.clone().to_string(), raw_yaml: Some(raw_yaml), entities_with_causality_region, + history_blocks: BLOCK_NUMBER_MAX, } } diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 17f172002e6..ce422a8f0c8 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -1047,6 +1047,7 @@ pub fn create_deployment( schema, raw_yaml, entities_with_causality_region, + history_blocks, }, start_block, graft_base, @@ -1091,6 +1092,7 @@ pub fn create_deployment( m::start_block_number.eq(start_block.as_ref().map(|ptr| ptr.number)), m::raw_yaml.eq(raw_yaml), m::entities_with_causality_region.eq(entities_with_causality_region), + m::history_blocks.eq(history_blocks), ); if exists && replace { diff --git a/store/postgres/src/deployment_store.rs 
b/store/postgres/src/deployment_store.rs index d9d13e94ebb..d7fdae68658 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1583,6 +1583,12 @@ impl DeploymentStore { src_deployment.earliest_block_number, )?; + deployment::set_history_blocks( + &conn, + &dst.site, + src_deployment.manifest.history_blocks, + )?; + // Analyze all tables for this deployment for entity_name in dst.tables.keys() { self.analyze_with_conn(site.cheap_clone(), entity_name.as_str(), &conn)?; diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index e59df301f6c..4ca5f6de830 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -354,6 +354,7 @@ impl From for SubgraphManifestEntity { schema: value.schema, raw_yaml: value.raw_yaml, entities_with_causality_region: value.entities_with_causality_region, + history_blocks: value.history_blocks, } } } From 966760aefe65b46b8f24c57514da99be40bc6766 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 27 Mar 2023 17:45:06 -0700 Subject: [PATCH 0071/2104] store: Map two more metadata tables across shards --- .../migrations/2023-03-28-004152_trigger_remap/down.sql | 2 ++ .../postgres/migrations/2023-03-28-004152_trigger_remap/up.sql | 3 +++ store/postgres/src/connection_pool.rs | 2 ++ 3 files changed, 7 insertions(+) create mode 100644 store/postgres/migrations/2023-03-28-004152_trigger_remap/down.sql create mode 100644 store/postgres/migrations/2023-03-28-004152_trigger_remap/up.sql diff --git a/store/postgres/migrations/2023-03-28-004152_trigger_remap/down.sql b/store/postgres/migrations/2023-03-28-004152_trigger_remap/down.sql new file mode 100644 index 00000000000..bba31ad7e3f --- /dev/null +++ b/store/postgres/migrations/2023-03-28-004152_trigger_remap/down.sql @@ -0,0 +1,2 @@ +-- No schema changes, migration is only there to trigger remapping of +-- foreign metadata diff --git a/store/postgres/migrations/2023-03-28-004152_trigger_remap/up.sql 
b/store/postgres/migrations/2023-03-28-004152_trigger_remap/up.sql new file mode 100644 index 00000000000..c88c945ef6e --- /dev/null +++ b/store/postgres/migrations/2023-03-28-004152_trigger_remap/up.sql @@ -0,0 +1,3 @@ +-- No schema changes, migration is only there to trigger remapping of +-- foreign metadata +select 1; diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index b1749abead2..eff1560a347 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -202,6 +202,8 @@ impl ForeignServer { "subgraph_deployment_assignment", "subgraph", "subgraph_version", + "subgraph_deployment", + "subgraph_manifest", ] { let create_stmt = catalog::create_foreign_table(conn, "subgraphs", table_name, &nsp, &self.name)?; From 48c1bab391e36a68424f3c34708754a22ed51ec8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 27 Mar 2023 18:07:30 -0700 Subject: [PATCH 0072/2104] store: Add helper ForeignServer::metadata_schema_in --- store/postgres/src/catalog.rs | 6 +----- store/postgres/src/connection_pool.rs | 16 ++++++++++++++-- store/postgres/src/deployment.rs | 6 +----- store/postgres/src/dynds/shared.rs | 6 +----- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index de477cab182..1cd4e51e165 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -441,11 +441,7 @@ pub fn set_account_like( } pub fn copy_account_like(conn: &PgConnection, src: &Site, dst: &Site) -> Result { - let src_nsp = if src.shard == dst.shard { - "subgraphs".to_string() - } else { - ForeignServer::metadata_schema(&src.shard) - }; + let src_nsp = ForeignServer::metadata_schema_in(&src.shard, &dst.shard); let query = format!( "insert into subgraphs.table_stats(deployment, table_name, is_account_like, last_pruned_block) select $2 as deployment, ts.table_name, ts.is_account_like, ts.last_pruned_block diff --git 
a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index eff1560a347..7c1cb69f7fa 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -54,12 +54,24 @@ impl ForeignServer { format!("shard_{}", shard.as_str()) } - /// The name of the schema under which the `subgraphs` schema for `shard` - /// is accessible in shards that are not `shard` + /// The name of the schema under which the `subgraphs` schema for + /// `shard` is accessible in shards that are not `shard`. In most cases + /// you actually want to use `metadata_schema_in` pub fn metadata_schema(shard: &Shard) -> String { format!("{}_subgraphs", Self::name(shard)) } + /// The name of the schema under which the `subgraphs` schema for + /// `shard` is accessible in the shard `current`. It is permissible for + /// `shard` and `current` to be the same. + pub fn metadata_schema_in(shard: &Shard, current: &Shard) -> String { + if shard == current { + "subgraphs".to_string() + } else { + Self::metadata_schema(&shard) + } + } + pub fn new_from_raw(shard: String, postgres_url: &str) -> Result { Self::new(Shard::new(shard)?, postgres_url) } diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index ce422a8f0c8..a0d6829f3c0 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -944,11 +944,7 @@ pub(crate) fn copy_errors( ) -> Result { use subgraph_error as e; - let src_nsp = if src.shard == dst.shard { - "subgraphs".to_string() - } else { - ForeignServer::metadata_schema(&src.shard) - }; + let src_nsp = ForeignServer::metadata_schema_in(&src.shard, &dst.shard); // Check whether there are any errors for dst which indicates we already // did copy diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index ee9f73dfb9d..45f3108806a 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -183,11 +183,7 @@ pub(crate) fn copy( 
return Ok(0); } - let src_nsp = if src.shard == dst.shard { - "subgraphs".to_string() - } else { - ForeignServer::metadata_schema(&src.shard) - }; + let src_nsp = ForeignServer::metadata_schema_in(&src.shard, &dst.shard); // Check whether there are any dynamic data sources for dst which // indicates we already did copy From a337ee7c0565d9999955574aa8799c6f6ab05b0a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 27 Mar 2023 18:14:10 -0700 Subject: [PATCH 0073/2104] store: Copy earliest block from source late in the copy flow Fixes https://github.com/graphprotocol/graph-node/issues/4496 --- store/postgres/src/deployment.rs | 20 ++++++++++++++++++++ store/postgres/src/deployment_store.rs | 15 +++++++++------ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index a0d6829f3c0..865722b5f92 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -1163,6 +1163,26 @@ pub fn set_earliest_block( Ok(()) } +/// Copy the `earliest_block` attribute from `src` to `dst`. 
The copy might +/// go across shards and use the metadata tables mapped into the shard for +/// `conn` which must be the shard for `dst` +pub fn copy_earliest_block(conn: &PgConnection, src: &Site, dst: &Site) -> Result<(), StoreError> { + use subgraph_deployment as d; + + let src_nsp = ForeignServer::metadata_schema_in(&src.shard, &dst.shard); + + let query = format!( + "(select earliest_block_number from {src_nsp}.subgraph_deployment where id = {})", + src.id + ); + + update(d::table.filter(d::id.eq(dst.id))) + .set(d::earliest_block_number.eq(sql(&query))) + .execute(conn)?; + + Ok(()) +} + pub fn on_sync(conn: &PgConnection, id: impl Into) -> Result { use subgraph_manifest as m; diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index d7fdae68658..d40e48c68ef 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1577,12 +1577,6 @@ impl DeploymentStore { info!(logger, "Counted the entities"; "time_ms" => start.elapsed().as_millis()); - deployment::set_earliest_block( - &conn, - &dst.site, - src_deployment.earliest_block_number, - )?; - deployment::set_history_blocks( &conn, &dst.site, @@ -1594,6 +1588,15 @@ impl DeploymentStore { self.analyze_with_conn(site.cheap_clone(), entity_name.as_str(), &conn)?; } + // The `earliest_block` for `src` might have changed while + // we did the copy if `src` was pruned while we copied; + // adjusting it very late in the copy process ensures that + // we truly do have all the data starting at + // `earliest_block` and do not inadvertently expose data + // that might be incomplete because a prune on the source + // removed data just before we copied it + deployment::copy_earliest_block(&conn, &src.site, &dst.site)?; + // Set the block ptr to the graft point to signal that we successfully // performed the graft crate::deployment::forward_block_ptr(&conn, &dst.site.deployment, &block)?; From 7f67bb8836241c4304d5f8c098e39d2eb3708743 Mon Sep 17 
00:00:00 2001 From: Filipe Azevedo Date: Tue, 28 Mar 2023 19:56:20 +0100 Subject: [PATCH 0074/2104] graph-chain-ethereum: Avoid adapters with errors (#4468) * graph-chain-ethereum: Avoid adapters with errors - Wire EndpointMetrics for rpc endpoints - Use a percentage of traffic to retest errored adapters - firehose endpoint dedup with provider --- Cargo.lock | 1 + chain/ethereum/Cargo.toml | 1 + chain/ethereum/src/adapter.rs | 2 - chain/ethereum/src/ethereum_adapter.rs | 18 +- chain/ethereum/src/network.rs | 382 +++++++++++++++--- chain/ethereum/src/transport.rs | 66 ++- .../src/blockchain/firehose_block_ingestor.rs | 2 +- graph/src/blockchain/firehose_block_stream.rs | 2 +- .../src/blockchain/substreams_block_stream.rs | 2 +- graph/src/firehose/endpoints.rs | 31 +- node/src/bin/manager.rs | 4 +- node/src/chain.rs | 46 ++- node/src/config.rs | 19 +- node/src/main.rs | 15 +- node/src/manager/commands/config.rs | 7 +- node/src/manager/commands/run.rs | 15 +- 16 files changed, 471 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53b79938f85..feb30dbac0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1655,6 +1655,7 @@ dependencies = [ "test-store", "tiny-keccak 1.5.0", "tonic-build", + "uuid", ] [[package]] diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index 5d813d0b825..170b9f554cc 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -27,6 +27,7 @@ graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] test-store = { path = "../../store/test-store" } base64 = "0.20.0" +uuid = { version = "1.3.0", features = ["v4"] } [build-dependencies] tonic-build = { workspace = true } diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 3ffa23f1f32..2319584a3e6 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -828,8 +828,6 @@ impl SubgraphEthRpcMetrics { /// or a remote node over RPC. 
#[async_trait] pub trait EthereumAdapter: Send + Sync + 'static { - fn url_hostname(&self) -> &str; - /// The `provider.label` from the adapter's configuration fn provider(&self) -> &str; diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 384daacdb12..a7a7500297b 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -60,8 +60,6 @@ use crate::{ #[derive(Debug, Clone)] pub struct EthereumAdapter { logger: Logger, - url_hostname: Arc, - /// The label for the provider from the configuration provider: String, web3: Arc>, metrics: Arc, @@ -84,7 +82,6 @@ impl CheapClone for EthereumAdapter { Self { logger: self.logger.clone(), provider: self.provider.clone(), - url_hostname: self.url_hostname.cheap_clone(), web3: self.web3.cheap_clone(), metrics: self.metrics.cheap_clone(), supports_eip_1898: self.supports_eip_1898, @@ -101,19 +98,11 @@ impl EthereumAdapter { pub async fn new( logger: Logger, provider: String, - url: &str, transport: Transport, provider_metrics: Arc, supports_eip_1898: bool, call_only: bool, ) -> Self { - // Unwrap: The transport was constructed with this url, so it is valid and has a host. - let hostname = graph::url::Url::parse(url) - .unwrap() - .host_str() - .unwrap() - .to_string(); - let web3 = Arc::new(Web3::new(transport)); // Use the client version to check if it is ganache. 
For compatibility with unit tests, be @@ -128,7 +117,6 @@ impl EthereumAdapter { EthereumAdapter { logger, provider, - url_hostname: Arc::new(hostname), web3, metrics: provider_metrics, supports_eip_1898: supports_eip_1898 && !is_ganache, @@ -840,10 +828,6 @@ impl EthereumAdapter { #[async_trait] impl EthereumAdapterTrait for EthereumAdapter { - fn url_hostname(&self) -> &str { - &self.url_hostname - } - fn provider(&self) -> &str { &self.provider } @@ -1422,7 +1406,7 @@ pub(crate) async fn blocks_with_triggers( None => { warn!(logger, "Ethereum endpoint is behind"; - "url" => eth.url_hostname() + "url" => eth.provider() ); bail!("Block {} not found in the chain", to) } diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 0b90ab6e1be..0e4303a9ade 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -1,7 +1,9 @@ -use anyhow::{anyhow, bail, Context}; +use anyhow::{anyhow, bail}; use graph::cheap_clone::CheapClone; -use graph::firehose::SubgraphLimit; -use graph::prelude::rand::{self, seq::IteratorRandom}; +use graph::endpoint::EndpointMetrics; +use graph::firehose::{AvailableCapacity, SubgraphLimit}; +use graph::prelude::rand::seq::IteratorRandom; +use graph::prelude::rand::{self, Rng}; use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; @@ -13,8 +15,11 @@ use crate::adapter::EthereumAdapter as _; use crate::capabilities::NodeCapabilities; use crate::EthereumAdapter; +pub const DEFAULT_ADAPTER_ERROR_RETEST_PERCENT: f64 = 0.2; + #[derive(Debug, Clone)] pub struct EthereumNetworkAdapter { + endpoint_metrics: Arc, pub capabilities: NodeCapabilities, adapter: Arc, /// The maximum number of times this adapter can be used. 
We use the @@ -28,15 +33,42 @@ impl EthereumNetworkAdapter { fn is_call_only(&self) -> bool { self.adapter.is_call_only() } + + pub fn get_capacity(&self) -> AvailableCapacity { + self.limit.get_capacity(Arc::strong_count(&self.adapter)) + } + + pub fn current_error_count(&self) -> u64 { + self.endpoint_metrics.get_count(&self.provider().into()) + } + pub fn provider(&self) -> &str { + self.adapter.provider() + } } -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct EthereumNetworkAdapters { pub adapters: Vec, - pub call_only_adapters: Vec, + call_only_adapters: Vec, + // Percentage of request that should be used to retest errored adapters. + retest_percent: f64, +} + +impl Default for EthereumNetworkAdapters { + fn default() -> Self { + Self::new(None) + } } impl EthereumNetworkAdapters { + pub fn new(retest_percent: Option) -> Self { + Self { + adapters: vec![], + call_only_adapters: vec![], + retest_percent: retest_percent.unwrap_or(DEFAULT_ADAPTER_ERROR_RETEST_PERCENT), + } + } + pub fn push_adapter(&mut self, adapter: EthereumNetworkAdapter) { if adapter.is_call_only() { self.call_only_adapters.push(adapter); @@ -47,7 +79,7 @@ impl EthereumNetworkAdapters { pub fn all_cheapest_with( &self, required_capabilities: &NodeCapabilities, - ) -> impl Iterator> + '_ { + ) -> impl Iterator + '_ { let cheapest_sufficient_capability = self .adapters .iter() @@ -57,27 +89,38 @@ impl EthereumNetworkAdapters { self.adapters .iter() .filter(move |adapter| Some(&adapter.capabilities) == cheapest_sufficient_capability) - .filter(|adapter| { - adapter - .limit - .has_capacity(Arc::strong_count(&adapter.adapter)) - }) - .map(|adapter| adapter.adapter.cheap_clone()) + .filter(|adapter| adapter.get_capacity() > AvailableCapacity::Unavailable) } pub fn cheapest_with( &self, required_capabilities: &NodeCapabilities, ) -> Result, Error> { - // Select randomly from the cheapest adapters that have sufficent capabilities. 
- self.all_cheapest_with(required_capabilities) - .choose(&mut rand::thread_rng()) - .with_context(|| { - anyhow!( - "A matching Ethereum network with {:?} was not found.", - required_capabilities - ) - }) + let retest_rng: f64 = (&mut rand::thread_rng()).gen(); + let cheapest = self + .all_cheapest_with(required_capabilities) + .choose_multiple(&mut rand::thread_rng(), 3); + let cheapest = cheapest.iter(); + + // If request falls below the retest threshold, use this request to try and + // reset the failed adapter. If a request succeeds the adapter will be more + // likely to be selected afterwards. + if retest_rng < self.retest_percent { + cheapest.max_by_key(|adapter| adapter.current_error_count()) + } else { + // The assumption here is that most RPC endpoints will not have limits + // which makes the check for low/high available capacity less relevant. + // So we essentially assume if it had available capacity when calling + // `all_cheapest_with` then it prolly maintains that state and so we + // just select whichever adapter is working better according to + // the number of errors. 
+ cheapest.min_by_key(|adapter| adapter.current_error_count()) + } + .map(|adapter| adapter.adapter.clone()) + .ok_or(anyhow!( + "A matching Ethereum network with {:?} was not found.", + required_capabilities + )) } pub fn cheapest(&self) -> Option> { @@ -138,13 +181,15 @@ impl EthereumNetworkAdapters { #[derive(Clone)] pub struct EthereumNetworks { + pub metrics: Arc, pub networks: HashMap, } impl EthereumNetworks { - pub fn new() -> EthereumNetworks { + pub fn new(metrics: Arc) -> EthereumNetworks { EthereumNetworks { networks: HashMap::new(), + metrics, } } @@ -161,6 +206,7 @@ impl EthereumNetworks { capabilities, adapter, limit, + endpoint_metrics: self.metrics.cheap_clone(), }); } @@ -219,13 +265,23 @@ impl EthereumNetworks { #[cfg(test)] mod tests { - use graph::{firehose::SubgraphLimit, prelude::MetricsRegistry, tokio, url::Url}; + use graph::{ + endpoint::{EndpointMetrics, Host}, + firehose::SubgraphLimit, + prelude::MetricsRegistry, + slog::{o, Discard, Logger}, + tokio, + url::Url, + }; use http::HeaderMap; use std::sync::Arc; + use uuid::Uuid; - use crate::{EthereumAdapter, EthereumNetworks, ProviderEthRpcMetrics, Transport}; + use crate::{ + EthereumAdapter, EthereumAdapterTrait, EthereumNetworks, ProviderEthRpcMetrics, Transport, + }; - use super::NodeCapabilities; + use super::{EthereumNetworkAdapter, EthereumNetworkAdapters, NodeCapabilities}; #[test] fn ethereum_capabilities_comparison() { @@ -284,18 +340,21 @@ mod tests { #[tokio::test] async fn adapter_selector_selects_eth_call() { + let metrics = Arc::new(EndpointMetrics::mock()); let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); - let transport = - Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let transport = Transport::new_rpc( + Url::parse("http://127.0.0.1").unwrap(), + HeaderMap::new(), + metrics.clone(), + ); let 
provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let eth_call_adapter = Arc::new( EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -308,7 +367,6 @@ mod tests { EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -318,7 +376,7 @@ mod tests { ); let mut adapters = { - let mut ethereum_networks = EthereumNetworks::new(); + let mut ethereum_networks = EthereumNetworks::new(metrics); ethereum_networks.insert( chain.clone(), NodeCapabilities { @@ -387,18 +445,21 @@ mod tests { #[tokio::test] async fn adapter_selector_unlimited() { + let metrics = Arc::new(EndpointMetrics::mock()); let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); - let transport = - Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let transport = Transport::new_rpc( + Url::parse("http://127.0.0.1").unwrap(), + HeaderMap::new(), + metrics.clone(), + ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let eth_call_adapter = Arc::new( EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -411,7 +472,6 @@ mod tests { EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -421,7 +481,7 @@ mod tests { ); let adapters = { - let mut ethereum_networks = EthereumNetworks::new(); + let mut ethereum_networks = EthereumNetworks::new(metrics); ethereum_networks.insert( chain.clone(), NodeCapabilities { @@ -455,18 +515,21 @@ mod tests { #[tokio::test] async fn adapter_selector_disable_call_only_fallback() { + let metrics = Arc::new(EndpointMetrics::mock()); let chain = 
"mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); - let transport = - Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let transport = Transport::new_rpc( + Url::parse("http://127.0.0.1").unwrap(), + HeaderMap::new(), + metrics.clone(), + ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let eth_call_adapter = Arc::new( EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -479,7 +542,6 @@ mod tests { EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -489,7 +551,7 @@ mod tests { ); let adapters = { - let mut ethereum_networks = EthereumNetworks::new(); + let mut ethereum_networks = EthereumNetworks::new(metrics); ethereum_networks.insert( chain.clone(), NodeCapabilities { @@ -521,18 +583,21 @@ mod tests { #[tokio::test] async fn adapter_selector_no_call_only_fallback() { + let metrics = Arc::new(EndpointMetrics::mock()); let chain = "mainnet".to_string(); let logger = graph::log::logger(true); - let mock_registry: Arc = Arc::new(MetricsRegistry::mock()); - let transport = - Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let transport = Transport::new_rpc( + Url::parse("http://127.0.0.1").unwrap(), + HeaderMap::new(), + metrics.clone(), + ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let eth_adapter = Arc::new( EthereumAdapter::new( logger.clone(), String::new(), - "http://127.0.0.1", transport.clone(), provider_metrics.clone(), true, @@ -542,7 +607,7 @@ mod tests { ); let adapters = { - let mut ethereum_networks = EthereumNetworks::new(); + let mut ethereum_networks = 
EthereumNetworks::new(metrics); ethereum_networks.insert( chain.clone(), NodeCapabilities { @@ -561,4 +626,225 @@ mod tests { false ); } + + #[tokio::test] + async fn eth_adapter_selection_multiple_adapters() { + let logger = Logger::root(Discard, o!()); + let unavailable_provider = Uuid::new_v4().to_string(); + let error_provider = Uuid::new_v4().to_string(); + let no_error_provider = Uuid::new_v4().to_string(); + + let mock_registry = Arc::new(MetricsRegistry::mock()); + let metrics = Arc::new(EndpointMetrics::new( + logger, + &[ + unavailable_provider.clone(), + error_provider.clone(), + no_error_provider.clone(), + ], + mock_registry.clone(), + )); + let logger = graph::log::logger(true); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + + let adapters = vec![ + fake_adapter( + &logger, + &unavailable_provider, + &provider_metrics, + &metrics, + false, + ) + .await, + fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false).await, + fake_adapter( + &logger, + &no_error_provider, + &provider_metrics, + &metrics, + false, + ) + .await, + ]; + + // Set errors + metrics.report_for_test(&Host::from(error_provider.clone()), false); + + let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); + let mut always_retest_adapters = EthereumNetworkAdapters::new(Some(1f64)); + adapters.iter().cloned().for_each(|adapter| { + let limit = if adapter.provider() == unavailable_provider { + SubgraphLimit::Disabled + } else { + SubgraphLimit::Unlimited + }; + + no_retest_adapters.adapters.push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter: adapter.clone(), + limit: limit.clone(), + }); + always_retest_adapters + .adapters + .push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter, + limit, + }); + }); + + assert_eq!( + 
no_retest_adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .unwrap() + .provider(), + no_error_provider + ); + assert_eq!( + always_retest_adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .unwrap() + .provider(), + error_provider + ); + } + + #[tokio::test] + async fn eth_adapter_selection_single_adapter() { + let logger = Logger::root(Discard, o!()); + let unavailable_provider = Uuid::new_v4().to_string(); + let error_provider = Uuid::new_v4().to_string(); + let no_error_provider = Uuid::new_v4().to_string(); + + let mock_registry = Arc::new(MetricsRegistry::mock()); + let metrics = Arc::new(EndpointMetrics::new( + logger, + &[ + unavailable_provider, + error_provider.clone(), + no_error_provider.clone(), + ], + mock_registry.clone(), + )); + let logger = graph::log::logger(true); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + + // Set errors + metrics.report_for_test(&Host::from(error_provider.clone()), false); + + let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); + no_retest_adapters.adapters.push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter: fake_adapter(&logger, &error_provider, &provider_metrics, &metrics, false) + .await, + limit: SubgraphLimit::Unlimited, + }); + assert_eq!( + no_retest_adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .unwrap() + .provider(), + error_provider + ); + + let mut always_retest_adapters = EthereumNetworkAdapters::new(Some(1f64)); + always_retest_adapters + .adapters + .push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter: fake_adapter( + &logger, + &no_error_provider, + &provider_metrics, + &metrics, + false, + ) + .await, + limit: SubgraphLimit::Unlimited, + 
}); + assert_eq!( + always_retest_adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .unwrap() + .provider(), + no_error_provider + ); + + let mut no_available_adapter = EthereumNetworkAdapters::default(); + no_available_adapter.adapters.push(EthereumNetworkAdapter { + endpoint_metrics: metrics.clone(), + capabilities: NodeCapabilities { + archive: true, + traces: false, + }, + adapter: fake_adapter( + &logger, + &no_error_provider, + &provider_metrics, + &metrics, + false, + ) + .await, + limit: SubgraphLimit::Disabled, + }); + let res = no_available_adapter.cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }); + assert!(res.is_err(), "{:?}", res); + } + + async fn fake_adapter( + logger: &Logger, + provider: &str, + provider_metrics: &Arc, + endpoint_metrics: &Arc, + call_only: bool, + ) -> Arc { + let transport = Transport::new_rpc( + Url::parse(&"http://127.0.0.1").unwrap(), + HeaderMap::new(), + endpoint_metrics.clone(), + ); + + Arc::new( + EthereumAdapter::new( + logger.clone(), + provider.to_string(), + transport.clone(), + provider_metrics.clone(), + true, + call_only, + ) + .await, + ) + } } diff --git a/chain/ethereum/src/transport.rs b/chain/ethereum/src/transport.rs index b30fd17d84b..6b898b95cae 100644 --- a/chain/ethereum/src/transport.rs +++ b/chain/ethereum/src/transport.rs @@ -1,3 +1,4 @@ +use graph::endpoint::{EndpointMetrics, Host, RequestLabels}; use jsonrpc_core::types::Call; use jsonrpc_core::Value; @@ -11,7 +12,11 @@ use std::future::Future; /// Abstraction over the different web3 transports. #[derive(Clone, Debug)] pub enum Transport { - RPC(http::Http), + RPC { + client: http::Http, + metrics: Arc, + host: Host, + }, IPC(ipc::Ipc), WS(ws::WebSocket), } @@ -38,22 +43,32 @@ impl Transport { /// /// Note: JSON-RPC over HTTP doesn't always support subscribing to new /// blocks (one such example is Infura's HTTP endpoint). 
- pub fn new_rpc(rpc: Url, headers: ::http::HeaderMap) -> Self { + pub fn new_rpc(rpc: Url, headers: ::http::HeaderMap, metrics: Arc) -> Self { // Unwrap: This only fails if something is wrong with the system's TLS config. let client = reqwest::Client::builder() .default_headers(headers) .build() .unwrap(); - Transport::RPC(http::Http::with_client(client, rpc)) + + let host = rpc.to_string(); + Transport::RPC { + client: http::Http::with_client(client, rpc), + metrics, + host: host.into(), + } } } impl web3::Transport for Transport { - type Out = Box> + Send + Unpin>; + type Out = Pin> + Send + 'static>>; fn prepare(&self, method: &str, params: Vec) -> (RequestId, Call) { match self { - Transport::RPC(http) => http.prepare(method, params), + Transport::RPC { + client, + metrics: _, + host: _, + } => client.prepare(method, params), Transport::IPC(ipc) => ipc.prepare(method, params), Transport::WS(ws) => ws.prepare(method, params), } @@ -61,9 +76,38 @@ impl web3::Transport for Transport { fn send(&self, id: RequestId, request: Call) -> Self::Out { match self { - Transport::RPC(http) => Box::new(http.send(id, request)), - Transport::IPC(ipc) => Box::new(ipc.send(id, request)), - Transport::WS(ws) => Box::new(ws.send(id, request)), + Transport::RPC { + client, + metrics, + host, + } => { + let metrics = metrics.cheap_clone(); + let client = client.clone(); + let method = match request { + Call::MethodCall(ref m) => m.method.as_str(), + _ => "unknown", + }; + + let labels = RequestLabels { + host: host.clone(), + req_type: method.into(), + conn_type: graph::endpoint::ConnectionType::Rpc, + }; + let out = async move { + let labels = labels; + let out = client.send(id, request).await; + match out { + Ok(_) => metrics.success(&labels), + Err(_) => metrics.failure(&labels), + } + + out + }; + + Box::pin(out) + } + Transport::IPC(ipc) => Box::pin(ipc.send(id, request)), + Transport::WS(ws) => Box::pin(ws.send(id, request)), } } } @@ -80,7 +124,11 @@ impl web3::BatchTransport 
for Transport { T: IntoIterator, { match self { - Transport::RPC(http) => Box::new(http.send_batch(requests)), + Transport::RPC { + client, + metrics: _, + host: _, + } => Box::new(client.send_batch(requests)), Transport::IPC(ipc) => Box::new(ipc.send_batch(requests)), Transport::WS(ws) => Box::new(ws.send_batch(requests)), } diff --git a/graph/src/blockchain/firehose_block_ingestor.rs b/graph/src/blockchain/firehose_block_ingestor.rs index 634c5f8d501..97703a7a93d 100644 --- a/graph/src/blockchain/firehose_block_ingestor.rs +++ b/graph/src/blockchain/firehose_block_ingestor.rs @@ -182,7 +182,7 @@ where }; let logger = self.logger.new( - o!("provider" => endpoint.provider.clone(), "network_name"=> self.network_name()), + o!("provider" => endpoint.provider.to_string(), "network_name"=> self.network_name()), ); info!( diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 3bd277652d0..93e44f31336 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -217,7 +217,7 @@ fn stream_blocks>( try_stream! 
{ loop { let endpoint = client.firehose_endpoint()?; - let logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.clone())); + let logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); info!( &logger, diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index 6adf8299ee2..f1872c7f4aa 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -135,7 +135,7 @@ where let manifest_end_block_num = end_blocks.into_iter().min().unwrap_or(0); let metrics = - SubstreamsBlockStreamMetrics::new(registry, deployment, endpoint.provider.clone()); + SubstreamsBlockStreamMetrics::new(registry, deployment, endpoint.provider.to_string()); SubstreamsBlockStream { stream: Box::pin(stream_blocks( diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 2fc5ee2ae26..9f60e9b4c00 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -37,8 +37,7 @@ const HIGH_VALUE_USED_PERCENTAGE: usize = 80; #[derive(Debug)] pub struct FirehoseEndpoint { - pub provider: String, - pub host: Host, + pub provider: Host, pub auth: AuthInterceptor, pub filters_enabled: bool, pub compression_enabled: bool, @@ -122,7 +121,6 @@ impl FirehoseEndpoint { .as_ref() .parse::() .expect("the url should have been validated by now, so it is a valid Uri"); - let host = Host::from(uri.to_string()); let endpoint_builder = match uri.scheme().unwrap_or(&Scheme::HTTP).as_str() { "http" => Channel::builder(uri), @@ -166,19 +164,18 @@ impl FirehoseEndpoint { }; FirehoseEndpoint { - provider: provider.as_ref().to_string(), + provider: provider.as_ref().into(), channel: endpoint.connect_lazy(), auth: AuthInterceptor { token }, filters_enabled, compression_enabled, subgraph_limit, endpoint_metrics, - host, } } pub fn current_error_count(&self) -> u64 { - 
self.endpoint_metrics.get_count(&self.host) + self.endpoint_metrics.get_count(&self.provider) } // we need to -1 because there will always be a reference @@ -197,7 +194,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.host.clone(), + host: self.provider.clone().into(), req_type: "unknown".into(), conn_type: ConnectionType::Firehose, }, @@ -224,7 +221,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.host.clone(), + host: self.provider.clone().into(), req_type: "unknown".into(), conn_type: ConnectionType::Firehose, }, @@ -249,7 +246,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.host.clone(), + host: self.provider.clone().into(), req_type: "unknown".into(), conn_type: ConnectionType::Substreams, }, @@ -455,7 +452,7 @@ impl FirehoseEndpoints { pub fn remove(&mut self, provider: &str) { self.0 - .retain(|network_endpoint| network_endpoint.provider != provider); + .retain(|network_endpoint| network_endpoint.provider.as_str() != provider); } } @@ -616,16 +613,12 @@ mod test { let logger = Logger::root(Discard, o!()); let endpoint_metrics = Arc::new(EndpointMetrics::new( logger, - &[ - "http://127.0.0.1/", - "http://127.0.0.2/", - "http://127.0.0.3/", - ], + &["high_error", "low availability", "high availability"], Arc::new(MetricsRegistry::mock()), )); let high_error_adapter1 = Arc::new(FirehoseEndpoint::new( - "high_error1".to_string(), + "high_error".to_string(), "http://127.0.0.1".to_string(), None, false, @@ -634,7 +627,7 @@ mod test { endpoint_metrics.clone(), )); let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( - "high_error2".to_string(), + "high_error".to_string(), "http://127.0.0.1".to_string(), None, false, @@ -661,7 +654,7 @@ mod test { 
endpoint_metrics.clone(), )); - endpoint_metrics.report_for_test(&high_error_adapter1.host, false); + endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); let mut endpoints = FirehoseEndpoints::from(vec![ high_error_adapter1.clone(), @@ -683,7 +676,7 @@ mod test { // because the others will be low or unavailable let res = endpoints.endpoint().unwrap(); // This will match both high error adapters - assert_eq!(res.host, high_error_adapter1.host); + assert_eq!(res.provider, high_error_adapter1.provider); } #[test] diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 236432ad1fe..b67afff336a 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -2,6 +2,7 @@ use clap::{Parser, Subcommand}; use config::PoolSize; use git_testament::{git_testament, render_testament}; use graph::bail; +use graph::endpoint::EndpointMetrics; use graph::log::logger_with_levels; use graph::prelude::{MetricsRegistry, BLOCK_NUMBER_MAX}; use graph::{data::graphql::effort::LoadManager, prelude::chrono, prometheus::Registry}; @@ -897,7 +898,8 @@ impl Context { async fn ethereum_networks(&self) -> anyhow::Result { let logger = self.logger.clone(); let registry = self.metrics_registry(); - create_all_ethereum_networks(logger, registry, &self.config).await + let metrics = Arc::new(EndpointMetrics::mock()); + create_all_ethereum_networks(logger, registry, &self.config, metrics).await } fn chain_store(self, chain_name: &str) -> anyhow::Result> { diff --git a/node/src/chain.rs b/node/src/chain.rs index 7efbd053d43..d40ba458a9a 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -130,11 +130,13 @@ pub fn create_substreams_networks( .entry(chain.protocol) .or_insert_with(FirehoseNetworks::new); - for i in 0..firehose.conn_pool_size { + for _ in 0..firehose.conn_pool_size { parsed_networks.insert( name.to_string(), Arc::new(FirehoseEndpoint::new( - &format!("{}-{}", provider.label, i), + // This label needs to be the original label so that the metrics 
+ // can be deduped. + &provider.label, &firehose.url, firehose.token.clone(), firehose.filters_enabled(), @@ -184,11 +186,13 @@ pub fn create_firehose_networks( // eg: pool_size = 3 and sg_limit 2 will result in 3 separate instances // of FirehoseEndpoint and each of those instance can be used in 2 different // SubgraphInstances. - for i in 0..firehose.conn_pool_size { + for _ in 0..firehose.conn_pool_size { parsed_networks.insert( name.to_string(), Arc::new(FirehoseEndpoint::new( - &format!("{}-{}", provider.label, i), + // This label needs to be the original label so that the metrics + // can be deduped. + &provider.label, &firehose.url, firehose.token.clone(), firehose.filters_enabled(), @@ -310,7 +314,7 @@ where let logger = logger.new(o!("provider" => endpoint.provider.to_string())); info!( logger, "Connecting to Firehose to get chain identifier"; - "provider" => &endpoint.provider, + "provider" => &endpoint.provider.to_string(), ); match tokio::time::timeout( NET_VERSION_WAIT_TIME, @@ -333,7 +337,7 @@ where info!( logger, "Connected to Firehose"; - "provider" => &endpoint.provider, + "provider" => &endpoint.provider.to_string(), "genesis_block" => format_args!("{}", &ptr), ); @@ -387,6 +391,7 @@ pub async fn create_all_ethereum_networks( logger: Logger, registry: Arc, config: &Config, + endpoint_metrics: Arc, ) -> anyhow::Result { let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); let eth_networks_futures = config @@ -395,7 +400,13 @@ pub async fn create_all_ethereum_networks( .iter() .filter(|(_, chain)| chain.protocol == BlockchainKind::Ethereum) .map(|(name, _)| { - create_ethereum_networks_for_chain(&logger, eth_rpc_metrics.clone(), config, name) + create_ethereum_networks_for_chain( + &logger, + eth_rpc_metrics.clone(), + config, + name, + endpoint_metrics.cheap_clone(), + ) }); Ok(try_join_all(eth_networks_futures) @@ -405,7 +416,7 @@ pub async fn create_all_ethereum_networks( a.extend(b); a }) - 
.unwrap_or_else(EthereumNetworks::new)) + .unwrap_or_else(|| EthereumNetworks::new(endpoint_metrics))) } /// Parses a single Ethereum connection string and returns its network name and `EthereumAdapter`. @@ -414,8 +425,9 @@ pub async fn create_ethereum_networks_for_chain( eth_rpc_metrics: Arc, config: &Config, network_name: &str, + endpoint_metrics: Arc, ) -> anyhow::Result { - let mut parsed_networks = EthereumNetworks::new(); + let mut parsed_networks = EthereumNetworks::new(endpoint_metrics.cheap_clone()); let chain = config .chains .chains @@ -445,7 +457,11 @@ pub async fn create_ethereum_networks_for_chain( use crate::config::Transport::*; let transport = match web3.transport { - Rpc => Transport::new_rpc(Url::parse(&web3.url)?, web3.headers.clone()), + Rpc => Transport::new_rpc( + Url::parse(&web3.url)?, + web3.headers.clone(), + endpoint_metrics.cheap_clone(), + ), Ipc => Transport::new_ipc(&web3.url).await, Ws => Transport::new_ws(&web3.url).await, }; @@ -459,7 +475,6 @@ pub async fn create_ethereum_networks_for_chain( graph_chain_ethereum::EthereumAdapter::new( logger, provider.label.clone(), - &web3.url, transport, eth_rpc_metrics.clone(), supports_eip_1898, @@ -479,6 +494,7 @@ pub async fn create_ethereum_networks_for_chain( mod test { use crate::chain::create_all_ethereum_networks; use crate::config::{Config, Opt}; + use graph::endpoint::EndpointMetrics; use graph::log::logger; use graph::prelude::{tokio, MetricsRegistry}; use graph::prometheus::Registry; @@ -508,6 +524,7 @@ mod test { unsafe_config: false, }; + let metrics = Arc::new(EndpointMetrics::mock()); let config = Config::load(&logger, &opt).expect("can create config"); let prometheus_registry = Arc::new(Registry::new()); let metrics_registry = Arc::new(MetricsRegistry::new( @@ -515,9 +532,10 @@ mod test { prometheus_registry.clone(), )); - let ethereum_networks = create_all_ethereum_networks(logger, metrics_registry, &config) - .await - .expect("Correctly parse Ethereum network args"); + let 
ethereum_networks = + create_all_ethereum_networks(logger, metrics_registry, &config, metrics) + .await + .expect("Correctly parse Ethereum network args"); let mut network_names = ethereum_networks.networks.keys().collect::>(); network_names.sort(); diff --git a/node/src/config.rs b/node/src/config.rs index 940433493dd..12b9a4c2cc8 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -421,14 +421,14 @@ impl ChainSection { Ok(Self { ingestor, chains }) } - pub fn provider_urls(&self) -> Vec { + pub fn providers(&self) -> Vec { self.chains .values() .flat_map(|chain| { chain .providers .iter() - .map(|p| p.details.url()) + .map(|p| p.label.clone()) .collect::>() }) .collect() @@ -563,21 +563,6 @@ pub enum ProviderDetails { Web3Call(Web3Provider), } -impl ProviderDetails { - pub fn url(&self) -> String { - let url = match self { - ProviderDetails::Substreams(firehose) | ProviderDetails::Firehose(firehose) => { - firehose.url.clone() - } - ProviderDetails::Web3(web3) | ProviderDetails::Web3Call(web3) => web3.url.to_string(), - }; - - // parsing and printing here normalizes the urls so we don't have - // mismatches later on. 
- url.parse::().expect("failed to parse url").to_string() - } -} - const FIREHOSE_FILTER_FEATURE: &str = "filters"; const FIREHOSE_COMPRESSION_FEATURE: &str = "compression"; const FIREHOSE_PROVIDER_FEATURES: [&str; 2] = diff --git a/node/src/main.rs b/node/src/main.rs index 9e7aff96883..9d7c79830dc 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -230,18 +230,23 @@ async fn main() { let endpoint_metrics = Arc::new(EndpointMetrics::new( logger.clone(), - &config.chains.provider_urls(), + &config.chains.providers(), metrics_registry.cheap_clone(), )); // Ethereum clients; query nodes ignore all ethereum clients and never // connect to them directly let eth_networks = if query_only { - EthereumNetworks::new() + EthereumNetworks::new(endpoint_metrics.cheap_clone()) } else { - create_all_ethereum_networks(logger.clone(), metrics_registry.clone(), &config) - .await - .expect("Failed to parse Ethereum networks") + create_all_ethereum_networks( + logger.clone(), + metrics_registry.clone(), + &config, + endpoint_metrics.cheap_clone(), + ) + .await + .expect("Failed to parse Ethereum networks") }; let mut firehose_networks_by_kind = if query_only { diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index fd79caf7c47..fa8b1013ac0 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -2,6 +2,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ anyhow::bail, + endpoint::EndpointMetrics, itertools::Itertools, prelude::{ anyhow::{anyhow, Error}, @@ -9,7 +10,7 @@ use graph::{ }, slog::Logger, }; -use graph_chain_ethereum::{EthereumAdapterTrait, NodeCapabilities, ProviderEthRpcMetrics}; +use graph_chain_ethereum::{NodeCapabilities, ProviderEthRpcMetrics}; use graph_store_postgres::DeploymentPlacer; use crate::{chain::create_ethereum_networks_for_chain, config::Config}; @@ -119,10 +120,12 @@ pub async fn provider( Ok(caps) } + let metrics = Arc::new(EndpointMetrics::mock()); let caps = 
caps_from_features(features)?; let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); let networks = - create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, config, &network).await?; + create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, config, &network, metrics) + .await?; let adapters = networks .networks .get(&network) diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index f623345e496..fec254b1123 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -74,7 +74,7 @@ pub async fn run( let endpoint_metrics = Arc::new(EndpointMetrics::new( logger.clone(), - &config.chains.provider_urls(), + &config.chains.providers(), metrics_registry.cheap_clone(), )); @@ -83,10 +83,15 @@ pub async fn run( let link_resolver = Arc::new(LinkResolver::new(ipfs_clients, env_vars.cheap_clone())); let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(metrics_registry.clone())); - let eth_networks = - create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, &config, &network_name) - .await - .expect("Failed to parse Ethereum networks"); + let eth_networks = create_ethereum_networks_for_chain( + &logger, + eth_rpc_metrics, + &config, + &network_name, + endpoint_metrics.cheap_clone(), + ) + .await + .expect("Failed to parse Ethereum networks"); let firehose_networks_by_kind = create_firehose_networks(logger.clone(), &config, endpoint_metrics); let firehose_networks = firehose_networks_by_kind.get(&BlockchainKind::Ethereum); From edb6fb1eb73dc53fcf7c9e9cd72f39025915fb51 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 29 Mar 2023 05:57:06 +0100 Subject: [PATCH 0075/2104] chain/ethereum: Use provider for metrics (#4504) --- chain/ethereum/src/network.rs | 11 ++++-- chain/ethereum/src/transport.rs | 22 +++++++----- graph/src/endpoint.rs | 62 +++++++++++++++++---------------- graph/src/firehose/endpoints.rs | 10 +++--- node/src/chain.rs | 1 + 5 files changed, 59 insertions(+), 47 
deletions(-) diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 0e4303a9ade..77e4dab13b9 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -266,7 +266,7 @@ impl EthereumNetworks { #[cfg(test)] mod tests { use graph::{ - endpoint::{EndpointMetrics, Host}, + endpoint::{EndpointMetrics, Provider}, firehose::SubgraphLimit, prelude::MetricsRegistry, slog::{o, Discard, Logger}, @@ -348,6 +348,7 @@ mod tests { Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), metrics.clone(), + "", ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -453,6 +454,7 @@ mod tests { Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), metrics.clone(), + "", ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -523,6 +525,7 @@ mod tests { Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), metrics.clone(), + "", ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -591,6 +594,7 @@ mod tests { Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new(), metrics.clone(), + "", ); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); @@ -668,7 +672,7 @@ mod tests { ]; // Set errors - metrics.report_for_test(&Host::from(error_provider.clone()), false); + metrics.report_for_test(&Provider::from(error_provider.clone()), false); let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); let mut always_retest_adapters = EthereumNetworkAdapters::new(Some(1f64)); @@ -744,7 +748,7 @@ mod tests { let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); // Set errors - metrics.report_for_test(&Host::from(error_provider.clone()), false); + metrics.report_for_test(&Provider::from(error_provider.clone()), false); let mut no_retest_adapters = EthereumNetworkAdapters::new(Some(0f64)); no_retest_adapters.adapters.push(EthereumNetworkAdapter { @@ -833,6 
+837,7 @@ mod tests { Url::parse(&"http://127.0.0.1").unwrap(), HeaderMap::new(), endpoint_metrics.clone(), + "", ); Arc::new( diff --git a/chain/ethereum/src/transport.rs b/chain/ethereum/src/transport.rs index 6b898b95cae..f19b45dc3df 100644 --- a/chain/ethereum/src/transport.rs +++ b/chain/ethereum/src/transport.rs @@ -1,4 +1,4 @@ -use graph::endpoint::{EndpointMetrics, Host, RequestLabels}; +use graph::endpoint::{EndpointMetrics, Provider, RequestLabels}; use jsonrpc_core::types::Call; use jsonrpc_core::Value; @@ -15,7 +15,7 @@ pub enum Transport { RPC { client: http::Http, metrics: Arc, - host: Host, + provider: Provider, }, IPC(ipc::Ipc), WS(ws::WebSocket), @@ -43,18 +43,22 @@ impl Transport { /// /// Note: JSON-RPC over HTTP doesn't always support subscribing to new /// blocks (one such example is Infura's HTTP endpoint). - pub fn new_rpc(rpc: Url, headers: ::http::HeaderMap, metrics: Arc) -> Self { + pub fn new_rpc( + rpc: Url, + headers: ::http::HeaderMap, + metrics: Arc, + provider: impl AsRef, + ) -> Self { // Unwrap: This only fails if something is wrong with the system's TLS config. 
let client = reqwest::Client::builder() .default_headers(headers) .build() .unwrap(); - let host = rpc.to_string(); Transport::RPC { client: http::Http::with_client(client, rpc), metrics, - host: host.into(), + provider: provider.as_ref().into(), } } } @@ -67,7 +71,7 @@ impl web3::Transport for Transport { Transport::RPC { client, metrics: _, - host: _, + provider: _, } => client.prepare(method, params), Transport::IPC(ipc) => ipc.prepare(method, params), Transport::WS(ws) => ws.prepare(method, params), @@ -79,7 +83,7 @@ impl web3::Transport for Transport { Transport::RPC { client, metrics, - host, + provider, } => { let metrics = metrics.cheap_clone(); let client = client.clone(); @@ -89,7 +93,7 @@ impl web3::Transport for Transport { }; let labels = RequestLabels { - host: host.clone(), + provider: provider.clone(), req_type: method.into(), conn_type: graph::endpoint::ConnectionType::Rpc, }; @@ -127,7 +131,7 @@ impl web3::BatchTransport for Transport { Transport::RPC { client, metrics: _, - host: _, + provider: _, } => Box::new(client.send_batch(requests)), Transport::IPC(ipc) => Box::new(ipc.send_batch(requests)), Transport::WS(ws) => Box::new(ws.send_batch(requests)), diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index 368d9db4130..bff6b0c53f9 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -11,22 +11,20 @@ use slog::{warn, Logger}; use crate::{components::metrics::MetricsRegistry, data::value::Word}; -/// HostCount is the underlying structure to keep the count, +/// ProviderCount is the underlying structure to keep the count, /// we require that all the hosts are known ahead of time, this way we can /// avoid locking since we don't need to modify the entire struture. -type HostCount = Arc>; +type ProviderCount = Arc>; -/// Host represents the normalized (parse::().to_string()) of the -/// underlying endpoint. This allows us to track errors across multiple -/// adapters if they share the same endpoint. 
-pub type Host = Word; +/// Provider represents label of the underlying endpoint. +pub type Provider = Word; /// This struct represents all the current labels except for the result /// which is added separately. If any new labels are necessary they should /// remain in the same order as added in [`EndpointMetrics::new`] #[derive(Clone)] pub struct RequestLabels { - pub host: Host, + pub provider: Provider, pub req_type: Word, pub conn_type: ConnectionType, } @@ -54,7 +52,7 @@ impl RequestLabels { Box::new([ (&self.conn_type).into(), self.req_type.as_str(), - self.host.as_str(), + self.provider.as_str(), match is_success { true => "success", false => "failure", @@ -67,35 +65,39 @@ impl RequestLabels { /// a success call to a host will clear the error count. pub struct EndpointMetrics { logger: Logger, - hosts: HostCount, + providers: ProviderCount, counter: Box, } impl std::fmt::Debug for EndpointMetrics { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!("{:?}", self.hosts)) + f.write_fmt(format_args!("{:?}", self.providers)) } } impl EndpointMetrics { - pub fn new(logger: Logger, hosts: &[impl AsRef], registry: Arc) -> Self { - let hosts = Arc::new(HashMap::from_iter( - hosts + pub fn new( + logger: Logger, + providers: &[impl AsRef], + registry: Arc, + ) -> Self { + let providers = Arc::new(HashMap::from_iter( + providers .iter() - .map(|h| (Host::from(h.as_ref()), AtomicU64::new(0))), + .map(|h| (Provider::from(h.as_ref()), AtomicU64::new(0))), )); let counter = registry .new_int_counter_vec( "endpoint_request", "successfull request", - &["conn_type", "req_type", "host", "result"], + &["conn_type", "req_type", "provider", "result"], ) .expect("unable to create endpoint_request counter_vec"); Self { logger, - hosts, + providers, counter, } } @@ -103,24 +105,24 @@ impl EndpointMetrics { /// This should only be used for testing. 
pub fn mock() -> Self { use slog::{o, Discard}; - let hosts: &[&str] = &[]; + let providers: &[&str] = &[]; Self::new( Logger::root(Discard, o!()), - hosts, + providers, Arc::new(MetricsRegistry::mock()), ) } #[cfg(debug_assertions)] - pub fn report_for_test(&self, host: &Host, success: bool) { + pub fn report_for_test(&self, provider: &Provider, success: bool) { match success { true => self.success(&RequestLabels { - host: host.clone(), + provider: provider.clone(), req_type: "".into(), conn_type: ConnectionType::Firehose, }), false => self.failure(&RequestLabels { - host: host.clone(), + provider: provider.clone(), req_type: "".into(), conn_type: ConnectionType::Firehose, }), @@ -128,13 +130,13 @@ impl EndpointMetrics { } pub fn success(&self, labels: &RequestLabels) { - match self.hosts.get(&labels.host) { + match self.providers.get(&labels.provider) { Some(count) => { count.store(0, Ordering::Relaxed); } None => warn!( &self.logger, - "metrics not available for host {}", labels.host + "metrics not available for host {}", labels.provider ), }; @@ -142,13 +144,13 @@ impl EndpointMetrics { } pub fn failure(&self, labels: &RequestLabels) { - match self.hosts.get(&labels.host) { + match self.providers.get(&labels.provider) { Some(count) => { count.fetch_add(1, Ordering::Relaxed); } None => warn!( &self.logger, - "metrics not available for host {}", &labels.host + "metrics not available for host {}", &labels.provider ), }; @@ -159,9 +161,9 @@ impl EndpointMetrics { /// Returns the current error count of a host or 0 if the host /// doesn't have a value on the map. 
- pub fn get_count(&self, host: &Host) -> u64 { - self.hosts - .get(host) + pub fn get_count(&self, provider: &Provider) -> u64 { + self.providers + .get(provider) .map(|c| c.load(Ordering::Relaxed)) .unwrap_or(0) } @@ -175,12 +177,12 @@ mod test { use crate::{ components::metrics::MetricsRegistry, - endpoint::{EndpointMetrics, Host}, + endpoint::{EndpointMetrics, Provider}, }; #[tokio::test] async fn should_increment_and_reset() { - let (a, b, c): (Host, Host, Host) = ("a".into(), "b".into(), "c".into()); + let (a, b, c): (Provider, Provider, Provider) = ("a".into(), "b".into(), "c".into()); let hosts: &[&str] = &[&a, &b, &c]; let logger = Logger::root(Discard, o!()); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 9f60e9b4c00..d0b02755e3c 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -4,7 +4,7 @@ use crate::{ blockchain::BlockPtr, cheap_clone::CheapClone, components::store::BlockNumber, - endpoint::{ConnectionType, EndpointMetrics, Host, RequestLabels}, + endpoint::{ConnectionType, EndpointMetrics, Provider, RequestLabels}, firehose::decode_firehose_block, prelude::{anyhow, debug, info}, substreams, @@ -37,7 +37,7 @@ const HIGH_VALUE_USED_PERCENTAGE: usize = 80; #[derive(Debug)] pub struct FirehoseEndpoint { - pub provider: Host, + pub provider: Provider, pub auth: AuthInterceptor, pub filters_enabled: bool, pub compression_enabled: bool, @@ -194,7 +194,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.provider.clone().into(), + provider: self.provider.clone().into(), req_type: "unknown".into(), conn_type: ConnectionType::Firehose, }, @@ -221,7 +221,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.provider.clone().into(), + provider: self.provider.clone().into(), req_type: "unknown".into(), 
conn_type: ConnectionType::Firehose, }, @@ -246,7 +246,7 @@ impl FirehoseEndpoint { metrics: self.endpoint_metrics.cheap_clone(), service: self.channel.cheap_clone(), labels: RequestLabels { - host: self.provider.clone().into(), + provider: self.provider.clone().into(), req_type: "unknown".into(), conn_type: ConnectionType::Substreams, }, diff --git a/node/src/chain.rs b/node/src/chain.rs index d40ba458a9a..6a7a0be8ab2 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -461,6 +461,7 @@ pub async fn create_ethereum_networks_for_chain( Url::parse(&web3.url)?, web3.headers.clone(), endpoint_metrics.cheap_clone(), + &provider.label, ), Ipc => Transport::new_ipc(&web3.url).await, Ws => Transport::new_ws(&web3.url).await, From 8aa92a7246aa55d25fe382b9c8f4ca2bba7615b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Mar 2023 10:19:44 +0100 Subject: [PATCH 0076/2104] build(deps): bump quote from 1.0.23 to 1.0.26 (#4501) Bumps [quote](https://github.com/dtolnay/quote) from 1.0.23 to 1.0.26. - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.23...1.0.26) --- updated-dependencies: - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index feb30dbac0a..44f9b75c77c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3330,9 +3330,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" dependencies = [ "unicode-ident", ] @@ -3467,9 +3467,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] From c527e659c1714d4cd15371deb2830fc2abb8f443 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Mar 2023 10:20:04 +0100 Subject: [PATCH 0077/2104] build(deps): bump tower-test from `664cb35` to `d2f1703` (#4498) Bumps [tower-test](https://github.com/tower-rs/tower) from `664cb35` to `d2f1703`. - [Release notes](https://github.com/tower-rs/tower/releases) - [Commits](https://github.com/tower-rs/tower/compare/664cb35abb8da07b6c46212375c481153d8abfb9...d2f1703c4856c5e234d9f1b61cc34e24a3e8abc5) --- updated-dependencies: - dependency-name: tower-test dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44f9b75c77c..762bc96e4d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4746,7 +4746,7 @@ dependencies = [ [[package]] name = "tower" version = "0.4.12" -source = "git+https://github.com/tower-rs/tower.git#664cb35abb8da07b6c46212375c481153d8abfb9" +source = "git+https://github.com/tower-rs/tower.git#d2f1703c4856c5e234d9f1b61cc34e24a3e8abc5" dependencies = [ "futures-core", "futures-util", @@ -4804,7 +4804,7 @@ dependencies = [ [[package]] name = "tower-layer" version = "0.3.1" -source = "git+https://github.com/tower-rs/tower.git#664cb35abb8da07b6c46212375c481153d8abfb9" +source = "git+https://github.com/tower-rs/tower.git#d2f1703c4856c5e234d9f1b61cc34e24a3e8abc5" [[package]] name = "tower-layer" @@ -4821,12 +4821,12 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tower-service" version = "0.3.1" -source = "git+https://github.com/tower-rs/tower.git#664cb35abb8da07b6c46212375c481153d8abfb9" +source = "git+https://github.com/tower-rs/tower.git#d2f1703c4856c5e234d9f1b61cc34e24a3e8abc5" [[package]] name = "tower-test" version = "0.4.0" -source = "git+https://github.com/tower-rs/tower.git#664cb35abb8da07b6c46212375c481153d8abfb9" +source = "git+https://github.com/tower-rs/tower.git#d2f1703c4856c5e234d9f1b61cc34e24a3e8abc5" dependencies = [ "futures-util", "pin-project-lite", From d3fbf546dc2273fbdd2ce67cf3f7703648d7b0c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Mar 2023 10:20:29 +0100 Subject: [PATCH 0078/2104] build(deps): bump shellexpand from 2.1.0 to 3.1.0 (#4497) Bumps [shellexpand](https://gitlab.com/ijackson/rust-shellexpand) from 2.1.0 to 3.1.0. 
- [Release notes](https://gitlab.com/ijackson/rust-shellexpand/tags) - [Commits](https://gitlab.com/ijackson/rust-shellexpand/commits/shellexpand-3.1.0) --- updated-dependencies: - dependency-name: shellexpand dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- node/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 762bc96e4d2..c2eac358cc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4048,11 +4048,11 @@ dependencies = [ [[package]] name = "shellexpand" -version = "2.1.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdb7831b2d85ddf4a7b148aa19d0587eddbe8671a436b7bd1182eaad0f2829" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" dependencies = [ - "dirs-next", + "dirs", ] [[package]] diff --git a/node/Cargo.toml b/node/Cargo.toml index af555856ceb..98ec4269db8 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -38,7 +38,7 @@ graph-store-postgres = { path = "../store/postgres" } serde = { version = "1.0.126", features = ["derive", "rc"] } serde_regex = "1.1.0" toml = "0.7.1" -shellexpand = "2.1.0" +shellexpand = "3.1.0" termcolor = "1.2.0" diesel = "1.4.8" http = "0.2.5" # must be compatible with the version rust-web3 uses From 68d924dd0aface5f524683ad35c548ff708fd575 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Mar 2023 10:36:46 +0100 Subject: [PATCH 0079/2104] build(deps): bump indexmap from 1.9.2 to 1.9.3 (#4499) Bumps [indexmap](https://github.com/bluss/indexmap) from 1.9.2 to 1.9.3. 
- [Release notes](https://github.com/bluss/indexmap/releases) - [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/compare/1.9.2...1.9.3) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2eac358cc4..9952a8ca8e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2359,9 +2359,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", From 1462de727c55cec92bd795583ebdafbe15e800c9 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 7 Mar 2023 21:18:47 -0300 Subject: [PATCH 0080/2104] graph,core,runtime,store: add store.getWhere --- core/src/subgraph/state.rs | 4 +- graph/src/components/store/entity_cache.rs | 142 +++++++++++++++++---- graph/src/components/store/mod.rs | 40 ++++++ graph/src/components/store/traits.rs | 7 + graph/src/components/subgraph/instance.rs | 4 +- graph/src/runtime/gas/size_of.rs | 19 ++- graph/src/util/cache_weight.rs | 19 ++- runtime/wasm/src/host_exports.rs | 24 +++- runtime/wasm/src/module/mod.rs | 30 +++++ store/postgres/src/deployment_store.rs | 13 +- store/postgres/src/relational.rs | 13 +- store/postgres/src/writable.rs | 29 ++++- 12 files changed, 309 insertions(+), 35 deletions(-) diff --git a/core/src/subgraph/state.rs b/core/src/subgraph/state.rs index 0d5edd84b65..19eab13b2a0 100644 --- a/core/src/subgraph/state.rs +++ b/core/src/subgraph/state.rs @@ -1,5 +1,5 @@ use 
graph::{ - components::store::EntityKey, + components::store::EntityMultiKey, prelude::Entity, util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}, }; @@ -18,5 +18,5 @@ pub struct IndexingState { /// - The time THRESHOLD is passed /// - Or the subgraph has triggers for the block pub skip_ptr_updates_timer: Instant, - pub entity_lfu_cache: LfuCache>, + pub entity_lfu_cache: LfuCache>, } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 538af72ff18..375000d242b 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -3,10 +3,12 @@ use std::collections::HashMap; use std::fmt::{self, Debug}; use std::sync::Arc; -use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; +use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation, Value}; use crate::prelude::{Schema, ENV_VARS}; use crate::util::lfu_cache::LfuCache; +use super::{EntityDerived, EntityMultiKey}; + /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. This struct tracks /// how entities are modified, and caches all entities looked up from the @@ -17,7 +19,7 @@ use crate::util::lfu_cache::LfuCache; pub struct EntityCache { /// The state of entities in the store. An entry of `None` /// means that the entity is not present in the store - current: LfuCache>, + current: LfuCache>, /// The accumulated changes to an entity. 
updates: HashMap, @@ -45,7 +47,7 @@ impl Debug for EntityCache { pub struct ModificationsAndCache { pub modifications: Vec, - pub entity_lfu_cache: LfuCache>, + pub entity_lfu_cache: LfuCache>, } impl EntityCache { @@ -62,7 +64,7 @@ impl EntityCache { pub fn with_current( store: Arc, - current: LfuCache>, + current: LfuCache>, ) -> EntityCache { EntityCache { current, @@ -99,7 +101,9 @@ impl EntityCache { pub fn get(&mut self, eref: &EntityKey) -> Result, s::QueryExecutionError> { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. - let mut entity = self.current.get_entity(&*self.store, eref)?; + let mut entity = self + .current + .get_entity(&*self.store, &EntityMultiKey::Equal(eref.clone()))?; // Always test the cache consistency in debug mode. debug_assert!(entity == self.store.get(eref).unwrap()); @@ -110,9 +114,55 @@ impl EntityCache { if let Some(op) = self.handler_updates.get(eref).cloned() { entity = op.apply_to(entity) } + match entity { + Some(ref e) => { + let list = e.clone().sorted(); + let list = list.iter(); + list.for_each(|(k, v)| println!("{}: {:?}", k, v)); + } + None => println!("No entity found for {:?}", eref), + } Ok(entity) } + pub fn get_where( + &mut self, + eref: &EntityDerived, + ) -> Result, s::QueryExecutionError> { + println!("HELLO WORLD, YOU TRIGERED store.getWhere"); + self.current + .get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; + let entity = self + .current + .get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; + let entities = match entity { + Some(e) => { + // retrieve the list from the cache + let mut entities = Vec::new(); + if let Some(Value::List(list)) = e.get(eref.entity_field.as_str()) { + for id in list.iter() { + // we just created + if let Value::String(id) = id { + let key = EntityKey::from(id, eref); + match self.get(&key) { + Ok(Some(value)) => entities.push(value), + _ => (), + } + } + } + } + entities + } + None => { + println!("No entity 
found for {:?}", eref); + Vec::new() + } + }; + // self.store.get_where() + // todo!(); + Ok(entities) + } + pub fn remove(&mut self, key: EntityKey) { self.entity_op(key, EntityOp::Remove); } @@ -151,6 +201,7 @@ impl EntityCache { } } + // check the validate for derived fields let is_valid = entity.validate(&self.schema, &key).is_ok(); self.entity_op(key.clone(), EntityOp::Update(entity)); @@ -221,10 +272,11 @@ impl EntityCache { // The first step is to make sure all entities being set are in `self.current`. // For each subgraph, we need a map of entity type to missing entity ids. - let missing = self - .updates - .keys() - .filter(|key| !self.current.contains_key(key)); + let missing = self.updates.keys().filter(|key| { + !self + .current + .contains_key(&EntityMultiKey::Equal((*key).clone())) + }); // For immutable types, we assume that the subgraph is well-behaved, // and all updated immutable entities are in fact new, and skip @@ -236,12 +288,14 @@ impl EntityCache { let missing = missing.filter(|key| !self.schema.is_immutable(&key.entity_type)); for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? 
{ - self.current.insert(entity_key, Some(entity)); + self.current + .insert(EntityMultiKey::Equal(entity_key), Some(entity)); } let mut mods = Vec::new(); - for (key, update) in self.updates { + for (entity_key, update) in self.updates { use s::EntityModification::*; + let key = EntityMultiKey::Equal(entity_key.clone()); let current = self.current.remove(&key).and_then(|entity| entity); let modification = match (current, update) { @@ -251,7 +305,10 @@ impl EntityCache { let mut data = Entity::new(); data.merge_remove_null_fields(updates); self.current.insert(key.clone(), Some(data.clone())); - Some(Insert { key, data }) + Some(Insert { + key: entity_key, + data, + }) } // Entity may have been changed (Some(current), EntityOp::Update(updates)) => { @@ -259,7 +316,10 @@ impl EntityCache { data.merge_remove_null_fields(updates); self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { key, data }) + Some(Overwrite { + key: entity_key, + data, + }) } else { None } @@ -268,7 +328,10 @@ impl EntityCache { (Some(current), EntityOp::Overwrite(data)) => { self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { key, data }) + Some(Overwrite { + key: entity_key, + data, + }) } else { None } @@ -276,7 +339,7 @@ impl EntityCache { // Existing entity was deleted (Some(_), EntityOp::Remove) => { self.current.insert(key.clone(), None); - Some(Remove { key }) + Some(Remove { key: entity_key }) } // Entity was deleted, but it doesn't exist in the store (None, EntityOp::Remove) => None, @@ -294,23 +357,52 @@ impl EntityCache { } } -impl LfuCache> { +impl LfuCache> { // Helper for cached lookup of an entity. fn get_entity( &mut self, store: &(impl s::ReadStore + ?Sized), - key: &EntityKey, + key: &EntityMultiKey, ) -> Result, s::QueryExecutionError> { match self.get(key) { - None => { - let mut entity = store.get(key)?; - if let Some(entity) = &mut entity { - // `__typename` is for queries not for mappings. 
- entity.remove("__typename"); + None => match key { + EntityMultiKey::Equal(store_key) => { + let mut entity = store.get(store_key)?; + if let Some(entity) = &mut entity { + // `__typename` is for queries not for mappings. + entity.remove("__typename"); + } + self.insert(key.clone(), entity.clone()); + Ok(entity) } - self.insert(key.clone(), entity.clone()); - Ok(entity) - } + EntityMultiKey::All(derived) => { + // we get all entities with the derived field + let mut entities = store.get_where(derived)?; + // we asssume that derived fields contains ids + let entities = entities + .iter_mut() + .filter(|entity| entity.contains_key("id")) + .map(|entity| { + entity.remove("__typename"); + + // we insert each entity into the cache with the id key + let key = EntityKey::from(&entity.id().unwrap().into(), derived); + self.insert(EntityMultiKey::Equal(key), Some(entity.clone())); + // return the value to save + Value::String(entity.id().unwrap()) + }); + let entities = entities.collect(); + + // create entity with the list of ids + let mut entity = Entity::new(); + entity.insert(derived.entity_field.to_string(), Value::List(entities)); + + // insert to cache the list of ids + self.insert(key.clone(), Some(entity.clone())); + + Ok(Some(entity.clone())) + } + }, Some(data) => Ok(data.clone()), } } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index a1c42d5f1bf..f8304fd3f14 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -138,6 +138,33 @@ pub struct EntityKey { pub causality_region: CausalityRegion, } +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct EntityDerived { + /// Name of the entity type. + pub entity_type: EntityType, + + pub entity_field: Word, + + /// ID of the individual entity. + pub entity_id: Word, + + /// This is the causality region of the data source that created the entity. 
+ /// + /// In the case of an entity lookup, this is the causality region of the data source that is + /// doing the lookup. So if the entity exists but was created on a different causality region, + /// the lookup will return empty. + pub causality_region: CausalityRegion, +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum EntityMultiKey { + /// A filter that matches all entities of a given type. + All(EntityDerived), + + /// A filter that matches a specific entity. + Equal(EntityKey), +} + impl EntityKey { // For use in tests only #[cfg(debug_assertions)] @@ -148,6 +175,15 @@ impl EntityKey { causality_region: CausalityRegion::ONCHAIN, } } + + pub fn from(id: &String, entity_derived: &EntityDerived) -> Self { + let clone = entity_derived.clone(); + Self { + entity_id: id.clone().into(), + entity_type: clone.entity_type, + causality_region: clone.causality_region, + } + } } #[derive(Clone, Debug, PartialEq)] @@ -1127,6 +1163,10 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } + fn get_where(&self, _query: &EntityDerived) -> Result, StoreError> { + Ok(vec![]) + } + fn input_schema(&self) -> Arc { self.schema.cheap_clone() } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 04bc36aa1e6..c897dfb1178 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -186,6 +186,9 @@ pub trait ReadStore: Send + Sync + 'static { keys: BTreeSet, ) -> Result, StoreError>; + /// Reverse lookup + fn get_where(&self, entity_derived: &EntityDerived) -> Result, StoreError>; + fn input_schema(&self) -> Arc; } @@ -202,6 +205,10 @@ impl ReadStore for Arc { (**self).get_many(keys) } + fn get_where(&self, entity_derived: &EntityDerived) -> Result, StoreError> { + (**self).get_where(entity_derived) + } + fn input_schema(&self) -> Arc { (**self).input_schema() } diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index 
f3df2c672e4..b7f2a1efe19 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -1,6 +1,6 @@ use crate::{ blockchain::Blockchain, - components::store::{EntityKey, ReadStore, StoredDynamicDataSource}, + components::store::{EntityMultiKey, ReadStore, StoredDynamicDataSource}, data::subgraph::schema::SubgraphError, data_source::DataSourceTemplate, prelude::*, @@ -35,7 +35,7 @@ pub struct BlockState { } impl BlockState { - pub fn new(store: impl ReadStore, lfu_cache: LfuCache>) -> Self { + pub fn new(store: impl ReadStore, lfu_cache: LfuCache>) -> Self { BlockState { entity_cache: EntityCache::with_current(Arc::new(store), lfu_cache), deterministic_errors: Vec::new(), diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index 49bb60b1215..4f1218955af 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -1,7 +1,7 @@ //! Various implementations of GasSizeOf; use crate::{ - components::store::{EntityKey, EntityType}, + components::store::{EntityDerived, EntityKey, EntityMultiKey, EntityType}, data::store::{scalar::Bytes, Value}, prelude::{BigDecimal, BigInt}, }; @@ -168,6 +168,23 @@ impl GasSizeOf for EntityKey { } } +impl GasSizeOf for EntityDerived { + fn gas_size_of(&self) -> Gas { + self.entity_type.gas_size_of() + + self.entity_id.gas_size_of() + + self.entity_field.gas_size_of() + } +} + +impl GasSizeOf for EntityMultiKey { + fn gas_size_of(&self) -> Gas { + match self { + EntityMultiKey::Equal(key) => key.gas_size_of(), + EntityMultiKey::All(key) => key.gas_size_of(), + } + } +} + impl GasSizeOf for EntityType { fn gas_size_of(&self) -> Gas { self.as_str().gas_size_of() diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index af15a82b25d..790e5a80f8e 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -1,5 +1,5 @@ use crate::{ - components::store::{EntityKey, EntityType}, + 
components::store::{EntityDerived, EntityKey, EntityMultiKey, EntityType}, data::value::Word, prelude::{q, BigDecimal, BigInt, Value}, }; @@ -127,6 +127,23 @@ impl CacheWeight for EntityKey { } } +impl CacheWeight for EntityDerived { + fn indirect_weight(&self) -> usize { + self.entity_id.indirect_weight() + + self.entity_type.indirect_weight() + + self.entity_field.indirect_weight() + } +} + +impl CacheWeight for EntityMultiKey { + fn indirect_weight(&self) -> usize { + match self { + EntityMultiKey::All(derived) => derived.indirect_weight(), + EntityMultiKey::Equal(key) => key.indirect_weight(), + } + } +} + impl CacheWeight for [u8; 32] { fn indirect_weight(&self) -> usize { 0 diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index ded1d7193d6..e514314f509 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -9,7 +9,7 @@ use wasmtime::Trap; use web3::types::H160; use graph::blockchain::Blockchain; -use graph::components::store::EnsLookup; +use graph::components::store::{EnsLookup, EntityDerived}; use graph::components::store::{EntityKey, EntityType}; use graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, @@ -239,6 +239,28 @@ impl HostExports { Ok(result) } + pub(crate) fn store_get_where( + &self, + state: &mut BlockState, + entity_type: String, + entity_field: String, + entity_id: String, + gas: &GasCounter, + ) -> Result, anyhow::Error> { + let store_key = EntityDerived { + entity_type: EntityType::new(entity_type), + entity_id: entity_id.into(), + entity_field: entity_field.into(), + causality_region: self.data_source_causality_region, + }; + self.check_entity_type_access(&store_key.entity_type)?; + + let result = state.entity_cache.get_where(&store_key)?; + gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; + + Ok(result) + } + /// Prints the module of `n` in hex. 
/// Integers are encoded using the least amount of digits (no leading zero digits). /// Their encoding may be of uneven length. The number zero encodes as "0x0". diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index c7ac94175ac..28dbf3fc124 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -527,6 +527,14 @@ impl WasmInstance { link!("abort", abort, message_ptr, file_name_ptr, line, column); link!("store.get", store_get, "host_export_store_get", entity, id); + link!( + "store.getWhere", + store_get_where, + "host_export_store_get_where", + entity, + field, + id + ); link!( "store.set", store_set, @@ -1059,6 +1067,28 @@ impl WasmInstanceContext { Ok(ret) } + /// function store.getWhere(entity: string, field: string, id: string): Entity[] | null + pub fn store_get_where( + &mut self, + gas: &GasCounter, + entity_ptr: AscPtr, + field_ptr: AscPtr, + id_ptr: AscPtr, + ) -> Result>>, HostExportError> { + let entity_type: String = asc_get(self, entity_ptr, gas)?; + let field: String = asc_get(self, field_ptr, gas)?; + let id: String = asc_get(self, id_ptr, gas)?; + println!("store_get_where: {} {} {}", entity_type, field, id); + let entity_option = self.ctx.host_exports.store_get_where( + &mut self.ctx.state, + entity_type.clone(), + field.clone(), + id.clone(), + gas, + )?; + Ok(AscPtr::null()) + } + /// function typeConversion.bytesToString(bytes: Bytes): string pub fn bytes_to_string( &mut self, diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index d40e48c68ef..7545a1d0891 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -6,7 +6,7 @@ use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, + EntityDerived, EntityKey, 
EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; @@ -1119,6 +1119,17 @@ impl DeploymentStore { layout.find_many(&conn, ids_for_type, block) } + pub(crate) fn get_where( + &self, + site: Arc, + key: &EntityDerived, + block: BlockNumber, + ) -> Result, StoreError> { + let conn = self.get_conn()?; + let layout = self.layout(&conn, site)?; + layout.find_where(&conn, key, block) + } + pub(crate) fn get_changes( &self, site: Arc, diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 26cc5ca304b..e9cb5a7ca84 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -49,7 +49,7 @@ use crate::{ FilterQuery, FindManyQuery, FindQuery, InsertQuery, RevertClampQuery, RevertRemoveQuery, }, }; -use graph::components::store::{EntityKey, EntityType}; +use graph::components::store::{EntityDerived, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, DocumentExt, ObjectTypeExt}; use graph::data::schema::{FulltextConfig, FulltextDefinition, Schema, SCHEMA_TYPE_NAME}; use graph::data::store::BYTES_SCALAR; @@ -558,6 +558,17 @@ impl Layout { Ok(entities) } + pub fn find_where( + &self, + conn: &PgConnection, + key: &EntityDerived, + block: BlockNumber, + ) -> Result, StoreError> { + let entities = Vec::new(); + println!("find_where: {:?} {:?}", key, block); + Ok(entities) + } + pub fn find_changes( &self, conn: &PgConnection, diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 62b47b57097..363faf9f5ad 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -5,7 +5,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::ReadStore; -use graph::components::store::{DeploymentCursorTracker, EntityKey}; +use graph::components::store::{DeploymentCursorTracker, EntityDerived, 
EntityKey}; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -251,6 +251,16 @@ impl SyncStore { }) } + fn get_where( + &self, + key: &EntityDerived, + block: BlockNumber, + ) -> Result, StoreError> { + retry::forever(&self.logger, "get_where", || { + self.writable.get_where(self.site.cheap_clone(), key, block) + }) + } + async fn is_deployment_synced(&self) -> Result { retry::forever_async(&self.logger, "is_deployment_synced", || async { self.writable @@ -746,6 +756,12 @@ impl Queue { Ok(map) } + fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { + let tracker = BlockTracker::new(); + // TODO implement the whole async + self.store.get_where(key, tracker.query_block()) + } + /// Load dynamic data sources by looking at both the queue and the store async fn load_dynamic_data_sources( &self, @@ -904,6 +920,13 @@ impl Writer { } } + fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { + match self { + Writer::Sync(store) => store.get_where(key, BLOCK_NUMBER_MAX), + Writer::Async(queue) => queue.get_where(key), + } + } + async fn load_dynamic_data_sources( &self, manifest_idx_and_name: Vec<(u32, String)>, @@ -993,6 +1016,10 @@ impl ReadStore for WritableStore { self.writer.get_many(keys) } + fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { + self.writer.get_where(key) + } + fn input_schema(&self) -> Arc { self.store.input_schema() } From 54d6167a520638876581cf9bc3648d85bda87ddf Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 00:06:46 -0300 Subject: [PATCH 0081/2104] add asc to array of entity --- graph/src/runtime/mod.rs | 1 + runtime/wasm/src/asc_abi/class.rs | 4 ++++ runtime/wasm/src/module/mod.rs | 9 +++++++-- runtime/wasm/src/to_from/external.rs | 13 +++++++++++++ 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index 74007b96cef..917f4d85d40 100644 --- a/graph/src/runtime/mod.rs +++ 
b/graph/src/runtime/mod.rs @@ -260,6 +260,7 @@ pub enum IndexForAscTypeId { Log = 1001, ArrayH256 = 1002, ArrayLog = 1003, + ArrayTypedMapStringStoreValue = 1004, // Continue to add more Ethereum type IDs here. // e.g.: // NextEthereumType = 1004, diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 0fdac204847..95c14b1bfb3 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -609,6 +609,10 @@ impl AscIndexId for AscTypedMap> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::TypedMapStringStoreValue; } +impl AscIndexId for Array> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::ArrayTypedMapStringStoreValue; +} + impl AscIndexId for AscTypedMap> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::TypedMapStringJsonValue; } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 28dbf3fc124..d2c9a268647 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1079,14 +1079,19 @@ impl WasmInstanceContext { let field: String = asc_get(self, field_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; println!("store_get_where: {} {} {}", entity_type, field, id); - let entity_option = self.ctx.host_exports.store_get_where( + let entities = self.ctx.host_exports.store_get_where( &mut self.ctx.state, entity_type.clone(), field.clone(), id.clone(), gas, )?; - Ok(AscPtr::null()) + + let entities: Vec> = entities.iter().map(|entity| entity.clone().sorted()).collect(); + // .map(|name| asc_new(self, &*name, gas)) + // ..collect(); + let ret = asc_new(self, &entities, gas)?; + Ok(ret) } /// function typeConversion.bytesToString(bytes: Bytes): string diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index 69532fbf237..426d224074d 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -333,6 +333,19 @@ impl ToAscObj for 
Vec<(String, store::Value)> { } } +impl ToAscObj>> for Vec> { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result>, DeterministicHostError> + { + let content: Result, _> = self.iter().map(|x| asc_new(heap, &x, gas)).collect(); + let content = content?; + Array::new(&content, heap, gas) + } +} + impl ToAscObj> for serde_json::Value { fn to_asc_obj( &self, From 2408d81d8c48be02bdcaf46e3dd9b8f3fe3abe96 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 00:13:48 -0300 Subject: [PATCH 0082/2104] store: fix lint --- runtime/wasm/src/module/mod.rs | 5 ++++- runtime/wasm/src/to_from/external.rs | 3 +-- store/postgres/src/writable.rs | 3 +-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index d2c9a268647..93aaf482910 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1087,7 +1087,10 @@ impl WasmInstanceContext { gas, )?; - let entities: Vec> = entities.iter().map(|entity| entity.clone().sorted()).collect(); + let entities: Vec> = entities + .iter() + .map(|entity| entity.clone().sorted()) + .collect(); // .map(|name| asc_new(self, &*name, gas)) // ..collect(); let ret = asc_new(self, &entities, gas)?; diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index 426d224074d..fd8a2e2ad16 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -338,8 +338,7 @@ impl ToAscObj>> for Vec> { &self, heap: &mut H, gas: &GasCounter, - ) -> Result>, DeterministicHostError> - { + ) -> Result>, HostExportError> { let content: Result, _> = self.iter().map(|x| asc_new(heap, &x, gas)).collect(); let content = content?; Array::new(&content, heap, gas) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 363faf9f5ad..758d87657ec 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -4,8 +4,7 @@ use 
std::sync::Mutex; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::ReadStore; -use graph::components::store::{DeploymentCursorTracker, EntityDerived, EntityKey}; +use graph::components::store::{DeploymentCursorTracker, EntityDerived, EntityKey, ReadStore}; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ From 6ec37dcc09f973c6660f3b743fb8516286bbb368 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 11:51:56 -0300 Subject: [PATCH 0083/2104] store: add FindDerivedQuery --- store/postgres/src/relational.rs | 11 ++++- store/postgres/src/relational_queries.rs | 58 +++++++++++++++++++++++- 2 files changed, 66 insertions(+), 3 deletions(-) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index e9cb5a7ca84..b6cdcfc3674 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -564,8 +564,15 @@ impl Layout { key: &EntityDerived, block: BlockNumber, ) -> Result, StoreError> { - let entities = Vec::new(); - println!("find_where: {:?} {:?}", key, block); + let table = self.table_for_entity(&key.entity_type)?; + let query = FindDerivedQuery::new(table, key, block); + + let mut entities = Vec::new(); + + for data in query.load::(conn)? 
{ + let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; + entities.push(entity_data); + } Ok(entities) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index bb60880935c..a07c654bf8a 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -12,7 +12,7 @@ use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; -use graph::components::store::EntityKey; +use graph::components::store::{EntityKey, EntityDerived}; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -1668,6 +1668,62 @@ impl<'a> LoadQuery for FindManyQuery<'a> { impl<'a, Conn> RunQueryDsl for FindManyQuery<'a> {} +/// A query that finds an entity by key. Used during indexing. +/// See also `FindManyQuery`. +#[derive(Debug, Clone, Constructor)] +pub struct FindDerivedQuery<'a> { + table: &'a Table, + key: &'a EntityDerived, + block: BlockNumber, +} + +impl<'a> QueryFragment for FindDerivedQuery<'a> { + fn walk_ast(&self, mut out: AstPass) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); + + let EntityDerived { + entity_type: _, + entity_field, + entity_id, + causality_region, + } = self.key; + + // Generate + // select '..' 
as entity, to_jsonb(e.*) as data + // from schema.table e where id = $1 + out.push_sql("select "); + out.push_bind_param::(&self.table.object.as_str())?; + out.push_sql(" as entity, to_jsonb(e.*) as data\n"); + out.push_sql(" from "); + out.push_sql(self.table.qualified_name.as_str()); + out.push_sql(" e\n where "); + out.push_identifier(entity_field.as_str())?; + out.push_sql(" = "); + out.push_bind_param::(&entity_id.as_str())?; + out.push_sql(" and "); + if self.table.has_causality_region { + out.push_sql("causality_region = "); + out.push_bind_param::(causality_region)?; + out.push_sql(" and "); + } + BlockRangeColumn::new(self.table, "e.", self.block).contains(&mut out) + } +} + +impl<'a> QueryId for FindDerivedQuery<'a> { + type QueryId = (); + + const HAS_STATIC_QUERY_ID: bool = false; +} + +impl<'a> LoadQuery for FindDerivedQuery<'a> { + fn internal_load(self, conn: &PgConnection) -> QueryResult> { + conn.query_by_name(&self) + } +} + +impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} + #[derive(Debug)] pub struct InsertQuery<'a> { table: &'a Table, From a2857ad042e2cc584248ffcf9462f8cfe43ac3ec Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 11:56:18 -0300 Subject: [PATCH 0084/2104] store,runtime,graph: refact where to derived --- graph/src/components/store/entity_cache.rs | 21 +++++++++------------ graph/src/components/store/mod.rs | 5 ++++- graph/src/components/store/traits.rs | 12 +++++++++--- runtime/wasm/src/host_exports.rs | 5 +++-- runtime/wasm/src/module/mod.rs | 14 +++++++------- store/postgres/src/deployment_store.rs | 4 ++-- store/postgres/src/relational.rs | 6 ++++-- store/postgres/src/writable.rs | 16 ++++++++-------- 8 files changed, 46 insertions(+), 37 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 375000d242b..1db87565d47 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -120,21 +120,17 
@@ impl EntityCache { let list = list.iter(); list.for_each(|(k, v)| println!("{}: {:?}", k, v)); } - None => println!("No entity found for {:?}", eref), + None => println!("get: No entity found for {:?}", eref), } Ok(entity) } - pub fn get_where( + pub fn get_derived( &mut self, eref: &EntityDerived, ) -> Result, s::QueryExecutionError> { - println!("HELLO WORLD, YOU TRIGERED store.getWhere"); - self.current - .get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; - let entity = self - .current - .get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; + self.current.get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; + let entity = self.current.get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; let entities = match entity { Some(e) => { // retrieve the list from the cache @@ -154,11 +150,11 @@ impl EntityCache { entities } None => { - println!("No entity found for {:?}", eref); + println!("get_derived: No entity found for {:?}", eref); Vec::new() } }; - // self.store.get_where() + // self.store.get_derived() // todo!(); Ok(entities) } @@ -377,7 +373,7 @@ impl LfuCache> { } EntityMultiKey::All(derived) => { // we get all entities with the derived field - let mut entities = store.get_where(derived)?; + let mut entities = store.get_derived(derived)?; // we asssume that derived fields contains ids let entities = entities .iter_mut() @@ -398,7 +394,8 @@ impl LfuCache> { entity.insert(derived.entity_field.to_string(), Value::List(entities)); // insert to cache the list of ids - self.insert(key.clone(), Some(entity.clone())); + // todo remove this comment + // self.insert(key.clone(), Some(entity.clone())); Ok(Some(entity.clone())) } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index f8304fd3f14..6ff6aa0a733 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1163,7 +1163,10 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn get_where(&self, 
_query: &EntityDerived) -> Result, StoreError> { + fn get_derived( + &self, + _query: &EntityDerived, + ) -> Result, StoreError> { Ok(vec![]) } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index c897dfb1178..b33bd1ce5f2 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -187,7 +187,10 @@ pub trait ReadStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Reverse lookup - fn get_where(&self, entity_derived: &EntityDerived) -> Result, StoreError>; + fn get_derived( + &self, + entity_derived: &EntityDerived + ) -> Result, StoreError>; fn input_schema(&self) -> Arc; } @@ -205,8 +208,11 @@ impl ReadStore for Arc { (**self).get_many(keys) } - fn get_where(&self, entity_derived: &EntityDerived) -> Result, StoreError> { - (**self).get_where(entity_derived) + fn get_derived( + &self, + entity_derived: &EntityDerived + ) -> Result, StoreError> { + (**self).get_derived(entity_derived) } fn input_schema(&self) -> Arc { diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index e514314f509..6bb2f89f9ce 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -239,7 +239,8 @@ impl HostExports { Ok(result) } - pub(crate) fn store_get_where( + + pub(crate) fn store_get_derived( &self, state: &mut BlockState, entity_type: String, @@ -255,7 +256,7 @@ impl HostExports { }; self.check_entity_type_access(&store_key.entity_type)?; - let result = state.entity_cache.get_where(&store_key)?; + let result = state.entity_cache.get_derived(&store_key)?; gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; Ok(result) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 93aaf482910..795070b4ddd 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -528,9 +528,9 @@ impl WasmInstance { link!("store.get", store_get, "host_export_store_get", entity, 
id); link!( - "store.getWhere", - store_get_where, - "host_export_store_get_where", + "store.getDerived", + store_get_derived, + "host_export_store_get_derived", entity, field, id @@ -1067,8 +1067,8 @@ impl WasmInstanceContext { Ok(ret) } - /// function store.getWhere(entity: string, field: string, id: string): Entity[] | null - pub fn store_get_where( + /// function store.getDerived(entity: string, field: string, id: string): Entity[] + pub fn store_get_derived( &mut self, gas: &GasCounter, entity_ptr: AscPtr, @@ -1078,8 +1078,8 @@ impl WasmInstanceContext { let entity_type: String = asc_get(self, entity_ptr, gas)?; let field: String = asc_get(self, field_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; - println!("store_get_where: {} {} {}", entity_type, field, id); - let entities = self.ctx.host_exports.store_get_where( + println!("store_get_derived: {} {} {}", entity_type, field, id); + let entities = self.ctx.host_exports.store_get_derived( &mut self.ctx.state, entity_type.clone(), field.clone(), diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 7545a1d0891..08471490443 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1119,7 +1119,7 @@ impl DeploymentStore { layout.find_many(&conn, ids_for_type, block) } - pub(crate) fn get_where( + pub(crate) fn get_derived( &self, site: Arc, key: &EntityDerived, @@ -1127,7 +1127,7 @@ impl DeploymentStore { ) -> Result, StoreError> { let conn = self.get_conn()?; let layout = self.layout(&conn, site)?; - layout.find_where(&conn, key, block) + layout.find_derived(&conn, key, block) } pub(crate) fn get_changes( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b6cdcfc3674..4cd5e97cea3 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -41,7 +41,7 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; -use 
crate::relational_queries::{FindChangesQuery, FindPossibleDeletionsQuery}; +use crate::relational_queries::{FindChangesQuery, FindPossibleDeletionsQuery, FindDerivedQuery}; use crate::{ primary::{Namespace, Site}, relational_queries::{ @@ -558,7 +558,7 @@ impl Layout { Ok(entities) } - pub fn find_where( + pub fn find_derived( &self, conn: &PgConnection, key: &EntityDerived, @@ -731,6 +731,8 @@ impl Layout { query.query_id, &self.site, )?; + println!("{}", debug_query(&query).to_string()); + let query_clone = query.clone(); let start = Instant::now(); diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 758d87657ec..5d587c1cd1f 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -250,7 +250,7 @@ impl SyncStore { }) } - fn get_where( + fn get_derived( &self, key: &EntityDerived, block: BlockNumber, @@ -755,10 +755,10 @@ impl Queue { Ok(map) } - fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, key: &EntityDerived) -> Result, StoreError> { let tracker = BlockTracker::new(); // TODO implement the whole async - self.store.get_where(key, tracker.query_block()) + self.store.get_derived(key, tracker.query_block()) } /// Load dynamic data sources by looking at both the queue and the store @@ -919,10 +919,10 @@ impl Writer { } } - fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, key: &EntityDerived) -> Result, StoreError> { match self { - Writer::Sync(store) => store.get_where(key, BLOCK_NUMBER_MAX), - Writer::Async(queue) => queue.get_where(key), + Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX), + Writer::Async(queue) => queue.get_derived(key), } } @@ -1015,8 +1015,8 @@ impl ReadStore for WritableStore { self.writer.get_many(keys) } - fn get_where(&self, key: &EntityDerived) -> Result, StoreError> { - self.writer.get_where(key) + fn get_derived(&self, key: &EntityDerived) -> Result, StoreError> { + 
self.writer.get_derived(key) } fn input_schema(&self) -> Arc { From a374746cb0597ddf917dec5cabf98582e2aba542 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 12:02:31 -0300 Subject: [PATCH 0085/2104] graph: update EntityMultiKey enum --- graph/src/components/store/entity_cache.rs | 36 ++++++++-------------- graph/src/components/store/mod.rs | 9 ++---- graph/src/components/store/traits.rs | 10 ++---- graph/src/runtime/gas/size_of.rs | 4 +-- graph/src/util/cache_weight.rs | 4 +-- runtime/wasm/src/host_exports.rs | 1 - store/postgres/src/relational.rs | 4 +-- store/postgres/src/relational_queries.rs | 2 +- store/postgres/src/writable.rs | 3 +- 9 files changed, 27 insertions(+), 46 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 1db87565d47..29eaea5d616 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -103,7 +103,7 @@ impl EntityCache { // from `handler_updates`. let mut entity = self .current - .get_entity(&*self.store, &EntityMultiKey::Equal(eref.clone()))?; + .get_entity(&*self.store, &EntityMultiKey::Single(eref.clone()))?; // Always test the cache consistency in debug mode. 
debug_assert!(entity == self.store.get(eref).unwrap()); @@ -114,14 +114,6 @@ impl EntityCache { if let Some(op) = self.handler_updates.get(eref).cloned() { entity = op.apply_to(entity) } - match entity { - Some(ref e) => { - let list = e.clone().sorted(); - let list = list.iter(); - list.for_each(|(k, v)| println!("{}: {:?}", k, v)); - } - None => println!("get: No entity found for {:?}", eref), - } Ok(entity) } @@ -129,8 +121,11 @@ impl EntityCache { &mut self, eref: &EntityDerived, ) -> Result, s::QueryExecutionError> { - self.current.get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; - let entity = self.current.get_entity(&*self.store, &EntityMultiKey::All(eref.clone()))?; + self.current + .get_entity(&*self.store, &EntityMultiKey::Derived(eref.clone()))?; + let entity = self + .current + .get_entity(&*self.store, &EntityMultiKey::Derived(eref.clone()))?; let entities = match entity { Some(e) => { // retrieve the list from the cache @@ -149,13 +144,8 @@ impl EntityCache { } entities } - None => { - println!("get_derived: No entity found for {:?}", eref); - Vec::new() - } + None => Vec::new(), }; - // self.store.get_derived() - // todo!(); Ok(entities) } @@ -271,7 +261,7 @@ impl EntityCache { let missing = self.updates.keys().filter(|key| { !self .current - .contains_key(&EntityMultiKey::Equal((*key).clone())) + .contains_key(&EntityMultiKey::Single((*key).clone())) }); // For immutable types, we assume that the subgraph is well-behaved, @@ -285,13 +275,13 @@ impl EntityCache { for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? 
{ self.current - .insert(EntityMultiKey::Equal(entity_key), Some(entity)); + .insert(EntityMultiKey::Single(entity_key), Some(entity)); } let mut mods = Vec::new(); for (entity_key, update) in self.updates { use s::EntityModification::*; - let key = EntityMultiKey::Equal(entity_key.clone()); + let key = EntityMultiKey::Single(entity_key.clone()); let current = self.current.remove(&key).and_then(|entity| entity); let modification = match (current, update) { @@ -362,7 +352,7 @@ impl LfuCache> { ) -> Result, s::QueryExecutionError> { match self.get(key) { None => match key { - EntityMultiKey::Equal(store_key) => { + EntityMultiKey::Single(store_key) => { let mut entity = store.get(store_key)?; if let Some(entity) = &mut entity { // `__typename` is for queries not for mappings. @@ -371,7 +361,7 @@ impl LfuCache> { self.insert(key.clone(), entity.clone()); Ok(entity) } - EntityMultiKey::All(derived) => { + EntityMultiKey::Derived(derived) => { // we get all entities with the derived field let mut entities = store.get_derived(derived)?; // we asssume that derived fields contains ids @@ -383,7 +373,7 @@ impl LfuCache> { // we insert each entity into the cache with the id key let key = EntityKey::from(&entity.id().unwrap().into(), derived); - self.insert(EntityMultiKey::Equal(key), Some(entity.clone())); + self.insert(EntityMultiKey::Single(key), Some(entity.clone())); // return the value to save Value::String(entity.id().unwrap()) }); diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 6ff6aa0a733..7c35cb359ca 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -159,10 +159,10 @@ pub struct EntityDerived { #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum EntityMultiKey { /// A filter that matches all entities of a given type. - All(EntityDerived), + Derived(EntityDerived), /// A filter that matches a specific entity. 
- Equal(EntityKey), + Single(EntityKey), } impl EntityKey { @@ -1163,10 +1163,7 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn get_derived( - &self, - _query: &EntityDerived, - ) -> Result, StoreError> { + fn get_derived(&self, _query: &EntityDerived) -> Result, StoreError> { Ok(vec![]) } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index b33bd1ce5f2..d100dfaa0d8 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -187,10 +187,7 @@ pub trait ReadStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Reverse lookup - fn get_derived( - &self, - entity_derived: &EntityDerived - ) -> Result, StoreError>; + fn get_derived(&self, entity_derived: &EntityDerived) -> Result, StoreError>; fn input_schema(&self) -> Arc; } @@ -208,10 +205,7 @@ impl ReadStore for Arc { (**self).get_many(keys) } - fn get_derived( - &self, - entity_derived: &EntityDerived - ) -> Result, StoreError> { + fn get_derived(&self, entity_derived: &EntityDerived) -> Result, StoreError> { (**self).get_derived(entity_derived) } diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index 4f1218955af..b6b717c9979 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -179,8 +179,8 @@ impl GasSizeOf for EntityDerived { impl GasSizeOf for EntityMultiKey { fn gas_size_of(&self) -> Gas { match self { - EntityMultiKey::Equal(key) => key.gas_size_of(), - EntityMultiKey::All(key) => key.gas_size_of(), + EntityMultiKey::Single(key) => key.gas_size_of(), + EntityMultiKey::Derived(key) => key.gas_size_of(), } } } diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 790e5a80f8e..6c1feaf9e83 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -138,8 +138,8 @@ impl CacheWeight for EntityDerived { impl CacheWeight for EntityMultiKey { fn indirect_weight(&self) -> usize { match self { - 
EntityMultiKey::All(derived) => derived.indirect_weight(), - EntityMultiKey::Equal(key) => key.indirect_weight(), + EntityMultiKey::Derived(derived) => derived.indirect_weight(), + EntityMultiKey::Single(key) => key.indirect_weight(), } } } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 6bb2f89f9ce..a22f2395aae 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -239,7 +239,6 @@ impl HostExports { Ok(result) } - pub(crate) fn store_get_derived( &self, state: &mut BlockState, diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 4cd5e97cea3..a572e6a29f6 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -41,7 +41,7 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; -use crate::relational_queries::{FindChangesQuery, FindPossibleDeletionsQuery, FindDerivedQuery}; +use crate::relational_queries::{FindChangesQuery, FindDerivedQuery, FindPossibleDeletionsQuery}; use crate::{ primary::{Namespace, Site}, relational_queries::{ @@ -566,7 +566,7 @@ impl Layout { ) -> Result, StoreError> { let table = self.table_for_entity(&key.entity_type)?; let query = FindDerivedQuery::new(table, key, block); - + let mut entities = Vec::new(); for data in query.load::(conn)? 
{ diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index a07c654bf8a..f7f40fa91c0 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -12,7 +12,7 @@ use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; -use graph::components::store::{EntityKey, EntityDerived}; +use graph::components::store::{EntityDerived, EntityKey}; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{ diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 5d587c1cd1f..b259d460425 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -256,7 +256,8 @@ impl SyncStore { block: BlockNumber, ) -> Result, StoreError> { retry::forever(&self.logger, "get_where", || { - self.writable.get_where(self.site.cheap_clone(), key, block) + self.writable + .get_derived(self.site.cheap_clone(), key, block) }) } From a371de6ebc65d9a88266e354d98df269f0feaf80 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 12:07:26 -0300 Subject: [PATCH 0086/2104] store,runtime: cleanup code --- runtime/wasm/src/module/mod.rs | 7 +------ store/postgres/src/relational.rs | 1 - store/postgres/src/relational_queries.rs | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 795070b4ddd..5a1e9a14c82 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1087,12 +1087,7 @@ impl WasmInstanceContext { gas, )?; - let entities: Vec> = entities - .iter() - .map(|entity| entity.clone().sorted()) - .collect(); - // .map(|name| asc_new(self, &*name, gas)) - // ..collect(); + let entities: Vec> = entities.iter().map(|entity| entity.clone().sorted()).collect(); let ret = asc_new(self, &entities, gas)?; Ok(ret) } diff --git 
a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index a572e6a29f6..13c9bb90bcf 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -731,7 +731,6 @@ impl Layout { query.query_id, &self.site, )?; - println!("{}", debug_query(&query).to_string()); let query_clone = query.clone(); diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index f7f40fa91c0..0bd28e6e3e5 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1690,7 +1690,7 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { // Generate // select '..' as entity, to_jsonb(e.*) as data - // from schema.table e where id = $1 + // from schema.table e where field = $1 out.push_sql("select "); out.push_bind_param::(&self.table.object.as_str())?; out.push_sql(" as entity, to_jsonb(e.*) as data\n"); From cc1cdbc58d4bc6539bc01bc28e9e8bd7c8637eea Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 14:13:52 -0300 Subject: [PATCH 0087/2104] graph: fix missing get_derived on mock --- graph/tests/entity_cache.rs | 10 +++++++++- runtime/wasm/src/module/mod.rs | 1 - 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index bbec082ec3b..bbf0df9c13f 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use graph::components::store::{ DeploymentCursorTracker, EntityKey, EntityType, ReadStore, StoredDynamicDataSource, - WritableStore, + WritableStore, EntityDerived, }; use graph::{ components::store::{DeploymentId, DeploymentLocator}, @@ -60,6 +60,14 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } + fn get_derived( + &self, + _key: &EntityDerived, + ) -> Result, StoreError> { + let values: Vec = self.get_many_res.clone().into_iter().map(|(_, v)| v).collect(); + Ok(values) + } + fn input_schema(&self) -> Arc { 
SCHEMA.clone() } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 5a1e9a14c82..bd6d606ff30 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1078,7 +1078,6 @@ impl WasmInstanceContext { let entity_type: String = asc_get(self, entity_ptr, gas)?; let field: String = asc_get(self, field_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; - println!("store_get_derived: {} {} {}", entity_type, field, id); let entities = self.ctx.host_exports.store_get_derived( &mut self.ctx.state, entity_type.clone(), From 065569d9cc29e2010fd11032e3e01a5fd5e1be85 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 14:16:31 -0300 Subject: [PATCH 0088/2104] all: cargo format --- graph/tests/entity_cache.rs | 16 +++++++++------- runtime/wasm/src/module/mod.rs | 5 ++++- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index bbf0df9c13f..7024db0de5e 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -10,8 +10,8 @@ use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use graph::components::store::{ - DeploymentCursorTracker, EntityKey, EntityType, ReadStore, StoredDynamicDataSource, - WritableStore, EntityDerived, + DeploymentCursorTracker, EntityDerived, EntityKey, EntityType, ReadStore, + StoredDynamicDataSource, WritableStore, }; use graph::{ components::store::{DeploymentId, DeploymentLocator}, @@ -60,11 +60,13 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } - fn get_derived( - &self, - _key: &EntityDerived, - ) -> Result, StoreError> { - let values: Vec = self.get_many_res.clone().into_iter().map(|(_, v)| v).collect(); + fn get_derived(&self, _key: &EntityDerived) -> Result, StoreError> { + let values: Vec = self + .get_many_res + .clone() + .into_iter() + .map(|(_, v)| v) + .collect(); Ok(values) } diff --git a/runtime/wasm/src/module/mod.rs 
b/runtime/wasm/src/module/mod.rs index bd6d606ff30..27d6dd69693 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1086,7 +1086,10 @@ impl WasmInstanceContext { gas, )?; - let entities: Vec> = entities.iter().map(|entity| entity.clone().sorted()).collect(); + let entities: Vec> = entities + .iter() + .map(|entity| entity.clone().sorted()) + .collect(); let ret = asc_new(self, &entities, gas)?; Ok(ret) } From 6b85ebd53ba738b1e2395e42d2ee33b278b3e183 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 18:50:24 -0300 Subject: [PATCH 0089/2104] change to load_related, use entitykey in cache --- core/src/subgraph/state.rs | 4 +- graph/src/components/store/entity_cache.rs | 141 +++++++-------------- graph/src/components/store/mod.rs | 14 +- graph/src/components/subgraph/instance.rs | 4 +- graph/src/data/schema.rs | 39 +++++- graph/src/runtime/gas/size_of.rs | 11 +- graph/src/util/cache_weight.rs | 19 +-- runtime/wasm/src/host_exports.rs | 6 +- runtime/wasm/src/module/mod.rs | 24 ++-- 9 files changed, 104 insertions(+), 158 deletions(-) diff --git a/core/src/subgraph/state.rs b/core/src/subgraph/state.rs index 19eab13b2a0..0d5edd84b65 100644 --- a/core/src/subgraph/state.rs +++ b/core/src/subgraph/state.rs @@ -1,5 +1,5 @@ use graph::{ - components::store::EntityMultiKey, + components::store::EntityKey, prelude::Entity, util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}, }; @@ -18,5 +18,5 @@ pub struct IndexingState { /// - The time THRESHOLD is passed /// - Or the subgraph has triggers for the block pub skip_ptr_updates_timer: Instant, - pub entity_lfu_cache: LfuCache>, + pub entity_lfu_cache: LfuCache>, } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 29eaea5d616..1a84d2ab3ea 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -3,11 +3,11 @@ use std::collections::HashMap; use std::fmt::{self, Debug}; 
use std::sync::Arc; -use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation, Value}; +use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; use crate::prelude::{Schema, ENV_VARS}; use crate::util::lfu_cache::LfuCache; -use super::{EntityDerived, EntityMultiKey}; +use super::EntityDerived; /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. This struct tracks @@ -19,7 +19,7 @@ use super::{EntityDerived, EntityMultiKey}; pub struct EntityCache { /// The state of entities in the store. An entry of `None` /// means that the entity is not present in the store - current: LfuCache>, + current: LfuCache>, /// The accumulated changes to an entity. updates: HashMap, @@ -47,7 +47,7 @@ impl Debug for EntityCache { pub struct ModificationsAndCache { pub modifications: Vec, - pub entity_lfu_cache: LfuCache>, + pub entity_lfu_cache: LfuCache>, } impl EntityCache { @@ -64,7 +64,7 @@ impl EntityCache { pub fn with_current( store: Arc, - current: LfuCache>, + current: LfuCache>, ) -> EntityCache { EntityCache { current, @@ -101,9 +101,7 @@ impl EntityCache { pub fn get(&mut self, eref: &EntityKey) -> Result, s::QueryExecutionError> { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. - let mut entity = self - .current - .get_entity(&*self.store, &EntityMultiKey::Single(eref.clone()))?; + let mut entity = self.current.get_entity(&*self.store, eref)?; // Always test the cache consistency in debug mode. 
debug_assert!(entity == self.store.get(eref).unwrap()); @@ -117,35 +115,24 @@ impl EntityCache { Ok(entity) } - pub fn get_derived( - &mut self, - eref: &EntityDerived, - ) -> Result, s::QueryExecutionError> { - self.current - .get_entity(&*self.store, &EntityMultiKey::Derived(eref.clone()))?; - let entity = self - .current - .get_entity(&*self.store, &EntityMultiKey::Derived(eref.clone()))?; - let entities = match entity { - Some(e) => { - // retrieve the list from the cache - let mut entities = Vec::new(); - if let Some(Value::List(list)) = e.get(eref.entity_field.as_str()) { - for id in list.iter() { - // we just created - if let Value::String(id) = id { - let key = EntityKey::from(id, eref); - match self.get(&key) { - Ok(Some(value)) => entities.push(value), - _ => (), - } - } - } - } - entities - } - None => Vec::new(), + pub fn load_related(&mut self, eref: &EntityDerived) -> Result, anyhow::Error> { + let (base_type, field) = self.schema.get_type_for_field(eref)?; + + let key = EntityDerived { + entity_id: eref.entity_id.clone(), + entity_field: field.into(), + entity_type: base_type.into(), + causality_region: eref.causality_region, }; + + let entities = self.store.get_derived(&key)?; + entities + .iter() + .filter(|e| e.contains_key("id")) + .for_each(|e| { + let key = EntityKey::from(&e.id().unwrap().into(), eref); + self.current.insert(key, Some(e.clone())); + }); Ok(entities) } @@ -258,11 +245,10 @@ impl EntityCache { // The first step is to make sure all entities being set are in `self.current`. // For each subgraph, we need a map of entity type to missing entity ids. 
- let missing = self.updates.keys().filter(|key| { - !self - .current - .contains_key(&EntityMultiKey::Single((*key).clone())) - }); + let missing = self + .updates + .keys() + .filter(|key| !self.current.contains_key(key)); // For immutable types, we assume that the subgraph is well-behaved, // and all updated immutable entities are in fact new, and skip @@ -274,14 +260,12 @@ impl EntityCache { let missing = missing.filter(|key| !self.schema.is_immutable(&key.entity_type)); for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? { - self.current - .insert(EntityMultiKey::Single(entity_key), Some(entity)); + self.current.insert(entity_key, Some(entity)); } let mut mods = Vec::new(); - for (entity_key, update) in self.updates { + for (key, update) in self.updates { use s::EntityModification::*; - let key = EntityMultiKey::Single(entity_key.clone()); let current = self.current.remove(&key).and_then(|entity| entity); let modification = match (current, update) { @@ -291,10 +275,7 @@ impl EntityCache { let mut data = Entity::new(); data.merge_remove_null_fields(updates); self.current.insert(key.clone(), Some(data.clone())); - Some(Insert { - key: entity_key, - data, - }) + Some(Insert { key, data }) } // Entity may have been changed (Some(current), EntityOp::Update(updates)) => { @@ -302,10 +283,7 @@ impl EntityCache { data.merge_remove_null_fields(updates); self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { - key: entity_key, - data, - }) + Some(Overwrite { key, data }) } else { None } @@ -314,10 +292,7 @@ impl EntityCache { (Some(current), EntityOp::Overwrite(data)) => { self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { - key: entity_key, - data, - }) + Some(Overwrite { key, data }) } else { None } @@ -325,7 +300,7 @@ impl EntityCache { // Existing entity was deleted (Some(_), EntityOp::Remove) => { self.current.insert(key.clone(), None); - Some(Remove { key: 
entity_key }) + Some(Remove { key }) } // Entity was deleted, but it doesn't exist in the store (None, EntityOp::Remove) => None, @@ -343,53 +318,23 @@ impl EntityCache { } } -impl LfuCache> { +impl LfuCache> { // Helper for cached lookup of an entity. fn get_entity( &mut self, store: &(impl s::ReadStore + ?Sized), - key: &EntityMultiKey, + key: &EntityKey, ) -> Result, s::QueryExecutionError> { match self.get(key) { - None => match key { - EntityMultiKey::Single(store_key) => { - let mut entity = store.get(store_key)?; - if let Some(entity) = &mut entity { - // `__typename` is for queries not for mappings. - entity.remove("__typename"); - } - self.insert(key.clone(), entity.clone()); - Ok(entity) - } - EntityMultiKey::Derived(derived) => { - // we get all entities with the derived field - let mut entities = store.get_derived(derived)?; - // we asssume that derived fields contains ids - let entities = entities - .iter_mut() - .filter(|entity| entity.contains_key("id")) - .map(|entity| { - entity.remove("__typename"); - - // we insert each entity into the cache with the id key - let key = EntityKey::from(&entity.id().unwrap().into(), derived); - self.insert(EntityMultiKey::Single(key), Some(entity.clone())); - // return the value to save - Value::String(entity.id().unwrap()) - }); - let entities = entities.collect(); - - // create entity with the list of ids - let mut entity = Entity::new(); - entity.insert(derived.entity_field.to_string(), Value::List(entities)); - - // insert to cache the list of ids - // todo remove this comment - // self.insert(key.clone(), Some(entity.clone())); - - Ok(Some(entity.clone())) + None => { + let mut entity = store.get(key)?; + if let Some(entity) = &mut entity { + // `__typename` is for queries not for mappings. 
+ entity.remove("__typename"); } - }, + self.insert(key.clone(), entity.clone()); + Ok(entity) + } Some(data) => Ok(data.clone()), } } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 7c35cb359ca..776592868e3 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -142,12 +142,11 @@ pub struct EntityKey { pub struct EntityDerived { /// Name of the entity type. pub entity_type: EntityType, - - pub entity_field: Word, - /// ID of the individual entity. pub entity_id: Word, + pub entity_field: Word, + /// This is the causality region of the data source that created the entity. /// /// In the case of an entity lookup, this is the causality region of the data source that is @@ -156,15 +155,6 @@ pub struct EntityDerived { pub causality_region: CausalityRegion, } -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum EntityMultiKey { - /// A filter that matches all entities of a given type. - Derived(EntityDerived), - - /// A filter that matches a specific entity. 
- Single(EntityKey), -} - impl EntityKey { // For use in tests only #[cfg(debug_assertions)] diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index b7f2a1efe19..f3df2c672e4 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -1,6 +1,6 @@ use crate::{ blockchain::Blockchain, - components::store::{EntityMultiKey, ReadStore, StoredDynamicDataSource}, + components::store::{EntityKey, ReadStore, StoredDynamicDataSource}, data::subgraph::schema::SubgraphError, data_source::DataSourceTemplate, prelude::*, @@ -35,7 +35,7 @@ pub struct BlockState { } impl BlockState { - pub fn new(store: impl ReadStore, lfu_cache: LfuCache>) -> Self { + pub fn new(store: impl ReadStore, lfu_cache: LfuCache>) -> Self { BlockState { entity_cache: EntityCache::with_current(Arc::new(store), lfu_cache), deterministic_errors: Vec::new(), diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 899ef70fc93..216e7952229 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,5 +1,5 @@ use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, EntityType}; +use crate::components::store::{EntityDerived, EntityKey, EntityType, SubgraphStore}; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::{self, ValueType}; @@ -539,6 +539,43 @@ impl Schema { } } + pub fn get_type_for_field(&self, key: &EntityDerived) -> Result<(&str, &str), Error> { + let field = self + .document + .get_object_type_definition(key.entity_type.as_str()) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, + key.entity_id, + key.entity_type, + ) + })? 
+ .field(&key.entity_field) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown field `{}`", + key.entity_type, + key.entity_id, + key.entity_field, + ) + })?; + if field.is_derived() { + let derived_from = field.find_directive("derivedFrom").unwrap(); + let base_type = field.field_type.get_base_type(); + let field = derived_from.argument("field").unwrap(); + + Ok((base_type, field.as_str().unwrap())) + } else { + Err(anyhow!( + "Entity {}[{}]: field `{}` is not derived", + key.entity_type, + key.entity_id, + key.entity_field, + )) + } + } + pub fn is_immutable(&self, entity_type: &EntityType) -> bool { self.immutable_types.contains(entity_type) } diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index b6b717c9979..4466919dd01 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -1,7 +1,7 @@ //! Various implementations of GasSizeOf; use crate::{ - components::store::{EntityDerived, EntityKey, EntityMultiKey, EntityType}, + components::store::{EntityDerived, EntityKey, EntityType}, data::store::{scalar::Bytes, Value}, prelude::{BigDecimal, BigInt}, }; @@ -176,15 +176,6 @@ impl GasSizeOf for EntityDerived { } } -impl GasSizeOf for EntityMultiKey { - fn gas_size_of(&self) -> Gas { - match self { - EntityMultiKey::Single(key) => key.gas_size_of(), - EntityMultiKey::Derived(key) => key.gas_size_of(), - } - } -} - impl GasSizeOf for EntityType { fn gas_size_of(&self) -> Gas { self.as_str().gas_size_of() diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 6c1feaf9e83..af15a82b25d 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -1,5 +1,5 @@ use crate::{ - components::store::{EntityDerived, EntityKey, EntityMultiKey, EntityType}, + components::store::{EntityKey, EntityType}, data::value::Word, prelude::{q, BigDecimal, BigInt, Value}, }; @@ -127,23 +127,6 @@ impl CacheWeight for EntityKey { } } -impl CacheWeight for EntityDerived { - fn 
indirect_weight(&self) -> usize { - self.entity_id.indirect_weight() - + self.entity_type.indirect_weight() - + self.entity_field.indirect_weight() - } -} - -impl CacheWeight for EntityMultiKey { - fn indirect_weight(&self) -> usize { - match self { - EntityMultiKey::Derived(derived) => derived.indirect_weight(), - EntityMultiKey::Single(key) => key.indirect_weight(), - } - } -} - impl CacheWeight for [u8; 32] { fn indirect_weight(&self) -> usize { 0 diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index a22f2395aae..0f695ff742e 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -239,12 +239,12 @@ impl HostExports { Ok(result) } - pub(crate) fn store_get_derived( + pub(crate) fn store_load_related( &self, state: &mut BlockState, entity_type: String, - entity_field: String, entity_id: String, + entity_field: String, gas: &GasCounter, ) -> Result, anyhow::Error> { let store_key = EntityDerived { @@ -255,7 +255,7 @@ impl HostExports { }; self.check_entity_type_access(&store_key.entity_type)?; - let result = state.entity_cache.get_derived(&store_key)?; + let result = state.entity_cache.load_related(&store_key)?; gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; Ok(result) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 27d6dd69693..b8e6a0cbc36 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -528,12 +528,12 @@ impl WasmInstance { link!("store.get", store_get, "host_export_store_get", entity, id); link!( - "store.getDerived", - store_get_derived, + "store.loadRelated", + store_load_related, "host_export_store_get_derived", entity, - field, - id + id, + field ); link!( "store.set", @@ -1067,22 +1067,22 @@ impl WasmInstanceContext { Ok(ret) } - /// function store.getDerived(entity: string, field: string, id: string): Entity[] - pub fn store_get_derived( + /// function 
store.loadRelated(entity_type: string, id: string, field: string): Array + pub fn store_load_related( &mut self, gas: &GasCounter, - entity_ptr: AscPtr, - field_ptr: AscPtr, + entity_type_ptr: AscPtr, id_ptr: AscPtr, + field_ptr: AscPtr, ) -> Result>>, HostExportError> { - let entity_type: String = asc_get(self, entity_ptr, gas)?; - let field: String = asc_get(self, field_ptr, gas)?; + let entity_type: String = asc_get(self, entity_type_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; - let entities = self.ctx.host_exports.store_get_derived( + let field: String = asc_get(self, field_ptr, gas)?; + let entities = self.ctx.host_exports.store_load_related( &mut self.ctx.state, entity_type.clone(), - field.clone(), id.clone(), + field.clone(), gas, )?; From 34c8f0ee021c6b230b749bae9f9b5d9f6f04eec1 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 18:55:30 -0300 Subject: [PATCH 0090/2104] graph: convert string to entity type --- graph/src/components/store/entity_cache.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 1a84d2ab3ea..a5ccd3381b3 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -7,7 +7,7 @@ use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOpe use crate::prelude::{Schema, ENV_VARS}; use crate::util::lfu_cache::LfuCache; -use super::EntityDerived; +use super::{EntityDerived, EntityType}; /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. 
This struct tracks @@ -121,7 +121,7 @@ impl EntityCache { let key = EntityDerived { entity_id: eref.entity_id.clone(), entity_field: field.into(), - entity_type: base_type.into(), + entity_type: EntityType::new(base_type.to_string()), causality_region: eref.causality_region, }; From 0c33d3fe3189bf262681ab054346ddaabe49812b Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 8 Mar 2023 19:15:00 -0300 Subject: [PATCH 0091/2104] graph,graphql,runtime,store: refact structs --- graph/src/components/store/entity_cache.rs | 13 +++++++---- graph/src/components/store/mod.rs | 27 ++++++++++++++++++---- graph/src/components/store/traits.rs | 4 ++-- graph/src/data/schema.rs | 4 ++-- graph/src/runtime/gas/size_of.rs | 4 ++-- graph/tests/entity_cache.rs | 4 ++-- runtime/wasm/src/host_exports.rs | 4 ++-- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational.rs | 4 ++-- store/postgres/src/relational_queries.rs | 8 +++---- store/postgres/src/writable.rs | 10 ++++---- 11 files changed, 52 insertions(+), 32 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index a5ccd3381b3..fa06205983f 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -7,7 +7,7 @@ use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOpe use crate::prelude::{Schema, ENV_VARS}; use crate::util::lfu_cache::LfuCache; -use super::{EntityDerived, EntityType}; +use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest}; /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. 
This struct tracks @@ -115,13 +115,16 @@ impl EntityCache { Ok(entity) } - pub fn load_related(&mut self, eref: &EntityDerived) -> Result, anyhow::Error> { + pub fn load_related( + &mut self, + eref: &LoadRelatedRequest, + ) -> Result, anyhow::Error> { let (base_type, field) = self.schema.get_type_for_field(eref)?; - let key = EntityDerived { - entity_id: eref.entity_id.clone(), - entity_field: field.into(), + let key = DerivedEntityQuery { entity_type: EntityType::new(base_type.to_string()), + entity_field: field.into(), + value: eref.entity_id.clone(), causality_region: eref.causality_region, }; diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 776592868e3..33a6d7cdfa8 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -138,14 +138,31 @@ pub struct EntityKey { pub causality_region: CausalityRegion, } -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct EntityDerived { +#[derive(Debug, Clone)] +pub struct LoadRelatedRequest { /// Name of the entity type. pub entity_type: EntityType, /// ID of the individual entity. pub entity_id: Word, + /// Field the shall be loaded + pub entity_field: Word, + + /// This is the causality region of the data source that created the entity. + /// + /// In the case of an entity lookup, this is the causality region of the data source that is + /// doing the lookup. So if the entity exists but was created on a different causality region, + /// the lookup will return empty. + pub causality_region: CausalityRegion, +} +#[derive(Debug)] +pub struct DerivedEntityQuery { + /// Name of the entity to search + pub entity_type: EntityType, + /// The field to check pub entity_field: Word, + /// The value to compare against + pub value: Word, /// This is the causality region of the data source that created the entity. 
/// @@ -166,8 +183,8 @@ impl EntityKey { } } - pub fn from(id: &String, entity_derived: &EntityDerived) -> Self { - let clone = entity_derived.clone(); + pub fn from(id: &String, load_related_request: &LoadRelatedRequest) -> Self { + let clone = load_related_request.clone(); Self { entity_id: id.clone().into(), entity_type: clone.entity_type, @@ -1153,7 +1170,7 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn get_derived(&self, _query: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, _query: &DerivedEntityQuery) -> Result, StoreError> { Ok(vec![]) } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index d100dfaa0d8..094be6fa373 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -187,7 +187,7 @@ pub trait ReadStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Reverse lookup - fn get_derived(&self, entity_derived: &EntityDerived) -> Result, StoreError>; + fn get_derived(&self, entity_derived: &DerivedEntityQuery) -> Result, StoreError>; fn input_schema(&self) -> Arc; } @@ -205,7 +205,7 @@ impl ReadStore for Arc { (**self).get_many(keys) } - fn get_derived(&self, entity_derived: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, entity_derived: &DerivedEntityQuery) -> Result, StoreError> { (**self).get_derived(entity_derived) } diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 216e7952229..3d3ef328d9f 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,5 +1,5 @@ use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityDerived, EntityKey, EntityType, SubgraphStore}; +use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest, SubgraphStore}; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::{self, ValueType}; @@ -539,7 +539,7 @@ impl Schema { } } - 
pub fn get_type_for_field(&self, key: &EntityDerived) -> Result<(&str, &str), Error> { + pub fn get_type_for_field(&self, key: &LoadRelatedRequest) -> Result<(&str, &str), Error> { let field = self .document .get_object_type_definition(key.entity_type.as_str()) diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index 4466919dd01..8f4e535a1fd 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -1,7 +1,7 @@ //! Various implementations of GasSizeOf; use crate::{ - components::store::{EntityDerived, EntityKey, EntityType}, + components::store::{EntityKey, EntityType, LoadRelatedRequest}, data::store::{scalar::Bytes, Value}, prelude::{BigDecimal, BigInt}, }; @@ -168,7 +168,7 @@ impl GasSizeOf for EntityKey { } } -impl GasSizeOf for EntityDerived { +impl GasSizeOf for LoadRelatedRequest { fn gas_size_of(&self) -> Gas { self.entity_type.gas_size_of() + self.entity_id.gas_size_of() diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index 7024db0de5e..b3941f608c1 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -10,7 +10,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use graph::components::store::{ - DeploymentCursorTracker, EntityDerived, EntityKey, EntityType, ReadStore, + DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, ReadStore, StoredDynamicDataSource, WritableStore, }; use graph::{ @@ -60,7 +60,7 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } - fn get_derived(&self, _key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, _key: &DerivedEntityQuery) -> Result, StoreError> { let values: Vec = self .get_many_res .clone() diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 0f695ff742e..e943749b332 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -9,7 +9,7 @@ use wasmtime::Trap; use web3::types::H160; use 
graph::blockchain::Blockchain; -use graph::components::store::{EnsLookup, EntityDerived}; +use graph::components::store::{EnsLookup, LoadRelatedRequest}; use graph::components::store::{EntityKey, EntityType}; use graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, @@ -247,7 +247,7 @@ impl HostExports { entity_field: String, gas: &GasCounter, ) -> Result, anyhow::Error> { - let store_key = EntityDerived { + let store_key = LoadRelatedRequest { entity_type: EntityType::new(entity_type), entity_id: entity_id.into(), entity_field: entity_field.into(), diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 08471490443..1d1c82bfaa8 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1122,7 +1122,7 @@ impl DeploymentStore { pub(crate) fn get_derived( &self, site: Arc, - key: &EntityDerived, + key: &DerivedEntityQuery, block: BlockNumber, ) -> Result, StoreError> { let conn = self.get_conn()?; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 13c9bb90bcf..3e3ed41ff59 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -49,7 +49,7 @@ use crate::{ FilterQuery, FindManyQuery, FindQuery, InsertQuery, RevertClampQuery, RevertRemoveQuery, }, }; -use graph::components::store::{EntityDerived, EntityKey, EntityType}; +use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, DocumentExt, ObjectTypeExt}; use graph::data::schema::{FulltextConfig, FulltextDefinition, Schema, SCHEMA_TYPE_NAME}; use graph::data::store::BYTES_SCALAR; @@ -561,7 +561,7 @@ impl Layout { pub fn find_derived( &self, conn: &PgConnection, - key: &EntityDerived, + key: &DerivedEntityQuery, block: BlockNumber, ) -> Result, StoreError> { let table = self.table_for_entity(&key.entity_type)?; diff --git a/store/postgres/src/relational_queries.rs 
b/store/postgres/src/relational_queries.rs index 0bd28e6e3e5..e7f891d8fd8 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -12,7 +12,7 @@ use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; -use graph::components::store::{EntityDerived, EntityKey}; +use graph::components::store::{DerivedEntityQuery, EntityKey}; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -1673,7 +1673,7 @@ impl<'a, Conn> RunQueryDsl for FindManyQuery<'a> {} #[derive(Debug, Clone, Constructor)] pub struct FindDerivedQuery<'a> { table: &'a Table, - key: &'a EntityDerived, + key: &'a DerivedEntityQuery, block: BlockNumber, } @@ -1681,10 +1681,10 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { fn walk_ast(&self, mut out: AstPass) -> QueryResult<()> { out.unsafe_to_cache_prepared(); - let EntityDerived { + let DerivedEntityQuery { entity_type: _, entity_field, - entity_id, + value: entity_id, causality_region, } = self.key; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index b259d460425..d8b434be54c 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -4,7 +4,7 @@ use std::sync::Mutex; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::{DeploymentCursorTracker, EntityDerived, EntityKey, ReadStore}; +use graph::components::store::{DeploymentCursorTracker, DerivedEntityQuery, EntityKey, ReadStore}; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -252,7 +252,7 @@ impl SyncStore { fn get_derived( &self, - key: &EntityDerived, + key: &DerivedEntityQuery, block: BlockNumber, ) -> Result, StoreError> { retry::forever(&self.logger, "get_where", || { @@ -756,7 +756,7 @@ impl Queue { Ok(map) } - fn 
get_derived(&self, key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { let tracker = BlockTracker::new(); // TODO implement the whole async self.store.get_derived(key, tracker.query_block()) @@ -920,7 +920,7 @@ impl Writer { } } - fn get_derived(&self, key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { match self { Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX), Writer::Async(queue) => queue.get_derived(key), @@ -1016,7 +1016,7 @@ impl ReadStore for WritableStore { self.writer.get_many(keys) } - fn get_derived(&self, key: &EntityDerived) -> Result, StoreError> { + fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { self.writer.get_derived(key) } From 2b2762f5c1b0c416a87519c6319667a7893ca05d Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 14 Mar 2023 19:22:15 -0300 Subject: [PATCH 0092/2104] graph, store: add get_derived for queue --- graph/src/components/store/entity_cache.rs | 11 ++-- graph/src/components/store/mod.rs | 7 ++- graph/src/components/store/traits.rs | 10 +++- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational.rs | 13 +++-- store/postgres/src/writable.rs | 63 +++++++++++++++++++--- 6 files changed, 83 insertions(+), 23 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index fa06205983f..cd114f6a16c 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -129,13 +129,10 @@ impl EntityCache { }; let entities = self.store.get_derived(&key)?; - entities - .iter() - .filter(|e| e.contains_key("id")) - .for_each(|e| { - let key = EntityKey::from(&e.id().unwrap().into(), eref); - self.current.insert(key, Some(e.clone())); - }); + entities.iter().for_each(|(key, e)| { + self.current.insert(key.clone(), Some(e.clone())); + }); + let 
entities: Vec = entities.values().cloned().collect(); Ok(entities) } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 33a6d7cdfa8..c18352e4315 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1170,8 +1170,11 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn get_derived(&self, _query: &DerivedEntityQuery) -> Result, StoreError> { - Ok(vec![]) + fn get_derived( + &self, + _query: &DerivedEntityQuery, + ) -> Result, StoreError> { + Ok(BTreeMap::new()) } fn input_schema(&self) -> Arc { diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 094be6fa373..71a3dfd20d2 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -187,7 +187,10 @@ pub trait ReadStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Reverse lookup - fn get_derived(&self, entity_derived: &DerivedEntityQuery) -> Result, StoreError>; + fn get_derived( + &self, + entity_derived: &DerivedEntityQuery, + ) -> Result, StoreError>; fn input_schema(&self) -> Arc; } @@ -205,7 +208,10 @@ impl ReadStore for Arc { (**self).get_many(keys) } - fn get_derived(&self, entity_derived: &DerivedEntityQuery) -> Result, StoreError> { + fn get_derived( + &self, + entity_derived: &DerivedEntityQuery, + ) -> Result, StoreError> { (**self).get_derived(entity_derived) } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 1d1c82bfaa8..798a84e00fe 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1124,7 +1124,7 @@ impl DeploymentStore { site: Arc, key: &DerivedEntityQuery, block: BlockNumber, - ) -> Result, StoreError> { + ) -> Result, StoreError> { let conn = self.get_conn()?; let layout = self.layout(&conn, site)?; layout.find_derived(&conn, key, block) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 
3e3ed41ff59..2edbaaeacfc 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -563,15 +563,22 @@ impl Layout { conn: &PgConnection, key: &DerivedEntityQuery, block: BlockNumber, - ) -> Result, StoreError> { + ) -> Result, StoreError> { let table = self.table_for_entity(&key.entity_type)?; let query = FindDerivedQuery::new(table, key, block); - let mut entities = Vec::new(); + let mut entities = BTreeMap::new(); for data in query.load::(conn)? { + let entity_type = data.entity_type(); let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; - entities.push(entity_data); + let key = EntityKey { + entity_type, + entity_id: entity_data.id()?.into(), + causality_region: CausalityRegion::from_entity(&entity_data), + }; + + entities.insert(key, entity_data); } Ok(entities) } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index d8b434be54c..5134a139a55 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -254,8 +254,8 @@ impl SyncStore { &self, key: &DerivedEntityQuery, block: BlockNumber, - ) -> Result, StoreError> { - retry::forever(&self.logger, "get_where", || { + ) -> Result, StoreError> { + self.retry("get_derived", || { self.writable .get_derived(self.site.cheap_clone(), key, block) }) @@ -756,10 +756,51 @@ impl Queue { Ok(map) } - fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { - let tracker = BlockTracker::new(); - // TODO implement the whole async - self.store.get_derived(key, tracker.query_block()) + fn get_derived( + &self, + key_derived: &DerivedEntityQuery, + ) -> Result, StoreError> { + let mut tracker = BlockTracker::new(); + + // Get entities from entries in the queue + let (mut entities_in_queue, entities_removed) = self.queue.fold( + (BTreeMap::new(), Vec::new()), + |(mut map, mut remove_list): (BTreeMap, Vec), req| { + tracker.update(req.as_ref()); + match req.as_ref() { + Request::Write { + block_ptr, mods, .. 
+ } => { + if tracker.visible(block_ptr) { + for emod in mods { + let key = emod.entity_ref(); + // The key must be removed to avoid overwriting it with a stale value. + if key_derived.entity_type == key.entity_type { + match emod.entity() { + Some(entity) => { + map.insert(key.clone(), entity.clone()); + } + None => { + remove_list.push(key.clone()); + } + } + } + } + } + } + Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } + } + (map, remove_list) + }, + ); + let mut items_from_database = self.store.get_derived(key_derived, tracker.query_block())?; + // Remove any entities that were removed in the queue + items_from_database.retain(|key, _item| !entities_removed.contains(key)); + + // Extend the store results with the entities from the queue. + entities_in_queue.extend(items_from_database); + + Ok(entities_in_queue) } /// Load dynamic data sources by looking at both the queue and the store @@ -920,7 +961,10 @@ impl Writer { } } - fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { + fn get_derived( + &self, + key: &DerivedEntityQuery, + ) -> Result, StoreError> { match self { Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX), Writer::Async(queue) => queue.get_derived(key), @@ -1016,7 +1060,10 @@ impl ReadStore for WritableStore { self.writer.get_many(keys) } - fn get_derived(&self, key: &DerivedEntityQuery) -> Result, StoreError> { + fn get_derived( + &self, + key: &DerivedEntityQuery, + ) -> Result, StoreError> { self.writer.get_derived(key) } From 27d75aa65678e36041cbec69ec5a2ef4dac8e8e0 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 14 Mar 2023 19:36:09 -0300 Subject: [PATCH 0093/2104] store: fix problem where database overwrite queue --- store/postgres/src/writable.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 5134a139a55..88be64c23b5 100644 --- a/store/postgres/src/writable.rs +++ 
b/store/postgres/src/writable.rs @@ -793,14 +793,16 @@ impl Queue { (map, remove_list) }, ); + // We should filter this in the future to only get the entities that are needed let mut items_from_database = self.store.get_derived(key_derived, tracker.query_block())?; // Remove any entities that were removed in the queue items_from_database.retain(|key, _item| !entities_removed.contains(key)); // Extend the store results with the entities from the queue. - entities_in_queue.extend(items_from_database); + // This overwrites any entitiy from the database with the same key from queue + items_from_database.extend(entities_in_queue); - Ok(entities_in_queue) + Ok(items_from_database) } /// Load dynamic data sources by looking at both the queue and the store From 874637ebf66a68d3002da90efc49579e136051eb Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 14 Mar 2023 19:37:40 -0300 Subject: [PATCH 0094/2104] graph: fix mockstore --- graph/tests/entity_cache.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index b3941f608c1..c844a3a000d 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -60,14 +60,11 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } - fn get_derived(&self, _key: &DerivedEntityQuery) -> Result, StoreError> { - let values: Vec = self - .get_many_res - .clone() - .into_iter() - .map(|(_, v)| v) - .collect(); - Ok(values) + fn get_derived( + &self, + _key: &DerivedEntityQuery, + ) -> Result, StoreError> { + Ok(self.get_many_res.clone()) } fn input_schema(&self) -> Arc { From 4eb67eb4da8b241519e1d63902204c8649e31e91 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 14 Mar 2023 19:41:56 -0300 Subject: [PATCH 0095/2104] store: remove unnecessary mut --- store/postgres/src/writable.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 
88be64c23b5..7ebbbd772ad 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -763,7 +763,7 @@ impl Queue { let mut tracker = BlockTracker::new(); // Get entities from entries in the queue - let (mut entities_in_queue, entities_removed) = self.queue.fold( + let (entities_in_queue, entities_removed) = self.queue.fold( (BTreeMap::new(), Vec::new()), |(mut map, mut remove_list): (BTreeMap, Vec), req| { tracker.update(req.as_ref()); From c57a845626d430989a4ceb000bac51b2a04b09be Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 15 Mar 2023 11:55:43 -0300 Subject: [PATCH 0096/2104] graph,store: add database optimization rename variables to be more meaningful --- graph/src/components/store/entity_cache.rs | 4 +-- graph/src/components/store/traits.rs | 2 +- store/postgres/src/deployment_store.rs | 5 +-- store/postgres/src/relational.rs | 7 ++-- store/postgres/src/relational_queries.rs | 18 ++++++++-- store/postgres/src/writable.rs | 41 +++++++++++----------- 6 files changed, 47 insertions(+), 30 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index cd114f6a16c..a6ed5ec9f6d 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -121,14 +121,14 @@ impl EntityCache { ) -> Result, anyhow::Error> { let (base_type, field) = self.schema.get_type_for_field(eref)?; - let key = DerivedEntityQuery { + let query = DerivedEntityQuery { entity_type: EntityType::new(base_type.to_string()), entity_field: field.into(), value: eref.entity_id.clone(), causality_region: eref.causality_region, }; - let entities = self.store.get_derived(&key)?; + let entities = self.store.get_derived(&query)?; entities.iter().for_each(|(key, e)| { self.current.insert(key.clone(), Some(e.clone())); }); diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 71a3dfd20d2..dc376b7f65b 100644 --- 
a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -189,7 +189,7 @@ pub trait ReadStore: Send + Sync + 'static { /// Reverse lookup fn get_derived( &self, - entity_derived: &DerivedEntityQuery, + query_derived: &DerivedEntityQuery, ) -> Result, StoreError>; fn input_schema(&self) -> Arc; diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 798a84e00fe..844dbe5dc61 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1122,12 +1122,13 @@ impl DeploymentStore { pub(crate) fn get_derived( &self, site: Arc, - key: &DerivedEntityQuery, + derived_query: &DerivedEntityQuery, block: BlockNumber, + excluded_keys: &Option>, ) -> Result, StoreError> { let conn = self.get_conn()?; let layout = self.layout(&conn, site)?; - layout.find_derived(&conn, key, block) + layout.find_derived(&conn, derived_query, block, excluded_keys) } pub(crate) fn get_changes( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 2edbaaeacfc..6d6f9c9c66b 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -561,11 +561,12 @@ impl Layout { pub fn find_derived( &self, conn: &PgConnection, - key: &DerivedEntityQuery, + derived_query: &DerivedEntityQuery, block: BlockNumber, + excluded_keys: &Option>, ) -> Result, StoreError> { - let table = self.table_for_entity(&key.entity_type)?; - let query = FindDerivedQuery::new(table, key, block); + let table = self.table_for_entity(&derived_query.entity_type)?; + let query = FindDerivedQuery::new(table, derived_query, block, excluded_keys); let mut entities = BTreeMap::new(); diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index e7f891d8fd8..00758db5c66 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1673,8 +1673,9 @@ impl<'a, Conn> RunQueryDsl for FindManyQuery<'a> 
{} #[derive(Debug, Clone, Constructor)] pub struct FindDerivedQuery<'a> { table: &'a Table, - key: &'a DerivedEntityQuery, + derived_query: &'a DerivedEntityQuery, block: BlockNumber, + excluded_keys: &'a Option>, } impl<'a> QueryFragment for FindDerivedQuery<'a> { @@ -1686,7 +1687,7 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { entity_field, value: entity_id, causality_region, - } = self.key; + } = self.derived_query; // Generate // select '..' as entity, to_jsonb(e.*) as data @@ -1697,6 +1698,19 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(" from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); + + if let Some(keys) = self.excluded_keys { + let primary_key = self.table.primary_key(); + out.push_identifier(primary_key.name.as_str())?; + out.push_sql(" not in ("); + for (i, value) in keys.iter().enumerate() { + if i > 0 { + out.push_sql(", "); + } + out.push_bind_param::(&value.entity_id.as_str())?; + } + out.push_sql(") and "); + } out.push_identifier(entity_field.as_str())?; out.push_sql(" = "); out.push_bind_param::(&entity_id.as_str())?; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 7ebbbd772ad..97bca3f6e09 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -254,10 +254,11 @@ impl SyncStore { &self, key: &DerivedEntityQuery, block: BlockNumber, + excluded_keys: Option>, ) -> Result, StoreError> { self.retry("get_derived", || { self.writable - .get_derived(self.site.cheap_clone(), key, block) + .get_derived(self.site.cheap_clone(), key, block, &excluded_keys) }) } @@ -763,9 +764,9 @@ impl Queue { let mut tracker = BlockTracker::new(); // Get entities from entries in the queue - let (entities_in_queue, entities_removed) = self.queue.fold( - (BTreeMap::new(), Vec::new()), - |(mut map, mut remove_list): (BTreeMap, Vec), req| { + let entities_in_queue = self.queue.fold( + BTreeMap::new(), + |mut map: BTreeMap>, req| { 
tracker.update(req.as_ref()); match req.as_ref() { Request::Write { @@ -774,33 +775,33 @@ impl Queue { if tracker.visible(block_ptr) { for emod in mods { let key = emod.entity_ref(); - // The key must be removed to avoid overwriting it with a stale value. + // we only select only the entities that match the query if key_derived.entity_type == key.entity_type { - match emod.entity() { - Some(entity) => { - map.insert(key.clone(), entity.clone()); - } - None => { - remove_list.push(key.clone()); - } - } + map.insert(key.clone(), emod.entity().cloned()); } } } } Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } } - (map, remove_list) + map }, ); - // We should filter this in the future to only get the entities that are needed - let mut items_from_database = self.store.get_derived(key_derived, tracker.query_block())?; - // Remove any entities that were removed in the queue - items_from_database.retain(|key, _item| !entities_removed.contains(key)); + + let excluded_keys: Vec = entities_in_queue.keys().cloned().collect(); + + // We filter to exclude the entities ids that we already have from the queue + let mut items_from_database = + self.store + .get_derived(key_derived, tracker.query_block(), Some(excluded_keys))?; // Extend the store results with the entities from the queue. 
// This overwrites any entitiy from the database with the same key from queue - items_from_database.extend(entities_in_queue); + let items_from_queue: BTreeMap = entities_in_queue + .into_iter() + .filter_map(|(key, entity)| entity.map(|entity| (key, entity))) + .collect(); + items_from_database.extend(items_from_queue); Ok(items_from_database) } @@ -968,7 +969,7 @@ impl Writer { key: &DerivedEntityQuery, ) -> Result, StoreError> { match self { - Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX), + Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, None), Writer::Async(queue) => queue.get_derived(key), } } From 7e462090e4905fd5b088043d61db1a8ffd7b132f Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Wed, 15 Mar 2023 16:55:02 -0300 Subject: [PATCH 0097/2104] store: check for length in excluded_keys --- store/postgres/src/relational_queries.rs | 2 +- store/postgres/src/writable.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 00758db5c66..facadb6907d 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1699,7 +1699,7 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); - if let Some(keys) = self.excluded_keys { + if let Some(keys) = self.excluded_keys.as_ref().filter(|keys| keys.len() > 0) { let primary_key = self.table.primary_key(); out.push_identifier(primary_key.name.as_str())?; out.push_sql(" not in ("); diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 97bca3f6e09..2ed8fe59973 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -759,7 +759,7 @@ impl Queue { fn get_derived( &self, - key_derived: &DerivedEntityQuery, + derived_query: &DerivedEntityQuery, ) -> Result, StoreError> { let mut tracker = BlockTracker::new(); @@ 
-775,8 +775,8 @@ impl Queue { if tracker.visible(block_ptr) { for emod in mods { let key = emod.entity_ref(); - // we only select only the entities that match the query - if key_derived.entity_type == key.entity_type { + // we select just the entities that match the query + if derived_query.entity_type == key.entity_type { map.insert(key.clone(), emod.entity().cloned()); } } @@ -793,7 +793,7 @@ impl Queue { // We filter to exclude the entities ids that we already have from the queue let mut items_from_database = self.store - .get_derived(key_derived, tracker.query_block(), Some(excluded_keys))?; + .get_derived(derived_query, tracker.query_block(), Some(excluded_keys))?; // Extend the store results with the entities from the queue. // This overwrites any entitiy from the database with the same key from queue From 846eed5f42b10e81dc905de063e3c1992dabd5f9 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Thu, 16 Mar 2023 23:55:18 -0300 Subject: [PATCH 0098/2104] graph,runtime,store: fix requested changes --- graph/src/components/store/entity_cache.rs | 4 +- graph/src/data/schema.rs | 44 ++++++++++++++++++++-- runtime/wasm/src/module/mod.rs | 8 ++-- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational.rs | 2 +- store/postgres/src/relational_queries.rs | 6 +-- store/postgres/src/writable.rs | 10 +++-- 7 files changed, 56 insertions(+), 20 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index a6ed5ec9f6d..af618bb8aad 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -119,11 +119,11 @@ impl EntityCache { &mut self, eref: &LoadRelatedRequest, ) -> Result, anyhow::Error> { - let (base_type, field) = self.schema.get_type_for_field(eref)?; + let (base_type, field) = self.schema.get_field_related(eref)?; let query = DerivedEntityQuery { entity_type: EntityType::new(base_type.to_string()), - entity_field: field.into(), + 
entity_field: field.name.clone().into(), value: eref.entity_id.clone(), causality_region: eref.causality_region, }; diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 3d3ef328d9f..311671a6d48 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -539,7 +539,22 @@ impl Schema { } } - pub fn get_type_for_field(&self, key: &LoadRelatedRequest) -> Result<(&str, &str), Error> { + /// Returns the field that has the relationship with the key requested + /// This works as a reverse search for the Field related to the query + /// + /// example: + /// + /// type Account @entity { + /// wallets: [Wallet!]! @derivedFrom("account") + /// } + /// type Wallet { + /// account: Account! + /// balance: Int! + /// } + /// + /// When asked to load the related entities from "Account" in the field "wallets" + /// This function will return the type "Wallet" with the field "account" + pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &Field), Error> { let field = self .document .get_object_type_definition(key.entity_type.as_str()) @@ -561,11 +576,32 @@ impl Schema { ) })?; if field.is_derived() { - let derived_from = field.find_directive("derivedFrom").unwrap(); + let derived_from = field.find_directive("derivedfrom").unwrap(); let base_type = field.field_type.get_base_type(); - let field = derived_from.argument("field").unwrap(); + let field_name = derived_from.argument("field").unwrap(); + + let field = self + .document + .get_object_type_definition(base_type) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, + key.entity_id, + key.entity_type, + ) + })? 
+ .field(field_name.as_str().unwrap()) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown field `{}`", + key.entity_type, + key.entity_id, + key.entity_field, + ) + })?; - Ok((base_type, field.as_str().unwrap())) + Ok((base_type, field)) } else { Err(anyhow!( "Entity {}[{}]: field `{}` is not derived", diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index b8e6a0cbc36..94eda62d807 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -530,7 +530,7 @@ impl WasmInstance { link!( "store.loadRelated", store_load_related, - "host_export_store_get_derived", + "host_export_store_load_related", entity, id, field @@ -1086,10 +1086,8 @@ impl WasmInstanceContext { gas, )?; - let entities: Vec> = entities - .iter() - .map(|entity| entity.clone().sorted()) - .collect(); + let entities: Vec> = + entities.into_iter().map(|entity| entity.sorted()).collect(); let ret = asc_new(self, &entities, gas)?; Ok(ret) } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 844dbe5dc61..724f4a24bdc 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1124,7 +1124,7 @@ impl DeploymentStore { site: Arc, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Option>, + excluded_keys: &Vec, ) -> Result, StoreError> { let conn = self.get_conn()?; let layout = self.layout(&conn, site)?; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 6d6f9c9c66b..4e578db1ec1 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -563,7 +563,7 @@ impl Layout { conn: &PgConnection, derived_query: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: &Option>, + excluded_keys: &Vec, ) -> Result, StoreError> { let table = self.table_for_entity(&derived_query.entity_type)?; let query = FindDerivedQuery::new(table, derived_query, block, excluded_keys); diff --git 
a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index facadb6907d..35b86278d5d 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1675,7 +1675,7 @@ pub struct FindDerivedQuery<'a> { table: &'a Table, derived_query: &'a DerivedEntityQuery, block: BlockNumber, - excluded_keys: &'a Option>, + excluded_keys: &'a Vec, } impl<'a> QueryFragment for FindDerivedQuery<'a> { @@ -1699,11 +1699,11 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); - if let Some(keys) = self.excluded_keys.as_ref().filter(|keys| keys.len() > 0) { + if self.excluded_keys.len() > 0 { let primary_key = self.table.primary_key(); out.push_identifier(primary_key.name.as_str())?; out.push_sql(" not in ("); - for (i, value) in keys.iter().enumerate() { + for (i, value) in self.excluded_keys.iter().enumerate() { if i > 0 { out.push_sql(", "); } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 2ed8fe59973..f95157ee3b7 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -254,7 +254,7 @@ impl SyncStore { &self, key: &DerivedEntityQuery, block: BlockNumber, - excluded_keys: Option>, + excluded_keys: Vec, ) -> Result, StoreError> { self.retry("get_derived", || { self.writable @@ -776,7 +776,9 @@ impl Queue { for emod in mods { let key = emod.entity_ref(); // we select just the entities that match the query - if derived_query.entity_type == key.entity_type { + if derived_query.entity_type == key.entity_type + && derived_query.value == key.entity_id + { map.insert(key.clone(), emod.entity().cloned()); } } @@ -793,7 +795,7 @@ impl Queue { // We filter to exclude the entities ids that we already have from the queue let mut items_from_database = self.store - .get_derived(derived_query, tracker.query_block(), Some(excluded_keys))?; + .get_derived(derived_query, 
tracker.query_block(), excluded_keys)?; // Extend the store results with the entities from the queue. // This overwrites any entitiy from the database with the same key from queue @@ -969,7 +971,7 @@ impl Writer { key: &DerivedEntityQuery, ) -> Result, StoreError> { match self { - Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, None), + Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, vec![]), Writer::Async(queue) => queue.get_derived(key), } } From de4a3ea99802d02e67befa41a2c19147e6eef787 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Fri, 17 Mar 2023 19:32:01 -0300 Subject: [PATCH 0099/2104] graph: fix case-sensitive mistake --- graph/src/data/schema.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 311671a6d48..a34f4c4f574 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -545,7 +545,7 @@ impl Schema { /// example: /// /// type Account @entity { - /// wallets: [Wallet!]! @derivedFrom("account") + /// wallets: [Wallet!]! @derivedFrom(field: "account") /// } /// type Wallet { /// account: Account! 
@@ -576,7 +576,7 @@ impl Schema { ) })?; if field.is_derived() { - let derived_from = field.find_directive("derivedfrom").unwrap(); + let derived_from = field.find_directive("derivedFrom").unwrap(); let base_type = field.field_type.get_base_type(); let field_name = derived_from.argument("field").unwrap(); From 54f9965e5593dd68551f88a4b7627379621806cc Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 21 Mar 2023 12:06:57 -0300 Subject: [PATCH 0100/2104] graph,store: add tests for load_related --- Cargo.lock | 3 + graph/Cargo.toml | 4 + graph/tests/entity_cache.rs | 417 ++++++++++++++++++++++++++++++++- store/postgres/src/writable.rs | 21 +- 4 files changed, 430 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9952a8ca8e0..2eccc35d7cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1544,8 +1544,11 @@ dependencies = [ "ethabi", "futures 0.1.31", "futures 0.3.16", + "graph-chain-ethereum", + "graph-store-postgres", "graphql-parser", "hex", + "hex-literal", "http", "isatty", "itertools", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index c71c64c94f0..9686948ea27 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -66,8 +66,12 @@ web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-pat serde_plain = "1.0.1" [dev-dependencies] +test-store = { path = "../store/test-store" } +graph-store-postgres = { path = "../store/postgres" } +graph-chain-ethereum = { path = "../chain/ethereum" } clap = { version = "3.2.23", features = ["derive", "env"] } maplit = "1.0.2" +hex-literal = "0.3" [build-dependencies] tonic-build = { workspace = true } diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index c844a3a000d..38a490a2622 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -1,22 +1,28 @@ use async_trait::async_trait; use graph::blockchain::block_stream::FirehoseCursor; -use graph::blockchain::BlockPtr; -use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; -use 
graph::data_source::CausalityRegion; -use graph::prelude::{Schema, StopwatchMetrics, StoreError, UnfailOutcome}; -use lazy_static::lazy_static; -use slog::Logger; -use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; - use graph::components::store::{ - DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, ReadStore, - StoredDynamicDataSource, WritableStore, + DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, LoadRelatedRequest, + ReadStore, StoredDynamicDataSource, WritableStore, }; +use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; +use graph::data_source::CausalityRegion; +use graph::prelude::*; use graph::{ components::store::{DeploymentId, DeploymentLocator}, prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, }; +use hex_literal::hex; + +use graph::semver::Version; +use lazy_static::lazy_static; +use slog::Logger; +use std::collections::{BTreeMap, BTreeSet}; +use std::marker::PhantomData; +use std::sync::Arc; +use web3::types::H256; + +use graph_store_postgres::SubgraphStore as DieselSubgraphStore; +use test_store::*; lazy_static! { static ref SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("entity_cache").unwrap(); @@ -358,3 +364,392 @@ fn consecutive_modifications() { },]) ); } + +const ACCOUNT_GQL: &str = " + type Account @entity { + id: ID! + name: String! + email: String! + age: Int! + wallets: [Wallet!]! @derivedFrom(field: \"account\") + } + + type Wallet @entity { + id: ID! + balance: Int! + account: Account! + } +"; + +const ACCOUNT: &str = "Account"; +const WALLET: &str = "Wallet"; + +lazy_static! 
{ + static ref LOAD_RELATED_ID_STRING: String = String::from("loadrelatedsubgraph"); + static ref LOAD_RELATED_ID: DeploymentHash = + DeploymentHash::new(LOAD_RELATED_ID_STRING.as_str()).unwrap(); + static ref LOAD_RELATED_SUBGRAPH: Schema = + Schema::parse(ACCOUNT_GQL, LOAD_RELATED_ID.clone()).expect("Failed to parse user schema"); + static ref TEST_BLOCK_1_PTR: BlockPtr = ( + H256::from(hex!( + "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13" + )), + 1u64 + ) + .into(); +} + +fn remove_test_data(store: Arc) { + store + .delete_all_entities_for_test_use_only() + .expect("deleting test entities succeeds"); +} + +fn run_store_test(test: F) +where + F: FnOnce( + EntityCache, + Arc, + DeploymentLocator, + Arc, + ) -> R + + Send + + 'static, + R: std::future::Future + Send + 'static, +{ + run_test_sequentially(|store| async move { + let subgraph_store = store.subgraph_store(); + // Reset state before starting + remove_test_data(subgraph_store.clone()); + + // Seed database with test data + let deployment = insert_test_data(subgraph_store.clone()).await; + let writable = store + .subgraph_store() + .writable(LOGGER.clone(), deployment.id) + .await + .expect("we can get a writable store"); + + // we send the information to the database + writable.flush().await.unwrap(); + + let read_store = Arc::new(writable.clone()); + + let cache = EntityCache::new(read_store); + // Run test and wait for the background writer to finish its work so + // it won't conflict with the next test + test(cache, subgraph_store.clone(), deployment, writable.clone()).await; + writable.flush().await.unwrap(); + }); +} + +async fn insert_test_data(store: Arc) -> DeploymentLocator { + let manifest = SubgraphManifest:: { + id: LOAD_RELATED_ID.clone(), + spec_version: Version::new(1, 0, 0), + features: Default::default(), + description: None, + repository: None, + schema: LOAD_RELATED_SUBGRAPH.clone(), + data_sources: vec![], + graft: None, + templates: vec![], + chain: PhantomData, + 
}; + + // Create SubgraphDeploymentEntity + let deployment = DeploymentCreate::new(String::new(), &manifest, None); + let name = SubgraphName::new("test/store").unwrap(); + let node_id = NodeId::new("test").unwrap(); + let deployment = store + .create_subgraph_deployment( + name, + &LOAD_RELATED_SUBGRAPH, + deployment, + node_id, + NETWORK_NAME.to_string(), + SubgraphVersionSwitchingMode::Instant, + ) + .unwrap(); + + // 1 account 3 wallets + let test_entity_1 = create_account_entity("1", "Johnton", "tonofjohn@email.com", 67_i32); + let wallet_entity_1 = create_wallet_operation("1", "1", 67_i32); + let wallet_entity_2 = create_wallet_operation("2", "1", 92_i32); + let wallet_entity_3 = create_wallet_operation("3", "1", 192_i32); + // 1 account 1 wallet + let test_entity_2 = create_account_entity("2", "Cindini", "dinici@email.com", 42_i32); + let wallet_entity_4 = create_wallet_operation("4", "2", 32_i32); + // 1 account 0 wallets + let test_entity_3 = create_account_entity("3", "Shaqueeena", "queensha@email.com", 28_i32); + transact_entity_operations( + &store, + &deployment, + GENESIS_PTR.clone(), + vec![ + test_entity_1, + test_entity_2, + test_entity_3, + wallet_entity_1, + wallet_entity_2, + wallet_entity_3, + wallet_entity_4, + ], + ) + .await + .unwrap(); + deployment +} + +fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityOperation { + let mut test_entity = Entity::new(); + + test_entity.insert("id".to_owned(), Value::String(id.to_owned())); + test_entity.insert("name".to_owned(), Value::String(name.to_owned())); + test_entity.insert("email".to_owned(), Value::String(email.to_owned())); + test_entity.insert("age".to_owned(), Value::Int(age)); + + EntityOperation::Set { + key: EntityKey::data(ACCOUNT.to_owned(), id.to_owned()), + data: test_entity, + } +} + +fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { + let mut test_wallet = Entity::new(); + + test_wallet.insert("id".to_owned(), 
Value::String(id.to_owned())); + test_wallet.insert("account".to_owned(), Value::String(account_id.to_owned())); + test_wallet.insert("balance".to_owned(), Value::Int(balance)); + test_wallet +} +fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { + let test_wallet = create_wallet_entity(id, account_id, balance); + EntityOperation::Set { + key: EntityKey::data(WALLET.to_owned(), id.to_owned()), + data: test_wallet, + } +} + +#[test] +fn check_for_account_with_multiple_wallets() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + let account_id = "1"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let wallet_1 = create_wallet_entity("1", account_id, 67_i32); + let wallet_2 = create_wallet_entity("2", account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_account_with_single_wallet() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + let account_id = "2"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let wallet_1 = create_wallet_entity("4", account_id, 32_i32); + let expeted_vec = vec![wallet_1]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_account_with_no_wallet() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + let account_id = "3"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: 
"wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let expeted_vec = vec![]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_account_that_doesnt_exist() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + let account_id = "4"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let expeted_vec = vec![]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_non_existent_field() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + let account_id = "1"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "friends".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap_err(); + let expected = format!( + "Entity {}[{}]: unknown field `{}`", + request.entity_type, request.entity_id, request.entity_field, + ); + + assert_eq!(format!("{}", result), expected); + }); +} + +#[test] +fn check_for_insert_async_store() { + run_store_test(|mut cache, store, deployment, _writable| async move { + let account_id = "2"; + // insert a new wallet + let wallet_entity_5 = create_wallet_operation("5", account_id, 79_i32); + let wallet_entity_6 = create_wallet_operation("6", account_id, 200_i32); + + transact_entity_operations( + &store, + &deployment, + TEST_BLOCK_1_PTR.clone(), + vec![wallet_entity_5, wallet_entity_6], + ) + .await + .unwrap(); + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + 
let result = cache.load_related(&request).unwrap(); + let wallet_1 = create_wallet_entity("4", account_id, 32_i32); + let wallet_2 = create_wallet_entity("5", account_id, 79_i32); + let wallet_3 = create_wallet_entity("6", account_id, 200_i32); + let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; + + assert_eq!(result, expeted_vec); + }); +} +#[test] +fn check_for_insert_async_not_related() { + run_store_test(|mut cache, store, deployment, _writable| async move { + let account_id = "2"; + // insert a new wallet + let wallet_entity_5 = create_wallet_operation("5", account_id, 79_i32); + let wallet_entity_6 = create_wallet_operation("6", account_id, 200_i32); + + transact_entity_operations( + &store, + &deployment, + TEST_BLOCK_1_PTR.clone(), + vec![wallet_entity_5, wallet_entity_6], + ) + .await + .unwrap(); + let account_id = "1"; + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let wallet_1 = create_wallet_entity("1", account_id, 67_i32); + let wallet_2 = create_wallet_entity("2", account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_update_async_related() { + run_store_test(|mut cache, store, deployment, writable| async move { + let account_id = "1"; + let entity_key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let wallet_entity_update = create_wallet_operation("1", account_id, 79_i32); + + let new_data = match wallet_entity_update { + EntityOperation::Set { ref data, .. 
} => data.clone(), + _ => unreachable!(), + }; + assert_ne!(writable.get(&entity_key).unwrap().unwrap(), new_data); + // insert a new wallet + transact_entity_operations( + &store, + &deployment, + TEST_BLOCK_1_PTR.clone(), + vec![wallet_entity_update], + ) + .await + .unwrap(); + + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let wallet_2 = create_wallet_entity("2", account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let expeted_vec = vec![new_data, wallet_2, wallet_3]; + + assert_eq!(result, expeted_vec); + }); +} + +#[test] +fn check_for_delete_async_related() { + run_store_test(|mut cache, store, deployment, _writable| async move { + let account_id = "1"; + let del_key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + // delete wallet + transact_entity_operations( + &store, + &deployment, + TEST_BLOCK_1_PTR.clone(), + vec![EntityOperation::Remove { key: del_key }], + ) + .await + .unwrap(); + + let request = LoadRelatedRequest { + entity_type: EntityType::new(ACCOUNT.to_string()), + entity_field: "wallets".into(), + entity_id: account_id.into(), + causality_region: CausalityRegion::ONCHAIN, + }; + let result = cache.load_related(&request).unwrap(); + let wallet_2 = create_wallet_entity("2", account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let expeted_vec = vec![wallet_2, wallet_3]; + + assert_eq!(result, expeted_vec); + }); +} diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index f95157ee3b7..2227a6e7a9c 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -776,10 +776,23 @@ impl Queue { for emod in mods { let key = emod.entity_ref(); // we select just the entities that match the query - if 
derived_query.entity_type == key.entity_type - && derived_query.value == key.entity_id - { - map.insert(key.clone(), emod.entity().cloned()); + if derived_query.entity_type == key.entity_type { + if let Some(entity) = emod.entity().cloned() { + if let Some(related_id) = + entity.get(derived_query.entity_field.as_str()) + { + // we check only the field agains the value + if related_id.to_string() + == derived_query.value.to_string() + { + map.insert(key.clone(), Some(entity)); + } + } + } else { + // if the entity was deleted, we add here with no checks + // just for removing from the query + map.insert(key.clone(), emod.entity().cloned()); + } } } } From bed588760c6266edd00989dc9705b467b7143485 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Tue, 21 Mar 2023 12:34:48 -0300 Subject: [PATCH 0101/2104] runtime,graph: fix warnings and rebase types runtime: fix rebase problem --- Cargo.lock | 1 + graph/src/data/schema.rs | 2 +- store/postgres/src/deployment_store.rs | 4 ++-- store/postgres/src/writable.rs | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2eccc35d7cd..5f2b4fa8999 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1580,6 +1580,7 @@ dependencies = [ "stable-hash 0.4.2", "strum", "strum_macros", + "test-store", "thiserror", "tiny-keccak 1.5.0", "tokio", diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index a34f4c4f574..dc0f026f77b 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,5 +1,5 @@ use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest, SubgraphStore}; +use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::{self, ValueType}; diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 
724f4a24bdc..32e040f95e4 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -6,8 +6,8 @@ use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - EntityDerived, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, - StoredDynamicDataSource, VersionStats, + DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, + PruningStrategy, StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; use graph::data::query::Trace; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 2227a6e7a9c..a5127aa715b 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -256,7 +256,7 @@ impl SyncStore { block: BlockNumber, excluded_keys: Vec, ) -> Result, StoreError> { - self.retry("get_derived", || { + retry::forever(&self.logger, "get_derived", || { self.writable .get_derived(self.site.cheap_clone(), key, block, &excluded_keys) }) From 2096d90c835fcbb2106ec3327d5a35257f723aef Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 11:50:43 -0700 Subject: [PATCH 0102/2104] graph, store: Rename `PruningStrategy::Copy` to `Rebuild` --- graph/src/components/store/mod.rs | 10 +++++----- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational/prune.rs | 2 +- store/postgres/tests/graft.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index c18352e4315..3b6cfbde91b 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1208,7 +1208,7 @@ pub enum PrunePhase { impl PrunePhase { pub fn strategy(&self) -> PruningStrategy { match self { - PrunePhase::CopyFinal | PrunePhase::CopyNonfinal => PruningStrategy::Copy, + PrunePhase::CopyFinal 
| PrunePhase::CopyNonfinal => PruningStrategy::Rebuild, PrunePhase::Delete => PruningStrategy::Delete, } } @@ -1247,9 +1247,9 @@ pub trait PruneReporter: Send + 'static { /// Select how pruning should be done #[derive(Clone, Copy, Debug, Display, PartialEq)] pub enum PruningStrategy { - /// Copy the data we want to keep to new tables and swap them out for - /// the existing tables - Copy, + /// Rebuild by copying the data we want to keep to new tables and swap + /// them out for the existing tables + Rebuild, /// Delete unneeded data from the existing tables Delete, } @@ -1357,7 +1357,7 @@ impl PruneRequest { // will remove. let removal_ratio = self.history_pct(stats) * (1.0 - stats.ratio); if removal_ratio >= self.copy_threshold { - Some(PruningStrategy::Copy) + Some(PruningStrategy::Rebuild) } else if removal_ratio >= self.delete_threshold { Some(PruningStrategy::Delete) } else { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 32e040f95e4..a966165f28e 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1969,7 +1969,7 @@ impl PruneReporter for OngoingPruneReporter { fn prune_batch(&mut self, _table: &str, rows: usize, phase: PrunePhase, _finished: bool) { match phase.strategy() { - PruningStrategy::Copy => self.rows_copied += rows, + PruningStrategy::Rebuild => self.rows_copied += rows, PruningStrategy::Delete => self.rows_deleted += rows, } } diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 80b06b9af93..b540f555e5b 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -414,7 +414,7 @@ impl Layout { for (table, strat) in &prunable_tables { reporter.start_table(table.name.as_str()); match strat { - PruningStrategy::Copy => { + PruningStrategy::Rebuild => { if recreate_dst_nsp { catalog::recreate_schema(conn, dst_nsp.as_str())?; recreate_dst_nsp = false; diff --git 
a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 5fdb48dd03e..2eae7e7b5b6 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -569,7 +569,7 @@ fn prune() { ); } - for strategy in [PruningStrategy::Copy, PruningStrategy::Delete] { + for strategy in [PruningStrategy::Rebuild, PruningStrategy::Delete] { run_test(move |store, src| async move { store .set_history_blocks(&src, -3, 10) @@ -612,7 +612,7 @@ fn prune() { let mut req = PruneRequest::new(&src, 3, 1, 0, 6)?; // Change the thresholds so that we select the desired strategy match strategy { - PruningStrategy::Copy => { + PruningStrategy::Rebuild => { req.copy_threshold = 0.0; req.delete_threshold = 0.0; } From 017671c22e7423e434c05987c8513ddde57fcff3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 11:56:41 -0700 Subject: [PATCH 0103/2104] all: Rename 'copy_threshold' to 'rebuild_threshold' --- docs/environment-variables.md | 16 +++++++-------- graph/src/components/store/mod.rs | 33 +++++++++++++++--------------- graph/src/env/store.rs | 14 ++++++------- node/src/bin/manager.rs | 12 +++++------ node/src/manager/commands/prune.rs | 6 +++--- store/postgres/tests/graft.rs | 4 ++-- 6 files changed, 43 insertions(+), 42 deletions(-) diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 04433d4d0e3..635abc040c5 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -227,14 +227,14 @@ those. 1.1 means that the subgraph will be pruned every time it contains 10% more history (in blocks) than its history limit. The default value is 1.2 and the value must be at least 1.01 -- `GRAPH_STORE_HISTORY_COPY_THRESHOLD`, - `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: when pruning, prune by copying the - entities we will keep to new tables if we estimate that we will remove - more than a factor of `COPY_THRESHOLD` of the deployment's history. 
If we - estimate to remove a factor between `COPY_THRESHOLD` and - `DELETE_THRESHOLD`, prune by deleting from the existing tables of the +- `GRAPH_STORE_HISTORY_REBUILD_THRESHOLD`, + `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: when pruning, prune by copying + the entities we will keep to new tables if we estimate that we will + remove more than a factor of `REBUILD_THRESHOLD` of the deployment's + history. If we estimate to remove a factor between `REBUILD_THRESHOLD` + and `DELETE_THRESHOLD`, prune by deleting from the existing tables of the deployment. If we estimate to remove less than `DELETE_THRESHOLD` entities, do not change the table. Both settings are floats, and default - to 0.5 for the `COPY_THRESHOLD` and 0.05 for the `DELETE_THRESHOLD`; they - must be between 0 and 1, and `COPY_THRESHOLD` must be bigger than + to 0.5 for the `REBUILD_THRESHOLD` and 0.05 for the `DELETE_THRESHOLD`; + they must be between 0 and 1, and `REBUILD_THRESHOLD` must be bigger than `DELETE_THRESHOLD`. diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 3b6cfbde91b..a1199ac22ae 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1270,12 +1270,12 @@ pub struct PruneRequest { pub final_block: BlockNumber, /// The latest block, i.e., the subgraph head pub latest_block: BlockNumber, - /// Use the copy strategy when removing more than this fraction of - /// history. Initialized from `ENV_VARS.store.copy_threshold`, but can - /// be modified after construction - pub copy_threshold: f64, + /// Use the rebuild strategy when removing more than this fraction of + /// history. Initialized from `ENV_VARS.store.rebuild_threshold`, but + /// can be modified after construction + pub rebuild_threshold: f64, /// Use the delete strategy when removing more than this fraction of - /// history but less than `copy_threshold`. Initialized from + /// history but less than `rebuild_threshold`. 
Initialized from /// `ENV_VARS.store.delete_threshold`, but can be modified after /// construction pub delete_threshold: f64, @@ -1293,11 +1293,11 @@ impl PruneRequest { first_block: BlockNumber, latest_block: BlockNumber, ) -> Result { - let copy_threshold = ENV_VARS.store.copy_threshold; + let rebuild_threshold = ENV_VARS.store.rebuild_threshold; let delete_threshold = ENV_VARS.store.delete_threshold; - if copy_threshold < 0.0 || copy_threshold > 1.0 { + if rebuild_threshold < 0.0 || rebuild_threshold > 1.0 { return Err(constraint_violation!( - "the copy threshold must be between 0 and 1 but is {copy_threshold}" + "the copy threshold must be between 0 and 1 but is {rebuild_threshold}" )); } if delete_threshold < 0.0 || delete_threshold > 1.0 { @@ -1331,19 +1331,20 @@ impl PruneRequest { earliest_block, final_block, latest_block, - copy_threshold, + rebuild_threshold, delete_threshold, }) } /// Determine what strategy to use for pruning /// - /// We are pruning `history_pct` of the blocks from a table that has a ratio - /// of `version_ratio` entities to versions. If we are removing more than - /// `copy_threshold` percent of the versions, we prune by copying, and if we - /// are removing more than `delete_threshold` percent of the versions, we - /// prune by deleting. If we would remove less than `delete_threshold` - /// percent of the versions, we don't prune. + /// We are pruning `history_pct` of the blocks from a table that has a + /// ratio of `version_ratio` entities to versions. If we are removing + /// more than `rebuild_threshold` percent of the versions, we prune by + /// rebuilding, and if we are removing more than `delete_threshold` + /// percent of the versions, we prune by deleting. If we would remove + /// less than `delete_threshold` percent of the versions, we don't + /// prune. 
pub fn strategy(&self, stats: &VersionStats) -> Option { // If the deployment doesn't have enough history to cover the reorg // threshold, do not prune @@ -1356,7 +1357,7 @@ impl PruneRequest { // that `history_pct` will tell us how much of that data pruning // will remove. let removal_ratio = self.history_pct(stats) * (1.0 - stats.ratio); - if removal_ratio >= self.copy_threshold { + if removal_ratio >= self.rebuild_threshold { Some(PruningStrategy::Rebuild) } else if removal_ratio >= self.delete_threshold { Some(PruningStrategy::Delete) diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index f89f394bf17..8492b0e1b49 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -85,11 +85,11 @@ pub struct EnvVarsStore { pub batch_target_duration: Duration, /// Prune tables where we will remove at least this fraction of entity - /// versions by copying. Set by `GRAPH_STORE_HISTORY_COPY_THRESHOLD`. - /// The default is 0.5 - pub copy_threshold: f64, + /// versions by rebuilding the table. Set by + /// `GRAPH_STORE_HISTORY_REBUILD_THRESHOLD`. The default is 0.5 + pub rebuild_threshold: f64, /// Prune tables where we will remove at least this fraction of entity - /// versions, but fewer than `copy_threshold`, by deleting. Set by + /// versions, but fewer than `rebuild_threshold`, by deleting. Set by /// `GRAPH_STORE_HISTORY_DELETE_THRESHOLD`. 
The default is 0.05 pub delete_threshold: f64, /// How much history a subgraph with limited history can accumulate @@ -134,7 +134,7 @@ impl From for EnvVarsStore { connection_idle_timeout: Duration::from_secs(x.connection_idle_timeout_in_secs), write_queue_size: x.write_queue_size, batch_target_duration: Duration::from_secs(x.batch_target_duration_in_secs), - copy_threshold: x.copy_threshold.0, + rebuild_threshold: x.rebuild_threshold.0, delete_threshold: x.delete_threshold.0, history_slack_factor: x.history_slack_factor.0, } @@ -180,8 +180,8 @@ pub struct InnerStore { write_queue_size: usize, #[envconfig(from = "GRAPH_STORE_BATCH_TARGET_DURATION", default = "180")] batch_target_duration_in_secs: u64, - #[envconfig(from = "GRAPH_STORE_HISTORY_COPY_THRESHOLD", default = "0.5")] - copy_threshold: ZeroToOneF64, + #[envconfig(from = "GRAPH_STORE_HISTORY_REBUILD_THRESHOLD", default = "0.5")] + rebuild_threshold: ZeroToOneF64, #[envconfig(from = "GRAPH_STORE_HISTORY_DELETE_THRESHOLD", default = "0.05")] delete_threshold: ZeroToOneF64, #[envconfig(from = "GRAPH_STORE_HISTORY_SLACK_FACTOR", default = "1.2")] diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index b67afff336a..ba9ea30fe41 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -253,12 +253,12 @@ pub enum Command { Prune { /// The deployment to prune (see `help info`) deployment: DeploymentSearch, - /// Prune by copying when removing more than this fraction of - /// history. Defaults to GRAPH_STORE_HISTORY_COPY_THRESHOLD + /// Prune by rebuilding tables when removing more than this fraction + /// of history. Defaults to GRAPH_STORE_HISTORY_REBUILD_THRESHOLD #[clap(long, short)] - copy_threshold: Option, + rebuild_threshold: Option, /// Prune by deleting when removing more than this fraction of - /// history but less than copy_threshold. Defaults to + /// history but less than rebuild_threshold. 
Defaults to /// GRAPH_STORE_HISTORY_DELETE_THRESHOLD #[clap(long, short)] delete_threshold: Option, @@ -1390,7 +1390,7 @@ async fn main() -> anyhow::Result<()> { Prune { deployment, history, - copy_threshold, + rebuild_threshold, delete_threshold, once, } => { @@ -1400,7 +1400,7 @@ async fn main() -> anyhow::Result<()> { primary_pool, deployment, history, - copy_threshold, + rebuild_threshold, delete_threshold, once, ) diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index 52288dcab09..c169577ee65 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -161,7 +161,7 @@ pub async fn run( primary_pool: ConnectionPool, search: DeploymentSearch, history: usize, - copy_threshold: Option, + rebuild_threshold: Option, delete_threshold: Option, once: bool, ) -> Result<(), anyhow::Error> { @@ -198,8 +198,8 @@ pub async fn run( status.earliest_block_number, latest, )?; - if let Some(copy_threshold) = copy_threshold { - req.copy_threshold = copy_threshold; + if let Some(rebuild_threshold) = rebuild_threshold { + req.rebuild_threshold = rebuild_threshold; } if let Some(delete_threshold) = delete_threshold { req.delete_threshold = delete_threshold; diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index 2eae7e7b5b6..c401afeaa2e 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -613,11 +613,11 @@ fn prune() { // Change the thresholds so that we select the desired strategy match strategy { PruningStrategy::Rebuild => { - req.copy_threshold = 0.0; + req.rebuild_threshold = 0.0; req.delete_threshold = 0.0; } PruningStrategy::Delete => { - req.copy_threshold = 1.0; + req.rebuild_threshold = 1.0; req.delete_threshold = 0.0; } } From fb0aca521519e410f99075e2b5bdb1313bf182af Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 12:24:15 -0700 Subject: [PATCH 0104/2104] store: Improve comments for pruning, rename 'copy' to 'rebuild' internally 
--- store/postgres/src/relational/prune.rs | 33 +++++++++++++------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index b540f555e5b..2a848cc0c2f 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -345,30 +345,29 @@ impl Layout { /// Remove all data from the underlying deployment that is not needed to /// respond to queries before block `earliest_block`. The `req` is used - /// to determine which strategy should be used for pruning, copy or + /// to determine which strategy should be used for pruning, rebuild or /// delete. /// /// Blocks before `req.final_block` are considered final and it is /// assumed that they will not be modified in any way while pruning is /// running. /// - /// The copy strategy implemented here works well for situations in + /// The rebuild strategy implemented here works well for situations in /// which pruning will remove a large amount of data from the subgraph /// (say, at least 50%) /// - /// The strategy for `prune_by_copying` is to copy all data that is - /// needed to respond to queries at block heights at or after - /// `earliest_block` to a new table and then to replace the existing - /// tables with these new tables atomically in a transaction. Copying - /// happens in two stages that are performed for each table in turn: we - /// first copy data for final blocks without blocking writes, and then - /// copy data for nonfinal blocks. The latter blocks writes by taking a - /// lock on the row for the deployment in `subgraph_deployment` (via - /// `deployment::lock`) The process for switching to the new tables - /// needs to take the naming of various database objects that Postgres - /// creates automatically into account so that they all have the same - /// names as the original objects to ensure that pruning can be done - /// again without risking name clashes. 
+ /// The strategy for rebuilding is to copy all data that is needed to + /// respond to queries at block heights at or after `earliest_block` to + /// a new table and then to replace the existing tables with these new + /// tables atomically in a transaction. Rebuilding happens in two stages + /// that are performed for each table in turn: we first copy data for + /// final blocks without blocking writes, and then copy data for + /// nonfinal blocks. The latter blocks writes by taking an advisory lock + /// on the deployment (via `deployment::lock`) The process for switching + /// to the new tables needs to take the naming of various database + /// objects that Postgres creates automatically into account so that + /// they all have the same names as the original objects to ensure that + /// pruning can be done again without risking name clashes. /// /// The reason this strategy works well when a lot (or even the /// majority) of the data needs to be removed is that in the more @@ -380,8 +379,8 @@ impl Layout { /// tables. But a full vacuum takes an `access exclusive` lock which /// prevents both reads and writes to the table, which means it would /// also block queries to the deployment, often for extended periods of - /// time. The `prune_by_copying` strategy never blocks reads, it only - /// ever blocks writes. + /// time. The rebuild strategy never blocks reads, it only ever blocks + /// writes. 
pub fn prune( &self, logger: &Logger, From 705db27162ba4394b0a15a9e14a3e5eee6adee74 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 13:15:40 -0700 Subject: [PATCH 0105/2104] docs: Explain how pruning works and how it is configured --- docs/implementation/README.md | 1 + docs/implementation/pruning.md | 82 ++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 docs/implementation/pruning.md diff --git a/docs/implementation/README.md b/docs/implementation/README.md index 441c5f279aa..31d4eb694a6 100644 --- a/docs/implementation/README.md +++ b/docs/implementation/README.md @@ -9,3 +9,4 @@ the code should go into comments. * [Time-travel Queries](./time-travel.md) * [SQL Query Generation](./sql-query-generation.md) * [Adding support for a new chain](./add-chain.md) +* [Pruning](./pruning.md) diff --git a/docs/implementation/pruning.md b/docs/implementation/pruning.md new file mode 100644 index 00000000000..af99a51f660 --- /dev/null +++ b/docs/implementation/pruning.md @@ -0,0 +1,82 @@ +## Pruning deployments + +Pruning is an operation that deletes data from a deployment that is only +needed to respond to queries at block heights before a certain block. In +GraphQL, those are only queries with a constraint `block { number: } }` +or a similar constraint by block hash where `n` is before the block to +which the deployment is pruned. Queries that are run at a block height +greater than that are not affected by pruning, and there is no difference +between running these queries against an unpruned and a pruned deployment. + +Because pruning reduces the amount of data in a deployment, it reduces the +amount of storage needed for that deployment, and is beneficial for both +query performance and indexing speed. Especially compared to the default of +keeping all history for a deployment, it can often reduce the amount of +data for a deployment by a very large amount and speed up queries +considerably. 
See [caveats](#caveats) below for the downsides. + +The block `b` to which a deployment is pruned is controlled by how many +blocks `history_blocks` of history to retain; `b` is calculated internally +using `history_blocks` and the latest block of the deployment when the +prune operation is performed. When pruning finishes, it updates the +`earliest_block` for the deployment. The `earliest_block` can be retrieved +through the `index-node` status API, and `graph-node` will return an error +for any query that tries to time-travel to a point before +`earliest_block`. The value of `history_blocks` must be greater than +`ETHEREUM_REORG_THRESHOLD` to make sure that reverts can never conflict +with pruning. + +Pruning is started by running `graphman prune`. That command will perform +an initial prune of the deployment and set the subgraph's `history_blocks` +setting which is used to periodically check whether the deployment has +accumulated more history than that. Whenever the deployment does contain +more history than that, the deployment is automatically repruned. If +ongoing pruning is not desired, pass the `--once` flag to `graphman +prune`. Ongoing pruning can be turned off by setting `history_blocks` to a +very large value with the `--history` flag. + +Repruning is performed whenever the deployment has more than +`history_blocks * GRAPH_STORE_HISTORY_SLACK_FACTOR` blocks of history. The +environment variable `GRAPH_STORE_HISTORY_SLACK_FACTOR` therefore controls +how often repruning is performed: with +`GRAPH_STORE_HISTORY_SLACK_FACTOR=1.5` and `history_blocks` set to 10,000, +a reprune will happen every 5,000 blocks. After the initial pruning, a +reprune therefore happens every `history_blocks * +(GRAPH_STORE_HISTORY_SLACK_FACTOR - 1)` blocks. This value should be set +high enough so that repruning occurs relatively infrequently to not cause too +much database work.
+ +Pruning uses two different strategies for how to remove unneeded data: +rebuilding tables and deleting old entity versions. Deleting old entity +versions is straightforward: this strategy deletes rows from the underlying +tables. Rebuilding tables will copy the data that should be kept from the +existing tables into new tables and then replaces the existing tables with +these much smaller tables. Which strategy to use is determined for each +table individually, and governed by the settings for +`GRAPH_STORE_HISTORY_REBUILD_THRESHOLD` and +`GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: if we estimate that we will remove +more than `REBUILD_THRESHOLD` of the table, the table will be rebuilt. If +we estimate that we will remove a fraction between `REBUILD_THRESHOLD` and +`DELETE_THRESHOLD` of the table, unneeded entity versions will be +deleted. If we estimate to remove less than `DELETE_THRESHOLD`, the table +is not changed at all. With both strategies, operations are broken into +batches that should each take `GRAPH_STORE_BATCH_TARGET_DURATION` seconds +to avoid causing very long-running transactions. + +### Caveats + +Pruning is a user-visible operation and does affect some of the things that +can be done with a deployment: + +* because it removes history, it restricts how far back time-travel queries + can be performed. This will only be an issue for entities that keep + lifetime statistics about some object (e.g., a token) and are used to + produce time series: after pruning, it is only possible to produce a time + series that goes back no more than `history_blocks`. It is very + beneficial though for entities that keep daily or similar statistics + about some object as it removes data that is not needed once the time + period is over, and does not affect how far back time series based on + these objects can be retrieved. +* it restricts how far back a graft can be performed. 
Because it removes + history, it becomes impossible to graft more than `history_blocks` before + the current deployment head. From 6504d975298a35701d97288c40cea76fc5b7c873 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 11:50:28 -0700 Subject: [PATCH 0106/2104] NEWS.md: Mention pruning in the release notes --- NEWS.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS.md b/NEWS.md index 215de1f71d5..f825a7f16f0 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,6 +2,10 @@ ## Unreleased +- the behavior for `graphman prune` has changed: running just `graphman + prune` will mark the subgraph for ongoing pruning in addition to + performing an initial pruning. To avoid ongoing pruning, use `graphman + prune --once` ([docs](./docs/implementation/pruning.md)) - the materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. [#4461](https://github.com/graphprotocol/graph-node/pull/4461) ### Fixes From 3c3ad9309ae894d9f86e8ea14b9e3a141545f46e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 16:13:03 -0700 Subject: [PATCH 0107/2104] docs: Improve pruning doc based on Adam's review comments --- docs/implementation/pruning.md | 47 +++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/docs/implementation/pruning.md b/docs/implementation/pruning.md index af99a51f660..4faf66f4e31 100644 --- a/docs/implementation/pruning.md +++ b/docs/implementation/pruning.md @@ -1,12 +1,15 @@ ## Pruning deployments -Pruning is an operation that deletes data from a deployment that is only -needed to respond to queries at block heights before a certain block. In -GraphQL, those are only queries with a constraint `block { number: } }` -or a similar constraint by block hash where `n` is before the block to -which the deployment is pruned. 
Queries that are run at a block height -greater than that are not affected by pruning, and there is no difference -between running these queries against an unpruned and a pruned deployment. +Subgraphs, by default, store a full version history for entities, allowing +consumers to query the subgraph as of any historical block. Pruning is an +operation that deletes entity versions from a deployment older than a +certain block, so it is no longer possible to query the deployment as of +prior blocks. In GraphQL, those are only queries with a constraint `block { +number: <n> } }` or a similar constraint by block hash where `n` is before +the block to which the deployment is pruned. Queries that are run at a +block height greater than that are not affected by pruning, and there is no +difference between running these queries against an unpruned and a pruned +deployment. Because pruning reduces the amount of data in a deployment, it reduces the amount of storage needed for that deployment, and is beneficial for both @@ -54,14 +57,28 @@ existing tables into new tables and then replaces the existing tables with these much smaller tables. Which strategy to use is determined for each table individually, and governed by the settings for `GRAPH_STORE_HISTORY_REBUILD_THRESHOLD` and -`GRAPH_STORE_HISTORY_DELETE_THRESHOLD`: if we estimate that we will remove -more than `REBUILD_THRESHOLD` of the table, the table will be rebuilt. If -we estimate that we will remove a fraction between `REBUILD_THRESHOLD` and -`DELETE_THRESHOLD` of the table, unneeded entity versions will be -deleted. If we estimate to remove less than `DELETE_THRESHOLD`, the table -is not changed at all. With both strategies, operations are broken into -batches that should each take `GRAPH_STORE_BATCH_TARGET_DURATION` seconds -to avoid causing very long-running transactions.
+`GRAPH_STORE_HISTORY_DELETE_THRESHOLD`, both numbers between 0 and 1: if we +estimate that we will remove more than `REBUILD_THRESHOLD` of the table, +the table will be rebuilt. If we estimate that we will remove a fraction +between `REBUILD_THRESHOLD` and `DELETE_THRESHOLD` of the table, unneeded +entity versions will be deleted. If we estimate to remove less than +`DELETE_THRESHOLD`, the table is not changed at all. With both strategies, +operations are broken into batches that should each take +`GRAPH_STORE_BATCH_TARGET_DURATION` seconds to avoid causing very +long-running transactions. + +Pruning, in most cases, runs in parallel with indexing and does not block +it. When the rebuild strategy is used, pruning does block indexing while it +copies non-final entities from the existing table to the new table. + +The initial prune started by `graphman prune` prints a progress report on +the console. For the ongoing prune runs that are periodically performed, +the following information is logged: a message `Start pruning historical +entities` which includes the earliest and latest block, a message `Analyzed +N tables`, and a message `Finished pruning entities` with details about how +much was deleted or copied and how long that took. Pruning analyzes tables, +if that seems necessary, because its estimates of how much of a table is +likely not needed are based on Postgres statistics. 
### Caveats From 1280949ebba215e707c45f7e3f1b0802994285fa Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 28 Mar 2023 16:21:43 -0700 Subject: [PATCH 0108/2104] store: Use the right logger for pruning We need to use the logger that adds information about the subgraph --- store/postgres/src/deployment_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index a966165f28e..ab8956c7a75 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1290,10 +1290,10 @@ impl DeploymentStore { site: Arc, req: PruneRequest, ) -> Result<(), StoreError> { - let logger = logger.cheap_clone(); - retry::forever_async(&logger, "prune", move || { + let logger2 = logger.cheap_clone(); + retry::forever_async(&logger2, "prune", move || { let store = store.cheap_clone(); - let reporter = OngoingPruneReporter::new(store.logger.cheap_clone()); + let reporter = OngoingPruneReporter::new(logger.cheap_clone()); let site = site.cheap_clone(); async move { store.prune(reporter, site, req).await.map(|_| ()) } }) From 5fa50f987187530cd551aad3c4496350c5832081 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Thu, 30 Mar 2023 17:37:12 +0100 Subject: [PATCH 0109/2104] chain/ethereum: RuntimeAdapter lazy EthAdapter (#4508) --- chain/ethereum/src/network.rs | 4 ++++ chain/ethereum/src/runtime/runtime_adapter.rs | 12 +++++++----- node/src/chain.rs | 5 ++++- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 77e4dab13b9..747a55e8886 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -193,6 +193,10 @@ impl EthereumNetworks { } } + pub fn insert_empty(&mut self, name: String) { + self.networks.entry(name).or_default(); + } + pub fn insert( &mut self, name: String, diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs 
b/chain/ethereum/src/runtime/runtime_adapter.rs index 71e20532ed3..3a0c7f7e62a 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -43,15 +43,17 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { fn host_fns(&self, ds: &DataSource) -> Result, Error> { let abis = ds.mapping.abis.clone(); let call_cache = self.call_cache.cheap_clone(); - // Ethereum calls should prioritise call-only adapters if one is available. - let eth_adapter = self.eth_adapters.call_or_cheapest(Some(&NodeCapabilities { - archive: ds.mapping.requires_archive()?, - traces: false, - }))?; + let eth_adapters = self.eth_adapters.cheap_clone(); + let archive = ds.mapping.requires_archive()?; let ethereum_call = HostFn { name: "ethereum.call", func: Arc::new(move |ctx, wasm_ptr| { + // Ethereum calls should prioritise call-only adapters if one is available. + let eth_adapter = eth_adapters.call_or_cheapest(Some(&NodeCapabilities { + archive, + traces: false, + }))?; ethereum_call(ð_adapter, call_cache.cheap_clone(), ctx, wasm_ptr, &abis) .map(|ptr| ptr.wasm_ptr()) }), diff --git a/node/src/chain.rs b/node/src/chain.rs index 6a7a0be8ab2..961a929bfb3 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -438,7 +438,10 @@ pub async fn create_ethereum_networks_for_chain( let (web3, call_only) = match &provider.details { ProviderDetails::Web3Call(web3) => (web3, true), ProviderDetails::Web3(web3) => (web3, false), - _ => continue, + _ => { + parsed_networks.insert_empty(network_name.to_string()); + continue; + } }; let capabilities = web3.node_capabilities(); From bd4f6fb28f64ec2020a8026b1080e5cb41b2a845 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 10:32:21 -0700 Subject: [PATCH 0110/2104] graph, store: Move the entity_cache test to test-store --- Cargo.lock | 3 --- graph/Cargo.toml | 3 --- graph/tests/README.md | 5 +++++ store/test-store/tests/graph.rs | 3 +++ .../tests => 
store/test-store/tests/graph}/entity_cache.rs | 1 - 5 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 graph/tests/README.md create mode 100644 store/test-store/tests/graph.rs rename {graph/tests => store/test-store/tests/graph}/entity_cache.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 5f2b4fa8999..c909ae5a88b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1544,8 +1544,6 @@ dependencies = [ "ethabi", "futures 0.1.31", "futures 0.3.16", - "graph-chain-ethereum", - "graph-store-postgres", "graphql-parser", "hex", "hex-literal", @@ -1580,7 +1578,6 @@ dependencies = [ "stable-hash 0.4.2", "strum", "strum_macros", - "test-store", "thiserror", "tiny-keccak 1.5.0", "tokio", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 9686948ea27..39aba9d540f 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -66,9 +66,6 @@ web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-pat serde_plain = "1.0.1" [dev-dependencies] -test-store = { path = "../store/test-store" } -graph-store-postgres = { path = "../store/postgres" } -graph-chain-ethereum = { path = "../chain/ethereum" } clap = { version = "3.2.23", features = ["derive", "env"] } maplit = "1.0.2" hex-literal = "0.3" diff --git a/graph/tests/README.md b/graph/tests/README.md new file mode 100644 index 00000000000..ff99b410d4b --- /dev/null +++ b/graph/tests/README.md @@ -0,0 +1,5 @@ +Put integration tests for this crate into `store/test-store/tests/graph`. +This avoids cyclic dev-dependencies which make rust-analyzer nearly +unusable. 
Once [this +issue](https://github.com/rust-lang/rust-analyzer/issues/14167) has been +fixed, we can move tests back here diff --git a/store/test-store/tests/graph.rs b/store/test-store/tests/graph.rs new file mode 100644 index 00000000000..6c8d2915540 --- /dev/null +++ b/store/test-store/tests/graph.rs @@ -0,0 +1,3 @@ +pub mod graph { + pub mod entity_cache; +} diff --git a/graph/tests/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs similarity index 99% rename from graph/tests/entity_cache.rs rename to store/test-store/tests/graph/entity_cache.rs index 38a490a2622..d0fc7dd3b26 100644 --- a/graph/tests/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, LoadRelatedRequest, From e1e943d4d9553afe4fcf56a3de942b6e9ab6c34c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 10:52:47 -0700 Subject: [PATCH 0111/2104] chain, store: Move ethereum tests to test-store --- Cargo.lock | 1 - chain/ethereum/Cargo.toml | 1 - chain/ethereum/tests/README.md | 5 +++++ store/test-store/tests/chain.rs | 5 +++++ .../tests/chain/ethereum}/full-text.graphql | 0 .../chain/ethereum}/ipfs-on-ethereum-contracts.ts | 0 .../chain/ethereum}/ipfs-on-ethereum-contracts.wasm | Bin .../test-store/tests/chain/ethereum}/manifest.rs | 2 +- 8 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 chain/ethereum/tests/README.md create mode 100644 store/test-store/tests/chain.rs rename {chain/ethereum/tests => store/test-store/tests/chain/ethereum}/full-text.graphql (100%) rename {chain/ethereum/tests => store/test-store/tests/chain/ethereum}/ipfs-on-ethereum-contracts.ts (100%) rename {chain/ethereum/tests => store/test-store/tests/chain/ethereum}/ipfs-on-ethereum-contracts.wasm (100%) rename {chain/ethereum/tests => 
store/test-store/tests/chain/ethereum}/manifest.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index c909ae5a88b..c69bea07c66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1653,7 +1653,6 @@ dependencies = [ "prost-types", "semver", "serde", - "test-store", "tiny-keccak 1.5.0", "tonic-build", "uuid", diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index 170b9f554cc..4963b28ad79 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -25,7 +25,6 @@ graph-runtime-wasm = { path = "../../runtime/wasm" } graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] -test-store = { path = "../../store/test-store" } base64 = "0.20.0" uuid = { version = "1.3.0", features = ["v4"] } diff --git a/chain/ethereum/tests/README.md b/chain/ethereum/tests/README.md new file mode 100644 index 00000000000..e0444bc179f --- /dev/null +++ b/chain/ethereum/tests/README.md @@ -0,0 +1,5 @@ +Put integration tests for this crate into +`store/test-store/tests/chain/ethereum`. This avoids cyclic dev-dependencies +which make rust-analyzer nearly unusable. 
Once [this +issue](https://github.com/rust-lang/rust-analyzer/issues/14167) has been +fixed, we can move tests back here diff --git a/store/test-store/tests/chain.rs b/store/test-store/tests/chain.rs new file mode 100644 index 00000000000..3364791c26e --- /dev/null +++ b/store/test-store/tests/chain.rs @@ -0,0 +1,5 @@ +pub mod chain { + pub mod ethereum { + pub mod manifest; + } +} diff --git a/chain/ethereum/tests/full-text.graphql b/store/test-store/tests/chain/ethereum/full-text.graphql similarity index 100% rename from chain/ethereum/tests/full-text.graphql rename to store/test-store/tests/chain/ethereum/full-text.graphql diff --git a/chain/ethereum/tests/ipfs-on-ethereum-contracts.ts b/store/test-store/tests/chain/ethereum/ipfs-on-ethereum-contracts.ts similarity index 100% rename from chain/ethereum/tests/ipfs-on-ethereum-contracts.ts rename to store/test-store/tests/chain/ethereum/ipfs-on-ethereum-contracts.ts diff --git a/chain/ethereum/tests/ipfs-on-ethereum-contracts.wasm b/store/test-store/tests/chain/ethereum/ipfs-on-ethereum-contracts.wasm similarity index 100% rename from chain/ethereum/tests/ipfs-on-ethereum-contracts.wasm rename to store/test-store/tests/chain/ethereum/ipfs-on-ethereum-contracts.wasm diff --git a/chain/ethereum/tests/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs similarity index 99% rename from chain/ethereum/tests/manifest.rs rename to store/test-store/tests/chain/ethereum/manifest.rs index 5d1c7bb3a84..f2278311cf9 100644 --- a/chain/ethereum/tests/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -18,8 +18,8 @@ use graph::{ data::subgraph::SubgraphFeature, }; +use graph::semver::Version; use graph_chain_ethereum::{Chain, NodeCapabilities}; -use semver::Version; use test_store::LOGGER; const GQL_SCHEMA: &str = "type Thing @entity { id: ID! 
}"; From b74748873edbd5b5a993d8487607aeefa6b96cdc Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 11:30:01 -0700 Subject: [PATCH 0112/2104] core, store: Move core tests to test-store --- Cargo.lock | 2 +- core/Cargo.toml | 1 - core/tests/README.md | 5 +++++ store/test-store/Cargo.toml | 3 +++ store/test-store/tests/core.rs | 3 +++ store/test-store/tests/core/fixtures/ipfs_folder/hello.txt | 1 + store/test-store/tests/core/fixtures/ipfs_folder/random.txt | 1 + {core/tests => store/test-store/tests/core}/interfaces.rs | 0 8 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 core/tests/README.md create mode 100644 store/test-store/tests/core.rs create mode 100644 store/test-store/tests/core/fixtures/ipfs_folder/hello.txt create mode 100644 store/test-store/tests/core/fixtures/ipfs_folder/random.txt rename {core/tests => store/test-store/tests/core}/interfaces.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index c69bea07c66..b64ebea1d57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1729,7 +1729,6 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "test-store", "tower 0.4.12", "tower-test", "uuid", @@ -4367,6 +4366,7 @@ dependencies = [ "graphql-parser", "hex-literal", "lazy_static", + "pretty_assertions", "prost-types", "serde", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index 6100eeb7aa9..5c270e4a98f 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -32,7 +32,6 @@ anyhow = "1.0" [dev-dependencies] tower-test = { git = "https://github.com/tower-rs/tower.git" } -test-store = { path = "../store/test-store" } hex = "0.4.3" graphql-parser = "0.4.0" pretty_assertions = "1.3.0" diff --git a/core/tests/README.md b/core/tests/README.md new file mode 100644 index 00000000000..261623bcccf --- /dev/null +++ b/core/tests/README.md @@ -0,0 +1,5 @@ +Put integration tests for this crate into `store/test-store/tests/core`. +This avoids cyclic dev-dependencies which make rust-analyzer nearly +unusable. 
Once [this +issue](https://github.com/rust-lang/rust-analyzer/issues/14167) has been +fixed, we can move tests back here diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index 2ddd85b0989..fd134344991 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -18,3 +18,6 @@ hex-literal = "0.3" diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2"] } serde = "1.0" prost-types = { workspace = true } + +[dev-dependencies] +pretty_assertions = "1.3.0" diff --git a/store/test-store/tests/core.rs b/store/test-store/tests/core.rs new file mode 100644 index 00000000000..46d45977a1f --- /dev/null +++ b/store/test-store/tests/core.rs @@ -0,0 +1,3 @@ +pub mod core { + pub mod interfaces; +} diff --git a/store/test-store/tests/core/fixtures/ipfs_folder/hello.txt b/store/test-store/tests/core/fixtures/ipfs_folder/hello.txt new file mode 100644 index 00000000000..3b18e512dba --- /dev/null +++ b/store/test-store/tests/core/fixtures/ipfs_folder/hello.txt @@ -0,0 +1 @@ +hello world diff --git a/store/test-store/tests/core/fixtures/ipfs_folder/random.txt b/store/test-store/tests/core/fixtures/ipfs_folder/random.txt new file mode 100644 index 00000000000..87332e5d5cc --- /dev/null +++ b/store/test-store/tests/core/fixtures/ipfs_folder/random.txt @@ -0,0 +1 @@ +20c12d76-0e6a-428c-b6c9-b7e384ccb6fc \ No newline at end of file diff --git a/core/tests/interfaces.rs b/store/test-store/tests/core/interfaces.rs similarity index 100% rename from core/tests/interfaces.rs rename to store/test-store/tests/core/interfaces.rs From c74154d04d5ef5b32cb89161d909eed1f35506ea Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 11:33:26 -0700 Subject: [PATCH 0113/2104] graphql, store: Move graphql tests to test-store --- Cargo.lock | 2 -- graphql/Cargo.toml | 2 -- graphql/tests/README.md | 5 +++++ store/test-store/tests/graphql.rs | 4 ++++ .../test-store/tests/graphql}/introspection.rs | 3 --- {graphql/tests => 
store/test-store/tests/graphql}/query.rs | 3 --- 6 files changed, 9 insertions(+), 10 deletions(-) create mode 100644 graphql/tests/README.md create mode 100644 store/test-store/tests/graphql.rs rename {graphql/tests => store/test-store/tests/graphql}/introspection.rs (99%) rename {graphql/tests => store/test-store/tests/graphql}/query.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index b64ebea1d57..bb6368e5cae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1744,7 +1744,6 @@ dependencies = [ "crossbeam", "defer", "graph", - "graph-chain-ethereum", "graphql-parser", "graphql-tools", "indexmap", @@ -1753,7 +1752,6 @@ dependencies = [ "pretty_assertions", "stable-hash 0.3.3", "stable-hash 0.4.2", - "test-store", ] [[package]] diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 17bd166e8d1..9d779a39270 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -20,5 +20,3 @@ async-recursion = "1.0.0" [dev-dependencies] pretty_assertions = "1.3.0" -test-store = { path = "../store/test-store" } -graph-chain-ethereum = { path = "../chain/ethereum" } diff --git a/graphql/tests/README.md b/graphql/tests/README.md new file mode 100644 index 00000000000..c2b55fa311e --- /dev/null +++ b/graphql/tests/README.md @@ -0,0 +1,5 @@ +Put integration tests for this crate into `store/test-store/tests/graphql`. +This avoids cyclic dev-dependencies which make rust-analyzer nearly +unusable. 
Once [this +issue](https://github.com/rust-lang/rust-analyzer/issues/14167) has been +fixed, we can move tests back here diff --git a/store/test-store/tests/graphql.rs b/store/test-store/tests/graphql.rs new file mode 100644 index 00000000000..3ae1fcd2b74 --- /dev/null +++ b/store/test-store/tests/graphql.rs @@ -0,0 +1,4 @@ +pub mod graphql { + pub mod introspection; + pub mod query; +} diff --git a/graphql/tests/introspection.rs b/store/test-store/tests/graphql/introspection.rs similarity index 99% rename from graphql/tests/introspection.rs rename to store/test-store/tests/graphql/introspection.rs index ab2360e2567..43ba9bff433 100644 --- a/graphql/tests/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate pretty_assertions; - use std::sync::Arc; use graph::data::graphql::{object, object_value, ObjectOrInterface}; diff --git a/graphql/tests/query.rs b/store/test-store/tests/graphql/query.rs similarity index 99% rename from graphql/tests/query.rs rename to store/test-store/tests/graphql/query.rs index af3f871fd6a..1f4c02a97ae 100644 --- a/graphql/tests/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate pretty_assertions; - use graph::components::store::EntityKey; use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; From 04ba48f6ace36a34759036e8264dd4432b2f8b7f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 11:39:31 -0700 Subject: [PATCH 0114/2104] store: Move postgres tests to test-store --- Cargo.lock | 3 +-- store/postgres/Cargo.toml | 2 -- store/postgres/tests/README.md | 5 +++++ store/test-store/Cargo.toml | 1 + store/test-store/tests/postgres.rs | 9 +++++++++ .../tests => test-store/tests/postgres}/chain_head.rs | 2 +- .../tests => test-store/tests/postgres}/graft.rs | 0 .../tests => test-store/tests/postgres}/relational.rs | 0 .../tests/postgres}/relational_bytes.rs | 0 .../tests => 
test-store/tests/postgres}/store.rs | 0 .../tests => test-store/tests/postgres}/subgraph.rs | 0 .../tests => test-store/tests/postgres}/writable.rs | 0 12 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 store/postgres/tests/README.md create mode 100644 store/test-store/tests/postgres.rs rename store/{postgres/tests => test-store/tests/postgres}/chain_head.rs (99%) rename store/{postgres/tests => test-store/tests/postgres}/graft.rs (100%) rename store/{postgres/tests => test-store/tests/postgres}/relational.rs (100%) rename store/{postgres/tests => test-store/tests/postgres}/relational_bytes.rs (100%) rename store/{postgres/tests => test-store/tests/postgres}/store.rs (100%) rename store/{postgres/tests => test-store/tests/postgres}/subgraph.rs (100%) rename store/{postgres/tests => test-store/tests/postgres}/writable.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index bb6368e5cae..c46f5235f34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1928,7 +1928,6 @@ dependencies = [ "futures 0.3.16", "git-testament", "graph", - "graph-chain-ethereum", "graph-core", "graph-graphql", "graphql-parser", @@ -1946,7 +1945,6 @@ dependencies = [ "rand", "serde", "stable-hash 0.3.3", - "test-store", "uuid", ] @@ -4362,6 +4360,7 @@ dependencies = [ "graph-node", "graph-store-postgres", "graphql-parser", + "hex", "hex-literal", "lazy_static", "pretty_assertions", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index ee185f9f05f..f407b0aff71 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -40,6 +40,4 @@ pretty_assertions = "1.3.0" futures = "0.3" clap = "3.2.23" graphql-parser = "0.4.0" -test-store = { path = "../test-store" } hex-literal = "0.3" -graph-chain-ethereum = { path = "../../chain/ethereum" } diff --git a/store/postgres/tests/README.md b/store/postgres/tests/README.md new file mode 100644 index 00000000000..9fa18d53625 --- /dev/null +++ b/store/postgres/tests/README.md @@ -0,0 +1,5 @@ +Put integration tests for 
this crate into `store/test-store/tests/postgres`. +This avoids cyclic dev-dependencies which make rust-analyzer nearly +unusable. Once [this +issue](https://github.com/rust-lang/rust-analyzer/issues/14167) has been +fixed, we can move tests back here diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index fd134344991..c03cdc6ca8c 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -20,4 +20,5 @@ serde = "1.0" prost-types = { workspace = true } [dev-dependencies] +hex = "0.4.3" pretty_assertions = "1.3.0" diff --git a/store/test-store/tests/postgres.rs b/store/test-store/tests/postgres.rs new file mode 100644 index 00000000000..71c7e3a37c1 --- /dev/null +++ b/store/test-store/tests/postgres.rs @@ -0,0 +1,9 @@ +pub mod postgres { + pub mod chain_head; + pub mod graft; + pub mod relational; + pub mod relational_bytes; + pub mod store; + pub mod subgraph; + pub mod writable; +} diff --git a/store/postgres/tests/chain_head.rs b/store/test-store/tests/postgres/chain_head.rs similarity index 99% rename from store/postgres/tests/chain_head.rs rename to store/test-store/tests/postgres/chain_head.rs index 9614dc0ae79..612333bc411 100644 --- a/store/postgres/tests/chain_head.rs +++ b/store/test-store/tests/postgres/chain_head.rs @@ -1,7 +1,7 @@ //! Test ChainStore implementation of Store, in particular, how //! 
the chain head pointer gets updated in various situations -use futures::executor; +use graph::prelude::futures03::executor; use std::future::Future; use std::sync::Arc; diff --git a/store/postgres/tests/graft.rs b/store/test-store/tests/postgres/graft.rs similarity index 100% rename from store/postgres/tests/graft.rs rename to store/test-store/tests/postgres/graft.rs diff --git a/store/postgres/tests/relational.rs b/store/test-store/tests/postgres/relational.rs similarity index 100% rename from store/postgres/tests/relational.rs rename to store/test-store/tests/postgres/relational.rs diff --git a/store/postgres/tests/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs similarity index 100% rename from store/postgres/tests/relational_bytes.rs rename to store/test-store/tests/postgres/relational_bytes.rs diff --git a/store/postgres/tests/store.rs b/store/test-store/tests/postgres/store.rs similarity index 100% rename from store/postgres/tests/store.rs rename to store/test-store/tests/postgres/store.rs diff --git a/store/postgres/tests/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs similarity index 100% rename from store/postgres/tests/subgraph.rs rename to store/test-store/tests/postgres/subgraph.rs diff --git a/store/postgres/tests/writable.rs b/store/test-store/tests/postgres/writable.rs similarity index 100% rename from store/postgres/tests/writable.rs rename to store/test-store/tests/postgres/writable.rs From 8417383525889c6d24695b0a802916358446d105 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 16:08:32 -0700 Subject: [PATCH 0115/2104] store: Several improvements to Writable - check that the writer thread is still running when accepting items to be queued. It makes no sense to accept requests when the writer thread is not running, and should therefore fail early. 
- change the mechanism for how tests can single-step a deployment to be specific to that deployment to avoid problems with tests running at the same time. This was a huge footgun, and getting caught in that looks like a deadlock - implement Debug for queued requests --- store/postgres/src/writable.rs | 133 ++++++++++++++++---- store/test-store/tests/postgres/writable.rs | 4 +- 2 files changed, 109 insertions(+), 28 deletions(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index a5127aa715b..cda37b79aeb 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -5,6 +5,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{DeploymentCursorTracker, DerivedEntityQuery, EntityKey, ReadStore}; +use graph::constraint_violation; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -12,6 +13,7 @@ use graph::prelude::{ BLOCK_NUMBER_MAX, }; use graph::slog::info; +use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ cheap_clone::CheapClone, @@ -448,6 +450,29 @@ enum Request { Stop, } +impl std::fmt::Debug for Request { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Write { + block_ptr, + mods, + store, + .. + } => write!( + f, + "write[{}, {:p}, {} entities]", + block_ptr.number, + store.as_ref(), + mods.len() + ), + Self::RevertTo { + block_ptr, store, .. 
+ } => write!(f, "revert[{}, {:p}]", block_ptr.number, store.as_ref()), + Self::Stop => write!(f, "stop"), + } + } +} + enum ExecResult { Continue, Stop, @@ -520,28 +545,56 @@ struct Queue { /// allowed to process as many requests as it can #[cfg(debug_assertions)] pub(crate) mod test_support { - use std::sync::atomic::{AtomicBool, Ordering}; + use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + }; - use graph::{prelude::lazy_static, util::bounded_queue::BoundedQueue}; + use graph::{ + components::store::{DeploymentId, DeploymentLocator}, + prelude::lazy_static, + util::bounded_queue::BoundedQueue, + }; lazy_static! { - static ref DO_STEP: AtomicBool = AtomicBool::new(false); - static ref ALLOWED_STEPS: BoundedQueue<()> = BoundedQueue::with_capacity(1_000); + static ref STEPS: Mutex>>> = + Mutex::new(HashMap::new()); } - pub(super) async fn take_step() { - if DO_STEP.load(Ordering::SeqCst) { - ALLOWED_STEPS.pop().await + pub(super) async fn take_step(deployment: &DeploymentLocator) { + let steps = STEPS.lock().unwrap().get(&deployment.id).cloned(); + if let Some(steps) = steps { + steps.pop().await; } } /// Allow the writer to process `steps` requests. 
After calling this, /// the writer will only process the number of requests it is allowed to - pub async fn allow_steps(steps: usize) { + pub async fn allow_steps(deployment: &DeploymentLocator, steps: usize) { + let queue = { + let mut map = STEPS.lock().unwrap(); + map.entry(deployment.id) + .or_insert_with(|| Arc::new(BoundedQueue::with_capacity(1_000))) + .clone() + }; for _ in 0..steps { - ALLOWED_STEPS.push(()).await + queue.push(()).await } - DO_STEP.store(true, Ordering::SeqCst); + } +} + +impl std::fmt::Debug for Queue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let reqs = self.queue.fold(vec![], |mut reqs, req| { + reqs.push(req.clone()); + reqs + }); + + write!(f, "reqs[{} : ", self.store.site)?; + for req in reqs { + write!(f, " {:?}", req)?; + } + writeln!(f, "]") } } @@ -552,11 +605,11 @@ impl Queue { store: Arc, capacity: usize, registry: Arc, - ) -> Arc { + ) -> (Arc, JoinHandle<()>) { async fn start_writer(queue: Arc, logger: Logger) { loop { #[cfg(debug_assertions)] - test_support::take_step().await; + test_support::take_step(&queue.store.site.as_ref().into()).await; // We peek at the front of the queue, rather than pop it // right away, so that query methods like `get` have access @@ -623,9 +676,9 @@ impl Queue { }; let queue = Arc::new(queue); - graph::spawn(start_writer(queue.cheap_clone(), logger)); + let handle = graph::spawn(start_writer(queue.cheap_clone(), logger)); - queue + (queue, handle) } /// Add a write request to the queue @@ -637,6 +690,7 @@ impl Queue { /// Wait for the background writer to finish processing queued entries async fn flush(&self) -> Result<(), StoreError> { + self.check_err()?; self.queue.wait_empty().await; self.check_err() } @@ -880,7 +934,10 @@ impl Queue { /// A shim to allow bypassing any pipelined store handling if need be enum Writer { Sync(Arc), - Async(Arc), + Async { + queue: Arc, + join_handle: JoinHandle<()>, + }, } impl Writer { @@ -894,7 +951,24 @@ impl Writer { if 
capacity == 0 { Self::Sync(store) } else { - Self::Async(Queue::start(logger, store, capacity, registry)) + let (queue, join_handle) = Queue::start(logger, store.clone(), capacity, registry); + Self::Async { queue, join_handle } + } + } + + fn check_queue_running(&self) -> Result<(), StoreError> { + match self { + Writer::Sync(_) => Ok(()), + Writer::Async { join_handle, queue } => { + if join_handle.is_finished() { + Err(constraint_violation!( + "Subgraph writer for {} is not running", + queue.store.site + )) + } else { + Ok(()) + } + } } } @@ -920,7 +994,8 @@ impl Writer { &manifest_idx_and_name, &processed_data_sources, ), - Writer::Async(queue) => { + Writer::Async { queue, .. } => { + self.check_queue_running()?; let req = Request::Write { store: queue.store.cheap_clone(), stopwatch: queue.stopwatch.cheap_clone(), @@ -944,7 +1019,8 @@ impl Writer { ) -> Result<(), StoreError> { match self { Writer::Sync(store) => store.revert_block_operations(block_ptr_to, &firehose_cursor), - Writer::Async(queue) => { + Writer::Async { queue, .. } => { + self.check_queue_running()?; let req = Request::RevertTo { store: queue.store.cheap_clone(), block_ptr: block_ptr_to, @@ -958,14 +1034,17 @@ impl Writer { async fn flush(&self) -> Result<(), StoreError> { match self { Writer::Sync { .. } => Ok(()), - Writer::Async(queue) => queue.flush().await, + Writer::Async { queue, .. } => { + self.check_queue_running()?; + queue.flush().await + } } } fn get(&self, key: &EntityKey) -> Result, StoreError> { match self { Writer::Sync(store) => store.get(key, BLOCK_NUMBER_MAX), - Writer::Async(queue) => queue.get(key), + Writer::Async { queue, .. } => queue.get(key), } } @@ -975,7 +1054,7 @@ impl Writer { ) -> Result, StoreError> { match self { Writer::Sync(store) => store.get_many(keys, BLOCK_NUMBER_MAX), - Writer::Async(queue) => queue.get_many(keys), + Writer::Async { queue, .. 
} => queue.get_many(keys), } } @@ -985,7 +1064,7 @@ impl Writer { ) -> Result, StoreError> { match self { Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, vec![]), - Writer::Async(queue) => queue.get_derived(key), + Writer::Async { queue, .. } => queue.get_derived(key), } } @@ -999,28 +1078,30 @@ impl Writer { .load_dynamic_data_sources(BLOCK_NUMBER_MAX, manifest_idx_and_name) .await } - Writer::Async(queue) => queue.load_dynamic_data_sources(manifest_idx_and_name).await, + Writer::Async { queue, .. } => { + queue.load_dynamic_data_sources(manifest_idx_and_name).await + } } } fn poisoned(&self) -> bool { match self { Writer::Sync(_) => false, - Writer::Async(queue) => queue.poisoned(), + Writer::Async { queue, .. } => queue.poisoned(), } } async fn stop(&self) -> Result<(), StoreError> { match self { Writer::Sync(_) => Ok(()), - Writer::Async(queue) => queue.stop().await, + Writer::Async { queue, .. } => queue.stop().await, } } fn deployment_synced(&self) { match self { Writer::Sync(_) => {} - Writer::Async(queue) => queue.deployment_synced(), + Writer::Async { queue, .. 
} => queue.deployment_synced(), } } } diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index a171de104cf..1c0733e0574 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -121,11 +121,11 @@ async fn insert_count(store: &Arc, deployment: &DeploymentL async fn pause_writer(deployment: &DeploymentLocator) { flush(deployment).await.unwrap(); - writable::allow_steps(0).await; + writable::allow_steps(deployment, 0).await; } async fn resume_writer(deployment: &DeploymentLocator, steps: usize) { - writable::allow_steps(steps).await; + writable::allow_steps(deployment, steps).await; flush(deployment).await.unwrap(); } From 3ba05c329bc9e35ee63b059ca7a60de368823d85 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 31 Mar 2023 18:11:27 -0700 Subject: [PATCH 0116/2104] all: Remove some unneeded dev-dependencies --- Cargo.lock | 20 +++++++------------- core/Cargo.toml | 4 ---- graphql/Cargo.toml | 3 --- store/postgres/Cargo.toml | 2 -- 4 files changed, 7 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c46f5235f34..b44deb1a5bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1718,13 +1718,10 @@ dependencies = [ "graph-chain-near", "graph-chain-substreams", "graph-runtime-wasm", - "graphql-parser", - "hex", "ipfs-api", "ipfs-api-backend-hyper", "lazy_static", "lru_time_cache", - "pretty_assertions", "semver", "serde", "serde_json", @@ -1749,7 +1746,6 @@ dependencies = [ "indexmap", "lazy_static", "parking_lot 0.12.1", - "pretty_assertions", "stable-hash 0.3.3", "stable-hash 0.4.2", ] @@ -1925,14 +1921,12 @@ dependencies = [ "diesel_derives", "diesel_migrations", "fallible-iterator", - "futures 0.3.16", "git-testament", "graph", "graph-core", "graph-graphql", "graphql-parser", "hex", - "hex-literal", "itertools", "lazy_static", "lru_time_cache", @@ -2209,9 +2203,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" 
+version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -2769,13 +2763,14 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name = "multiaddr" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" dependencies = [ "arrayref", "byteorder", "data-encoding", + "log", "multibase", "multihash 0.17.0", "percent-encoding", @@ -5048,12 +5043,11 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index 5c270e4a98f..fbc4e27c723 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -32,10 +32,6 @@ anyhow = "1.0" [dev-dependencies] tower-test = { git = "https://github.com/tower-rs/tower.git" } -hex = "0.4.3" -graphql-parser = "0.4.0" -pretty_assertions = "1.3.0" -anyhow = "1.0" ipfs-api-backend-hyper = "0.6" ipfs-api = { version = "0.17.0", features = [ "with-hyper-rustls", diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 9d779a39270..3e247f66988 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -17,6 +17,3 @@ defer = "0.1" parking_lot = "0.12" anyhow = "1.0" async-recursion = "1.0.0" - -[dev-dependencies] -pretty_assertions = "1.3.0" diff --git a/store/postgres/Cargo.toml 
b/store/postgres/Cargo.toml index f407b0aff71..90c977aa3b2 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -37,7 +37,5 @@ hex = "0.4.3" pretty_assertions = "1.3.0" [dev-dependencies] -futures = "0.3" clap = "3.2.23" graphql-parser = "0.4.0" -hex-literal = "0.3" From a3bcfc65eb8a8f4135160a28b663e23449d26668 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 10 Apr 2023 12:13:48 -0700 Subject: [PATCH 0117/2104] graph, test-store: Small improvements * Do not arbitrarily truncate subgraph names to 32 characters; that can lead to strange surprises when the names of two test subgraphs only differ after the 32nd character * Use a more direct method to get rid of test subgraphs. Using `record_unused_deployments` is a bit too roundabout for tests, and can lead to not deleting a subgraph if a previous run recorded the subgraph as unused, but didn't delete it --- graph/src/data/subgraph/mod.rs | 6 ++++++ store/test-store/src/store.rs | 20 ++++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index b731d3a50a1..7ab0ac7588f 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -218,6 +218,12 @@ impl SubgraphName { Ok(SubgraphName(s)) } + /// Tests are allowed to create arbitrary subgraph names + #[cfg(debug_assertions)] + pub fn new_unchecked(s: impl Into) -> Self { + SubgraphName(s.into()) + } + pub fn as_str(&self) -> &str { self.0.as_str() } diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 9d421f4268b..c55f8599fc5 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -170,11 +170,7 @@ pub async fn create_subgraph( yaml.insert("dataSources".into(), Vec::::new().into()); let yaml = serde_yaml::to_string(&yaml).unwrap(); let deployment = DeploymentCreate::new(yaml, &manifest, None).graft(base); - let name = { - let mut name = subgraph_id.to_string(); - 
name.truncate(32); - SubgraphName::new(name).unwrap() - }; + let name = SubgraphName::new_unchecked(subgraph_id.to_string()); let deployment = SUBGRAPH_STORE.create_deployment_replace( name, &schema, @@ -198,14 +194,14 @@ pub async fn create_test_subgraph(subgraph_id: &DeploymentHash, schema: &str) -> } pub fn remove_subgraph(id: &DeploymentHash) { - let name = { - let mut name = id.to_string(); - name.truncate(32); - SubgraphName::new(name).unwrap() - }; + let name = SubgraphName::new_unchecked(id.to_string()); SUBGRAPH_STORE.remove_subgraph(name).unwrap(); - for detail in SUBGRAPH_STORE.record_unused_deployments().unwrap() { - SUBGRAPH_STORE.remove_deployment(detail.id).unwrap(); + let locs = SUBGRAPH_STORE.locators(id.as_str()).unwrap(); + let conn = primary_connection(); + for loc in locs { + let site = conn.locate_site(loc.clone()).unwrap().unwrap(); + conn.unassign_subgraph(&site).unwrap(); + SUBGRAPH_STORE.remove_deployment(site.id).unwrap(); } } From 824fec470613765930e17233093372773adbd6c9 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Wed, 12 Apr 2023 15:43:49 +0200 Subject: [PATCH 0118/2104] Fix: typos (#4523) * Fix: typos --- README.md | 2 +- docs/getting-started.md | 2 +- docs/graphman.md | 10 +++++----- docs/metrics.md | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index ca0ac229c43..809a86b7a17 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ the connection string, check the [Postgres documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). `graph-node` uses a few Postgres extensions. If the Postgres user with which you run `graph-node` is a superuser, `graph-node` will enable these -extensions when it initalizes the database. If the Postgres user is not a +extensions when it initializes the database. 
If the Postgres user is not a superuser, you will need to create the extensions manually since only superusers are allowed to do that. To create them you need to connect as a superuser, which in many installations is the `postgres` user: diff --git a/docs/getting-started.md b/docs/getting-started.md index e7ea53a7ca1..52f7ea190a1 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -62,7 +62,7 @@ Below, we outline the required steps to build a subgraph from scratch, which wil 2. [Create the Postgres database](#22-create-the-postgres-db) - 3. [Start the Graph Node and Connect to an Etheruem node](#23-starting-the-graph-node-and-connecting-to-an-etheruem-node) + 3. [Start the Graph Node and Connect to an Ethereum node](#23-starting-the-graph-node-and-connecting-to-an-ethereum-node) 4. [Deploy the subgraph](#24-deploying-the-subgraph) 3. Query the subgraph diff --git a/docs/graphman.md b/docs/graphman.md index 0964efc6051..d3f6fdf3f33 100644 --- a/docs/graphman.md +++ b/docs/graphman.md @@ -52,7 +52,7 @@ By default, it shows the following attributes for the deployment: - **name** - **status** *(`pending` or `current`)* - **id** *(the `Qm...` identifier for the deployment's subgraph)* -- **namespace** *(The database schema which contain's that deployment data tables)* +- **namespace** *(The database schema which contains that deployment data tables)* - **shard** - **active** *(If there are multiple entries for the same subgraph, only one of them will be active. That's the one we use for querying)* - **chain** @@ -169,7 +169,7 @@ primary shard. No indexed data is lost as a result of this command. -This sub-command is used as previus step towards removing all data from unused subgraphs, followed by +This sub-command is used as previous step towards removing all data from unused subgraphs, followed by `graphman unused remove`. 
A deployment is unused if it fulfills all of these criteria: @@ -236,7 +236,7 @@ Remove a specific unused deployment ### SYNOPSIS - Delete a deployment and all it's indexed data + Delete a deployment and all its indexed data The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Since the same IPFS hash can be deployed in multiple shards, it is possible to @@ -322,7 +322,7 @@ is useful to diagnose the integrity of cached blocks and eventually fix them. ### OPTIONS -Blocks can be selected by different methods. The `check-blocks` command let's you use the block hash, a single +Blocks can be selected by different methods. The `check-blocks` command lets you use the block hash, a single number or a number range to refer to which blocks it should verify: #### `by-hash` @@ -338,7 +338,7 @@ number or a number range to refer to which blocks it should verify: graphman --config chain check-blocks by-range [-f|--from ] [-t|--to ] [--delete-duplicates] The `by-range` method lets you scan for numeric block ranges and offers the `--from` and `--to` options for -you to define the search bounds. If one of those options is ommited, `graphman` will consider an open bound +you to define the search bounds. If one of those options is omitted, `graphman` will consider an open bound and will scan all blocks up to or after that number. Over time, it can happen that a JSON RPC provider offers different blocks for the same block number. 
In those diff --git a/docs/metrics.md b/docs/metrics.md index 7a545b1469a..c0bb08a1a0c 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -27,7 +27,7 @@ Track the **last reverted block** for a subgraph deployment - `deployment_sync_secs` total **time spent syncing** - `deployment_transact_block_operations_duration` -Measures **duration of commiting all the entity operations** in a block and **updating the subgraph pointer** +Measures **duration of committing all the entity operations** in a block and **updating the subgraph pointer** - `deployment_trigger_processing_duration` Measures **duration of trigger processing** for a subgraph deployment - `eth_rpc_errors` From e556be6fd490d5a03f8e72c78fc9795f8251903f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Apr 2023 16:55:49 +0000 Subject: [PATCH 0119/2104] build(deps): bump h2 from 0.3.13 to 0.3.17 Bumps [h2](https://github.com/hyperium/h2) from 0.3.13 to 0.3.17. - [Release notes](https://github.com/hyperium/h2/releases) - [Changelog](https://github.com/hyperium/h2/blob/master/CHANGELOG.md) - [Commits](https://github.com/hyperium/h2/compare/v0.3.13...v0.3.17) --- updated-dependencies: - dependency-name: h2 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b44deb1a5bf..eec33570c2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1996,9 +1996,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.13" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" dependencies = [ "bytes", "fnv", From e36fc6338895d840a41416eafe47a55709c51719 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Mon, 17 Apr 2023 14:22:46 +0100 Subject: [PATCH 0120/2104] chain/ethereum: Add provider string to logger on eth_call (#4548) --- chain/ethereum/src/ethereum_adapter.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index a7a7500297b..3b044900e39 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -10,6 +10,7 @@ use graph::data::subgraph::API_VERSION_0_0_7; use graph::prelude::ethabi::ParamType; use graph::prelude::ethabi::Token; use graph::prelude::tokio::try_join; +use graph::slog::o; use graph::{ blockchain::{block_stream::BlockWithTriggers, BlockPtr, IngestorError}, prelude::{ @@ -426,6 +427,7 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> impl Future + Send { let web3 = self.web3.clone(); + let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); // Ganache does not support calls by block hash. 
// See https://github.com/trufflesuite/ganache-cli/issues/973 From 0f49bab1ca08fd02940d7fabfd3920374f0674be Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 15 Apr 2023 13:45:51 -0700 Subject: [PATCH 0121/2104] graph, store: Remove SqlValue, implement ToSql directly for Value The indirection just caused some unneeded allocations --- graph/src/data/store/mod.rs | 3 ++ .../src/data/store/sql.rs | 30 +++++++------------ store/postgres/src/lib.rs | 1 - store/postgres/src/relational_queries.rs | 16 +++++----- 4 files changed, 21 insertions(+), 29 deletions(-) rename store/postgres/src/sql_value.rs => graph/src/data/store/sql.rs (76%) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 3d1b38a095c..8d91b9933ea 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -26,6 +26,9 @@ pub mod scalar; // Ethereum compatibility. pub mod ethereum; +/// Conversion of values to/from SQL +pub mod sql; + /// Filter subscriptions #[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum SubscriptionFilter { diff --git a/store/postgres/src/sql_value.rs b/graph/src/data/store/sql.rs similarity index 76% rename from store/postgres/src/sql_value.rs rename to graph/src/data/store/sql.rs index 22439449f2b..2df61a8ebe9 100644 --- a/store/postgres/src/sql_value.rs +++ b/graph/src/data/store/sql.rs @@ -1,24 +1,16 @@ +use anyhow::anyhow; use diesel::pg::Pg; use diesel::serialize::{self, Output, ToSql}; use diesel::sql_types::{Binary, Bool, Integer, Text}; -use graph::prelude::anyhow::anyhow; + use std::io::Write; use std::str::FromStr; -use graph::data::store::{scalar, Value}; - -#[derive(Clone, Debug, PartialEq, Eq, AsExpression)] -pub struct SqlValue(Value); - -impl SqlValue { - pub fn new_array(values: Vec) -> Vec { - values.into_iter().map(SqlValue).collect() - } -} +use super::{scalar, Value}; -impl ToSql for SqlValue { +impl ToSql for Value { fn to_sql(&self, out: &mut Output) -> serialize::Result { - match &self.0 { + match 
self { Value::Bool(b) => >::to_sql(b, out), v => Err(anyhow!( "Failed to convert non-boolean attribute value to boolean in SQL: {}", @@ -29,9 +21,9 @@ impl ToSql for SqlValue { } } -impl ToSql for SqlValue { +impl ToSql for Value { fn to_sql(&self, out: &mut Output) -> serialize::Result { - match &self.0 { + match self { Value::Int(i) => >::to_sql(i, out), v => Err(anyhow!( "Failed to convert non-int attribute value to int in SQL: {}", @@ -42,9 +34,9 @@ impl ToSql for SqlValue { } } -impl ToSql for SqlValue { +impl ToSql for Value { fn to_sql(&self, out: &mut Output) -> serialize::Result { - match &self.0 { + match self { Value::String(s) => >::to_sql(s, out), Value::Bytes(h) => >::to_sql(&h.to_string(), out), v => Err(anyhow!( @@ -56,9 +48,9 @@ impl ToSql for SqlValue { } } -impl ToSql for SqlValue { +impl ToSql for Value { fn to_sql(&self, out: &mut Output) -> serialize::Result { - match &self.0 { + match self { Value::Bytes(h) => <_ as ToSql>::to_sql(&h.as_slice(), out), Value::String(s) => { <_ as ToSql>::to_sql(scalar::Bytes::from_str(s)?.as_slice(), out) diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index 32b673258ef..11b1988c091 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -33,7 +33,6 @@ pub mod query_store; mod relational; mod relational_queries; mod retry; -mod sql_value; mod store; mod store_events; mod subgraph_store; diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 35b86278d5d..d8a09433e76 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -36,7 +36,6 @@ use crate::relational::{ Column, ColumnType, IdType, Layout, SqlName, Table, BYTE_ARRAY_PREFIX_SIZE, PRIMARY_KEY_COLUMN, STRING_PREFIX_SIZE, }; -use crate::sql_value::SqlValue; use crate::{ block_range::{ BlockRangeColumn, BlockRangeLowerBoundClause, BlockRangeUpperBoundClause, BLOCK_COLUMN, @@ -579,7 +578,6 @@ impl<'a> QueryFragment for 
QueryValue<'a> { } Value::Bool(b) => out.push_bind_param::(b), Value::List(values) => { - let sql_values = SqlValue::new_array(values.clone()); match &column_type { ColumnType::BigDecimal | ColumnType::BigInt => { let text_values: Vec<_> = values.iter().map(|v| v.to_string()).collect(); @@ -587,12 +585,12 @@ impl<'a> QueryFragment for QueryValue<'a> { out.push_sql("::numeric[]"); Ok(()) } - ColumnType::Boolean => out.push_bind_param::, _>(&sql_values), - ColumnType::Bytes => out.push_bind_param::, _>(&sql_values), - ColumnType::Int => out.push_bind_param::, _>(&sql_values), - ColumnType::String => out.push_bind_param::, _>(&sql_values), + ColumnType::Boolean => out.push_bind_param::, _>(values), + ColumnType::Bytes => out.push_bind_param::, _>(values), + ColumnType::Int => out.push_bind_param::, _>(values), + ColumnType::String => out.push_bind_param::, _>(values), ColumnType::Enum(enum_type) => { - out.push_bind_param::, _>(&sql_values)?; + out.push_bind_param::, _>(values)?; out.push_sql("::"); out.push_sql(enum_type.name.as_str()); out.push_sql("[]"); @@ -600,11 +598,11 @@ impl<'a> QueryFragment for QueryValue<'a> { } // TSVector will only be in a Value::List() for inserts so "to_tsvector" can always be used here ColumnType::TSVector(config) => { - if sql_values.is_empty() { + if values.is_empty() { out.push_sql("''::tsvector"); } else { out.push_sql("("); - for (i, value) in sql_values.iter().enumerate() { + for (i, value) in values.iter().enumerate() { if i > 0 { out.push_sql(") || "); } From 3f2306ee0585bc10a10b855968e9cec004497701 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 19 Apr 2023 10:41:44 +0100 Subject: [PATCH 0122/2104] node: Fix fresh start with firehose only (#4553) --- node/src/main.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/node/src/main.rs b/node/src/main.rs index 9d7c79830dc..9c38192f5ba 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -1,5 +1,6 @@ use clap::Parser as _; use 
ethereum::chain::{EthereumAdapterSelector, EthereumBlockRefetcher, EthereumStreamBuilder}; +use ethereum::codec::HeaderOnlyBlock; use ethereum::{BlockIngestor, EthereumNetworks, RuntimeAdapter}; use git_testament::{git_testament, render_testament}; use graph::blockchain::client::ChainClient; @@ -296,9 +297,19 @@ async fn main() { ) .await; + // This only has idents for chains with rpc adapters. let (eth_networks, ethereum_idents) = connect_ethereum_networks(&logger, eth_networks).await; + let (eth_firehose_only_networks, eth_firehose_only_idents) = + connect_firehose_networks::( + &logger, + firehose_networks_by_kind + .remove(&BlockchainKind::Ethereum) + .unwrap_or_else(FirehoseNetworks::new), + ) + .await; + let (near_networks, near_idents) = connect_firehose_networks::( &logger, @@ -318,6 +329,7 @@ async fn main() { let network_identifiers = ethereum_idents .into_iter() + .chain(eth_firehose_only_idents) .chain(arweave_idents) .chain(near_idents) .chain(cosmos_idents) @@ -334,12 +346,18 @@ async fn main() { metrics_registry.clone(), ); + let eth_firehose_only_networks = if eth_firehose_only_networks.networks.len() == 0 { + None + } else { + Some(ð_firehose_only_networks) + }; + let ethereum_chains = ethereum_networks_as_chains( &mut blockchain_map, &logger, node_id.clone(), metrics_registry.clone(), - firehose_networks_by_kind.get(&BlockchainKind::Ethereum), + eth_firehose_only_networks, substreams_networks_by_kind.get(&BlockchainKind::Ethereum), ð_networks, network_store.as_ref(), From dd3e88827de91531ccc5be9cace78c667310aa58 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Thu, 20 Apr 2023 15:37:35 +0100 Subject: [PATCH 0123/2104] core: Add more stopwatch sections to runner process (#4558) --- core/src/subgraph/runner.rs | 244 ++++++++++++++++++++---------------- 1 file changed, 137 insertions(+), 107 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 3ef7ce18845..455edaa55fb 100644 --- a/core/src/subgraph/runner.rs 
+++ b/core/src/subgraph/runner.rs @@ -28,6 +28,10 @@ use std::time::{Duration, Instant}; const MINUTE: Duration = Duration::from_secs(60); const SKIP_PTR_UPDATES_THRESHOLD: Duration = Duration::from_secs(60 * 5); +const HANDLE_REVERT_SECTION_NAME: &str = "handle_revert"; +const PROCESS_BLOCK_SECTION_NAME: &str = "process_block"; +const PROCESS_TRIGGERS_SECTION_NAME: &str = "process_triggers"; +const HANDLE_CREATED_DS_SECTION_NAME: &str = "handle_new_data_sources"; pub struct SubgraphRunner where @@ -220,36 +224,44 @@ where // Causality region for onchain triggers. let causality_region = PoICausalityRegion::from_network(&self.inputs.network); - // Process events one after the other, passing in entity operations - // collected previously to every new event being processed - let mut block_state = match self - .process_triggers( - &proof_of_indexing, - &block, - triggers.into_iter().map(TriggerData::Onchain), - &causality_region, - ) - .await - { - // Triggers processed with no errors or with only deterministic errors. - Ok(block_state) => block_state, - - // Some form of unknown or non-deterministic error ocurred. - Err(MappingError::Unknown(e)) => return Err(BlockProcessingError::Unknown(e)), - Err(MappingError::PossibleReorg(e)) => { - info!(logger, - "Possible reorg detected, retrying"; - "error" => format!("{:#}", e), - ); - - // In case of a possible reorg, we want this function to do nothing and restart the - // block stream so it has a chance to detect the reorg. - // - // The state is unchanged at this point, except for having cleared the entity cache. - // Losing the cache is a bit annoying but not an issue for correctness. - // - // See also b21fa73b-6453-4340-99fb-1a78ec62efb1. 
- return Ok(Action::Restart); + let mut block_state = { + let _section = self + .metrics + .stream + .stopwatch + .start_section(PROCESS_TRIGGERS_SECTION_NAME); + + // Process events one after the other, passing in entity operations + // collected previously to every new event being processed + match self + .process_triggers( + &proof_of_indexing, + &block, + triggers.into_iter().map(TriggerData::Onchain), + &causality_region, + ) + .await + { + // Triggers processed with no errors or with only deterministic errors. + Ok(block_state) => block_state, + + // Some form of unknown or non-deterministic error ocurred. + Err(MappingError::Unknown(e)) => return Err(BlockProcessingError::Unknown(e)), + Err(MappingError::PossibleReorg(e)) => { + info!(logger, + "Possible reorg detected, retrying"; + "error" => format!("{:#}", e), + ); + + // In case of a possible reorg, we want this function to do nothing and restart the + // block stream so it has a chance to detect the reorg. + // + // The state is unchanged at this point, except for having cleared the entity cache. + // Losing the cache is a bit annoying but not an issue for correctness. + // + // See also b21fa73b-6453-4340-99fb-1a78ec62efb1. + return Ok(Action::Restart); + } } }; @@ -257,89 +269,97 @@ where // to restart the block stream with the new filters. let needs_restart = block_state.has_created_data_sources() && !self.inputs.static_filters; - // This loop will: - // 1. Instantiate created data sources. - // 2. Process those data sources for the current block. - // Until no data sources are created or MAX_DATA_SOURCES is hit. - - // Note that this algorithm processes data sources spawned on the same block _breadth - // first_ on the tree implied by the parent-child relationship between data sources. Only a - // very contrived subgraph would be able to observe this. - while block_state.has_created_data_sources() { - // Instantiate dynamic data sources, removing them from the block state. 
- let (data_sources, runtime_hosts) = - self.create_dynamic_data_sources(block_state.drain_created_data_sources())?; - - let filter = C::TriggerFilter::from_data_sources( - data_sources.iter().filter_map(DataSource::as_onchain), - ); - - let block: Arc = if self.inputs.chain.is_refetch_block_required() { - Arc::new( - self.inputs - .chain - .refetch_firehose_block(&logger, firehose_cursor.clone()) - .await?, - ) - } else { - block.cheap_clone() - }; - - // Reprocess the triggers from this block that match the new data sources - let block_with_triggers = self - .inputs - .triggers_adapter - .triggers_in_block(&logger, block.as_ref().clone(), &filter) - .await?; + { + let _section = self + .metrics + .stream + .stopwatch + .start_section(HANDLE_CREATED_DS_SECTION_NAME); + + // This loop will: + // 1. Instantiate created data sources. + // 2. Process those data sources for the current block. + // Until no data sources are created or MAX_DATA_SOURCES is hit. + + // Note that this algorithm processes data sources spawned on the same block _breadth + // first_ on the tree implied by the parent-child relationship between data sources. Only a + // very contrived subgraph would be able to observe this. + while block_state.has_created_data_sources() { + // Instantiate dynamic data sources, removing them from the block state. 
+ let (data_sources, runtime_hosts) = + self.create_dynamic_data_sources(block_state.drain_created_data_sources())?; + + let filter = C::TriggerFilter::from_data_sources( + data_sources.iter().filter_map(DataSource::as_onchain), + ); - let triggers = block_with_triggers.trigger_data; + let block: Arc = if self.inputs.chain.is_refetch_block_required() { + Arc::new( + self.inputs + .chain + .refetch_firehose_block(&logger, firehose_cursor.clone()) + .await?, + ) + } else { + block.cheap_clone() + }; - if triggers.len() == 1 { - info!( - &logger, - "1 trigger found in this block for the new data sources" - ); - } else if triggers.len() > 1 { - info!( - &logger, - "{} triggers found in this block for the new data sources", - triggers.len() - ); - } + // Reprocess the triggers from this block that match the new data sources + let block_with_triggers = self + .inputs + .triggers_adapter + .triggers_in_block(&logger, block.as_ref().clone(), &filter) + .await?; - // Add entity operations for the new data sources to the block state - // and add runtimes for the data sources to the subgraph instance. - self.persist_dynamic_data_sources(&mut block_state, data_sources); + let triggers = block_with_triggers.trigger_data; - // Process the triggers in each host in the same order the - // corresponding data sources have been created. - for trigger in triggers { - block_state = self - .ctx - .process_trigger_in_hosts( + if triggers.len() == 1 { + info!( &logger, - &runtime_hosts, - &block, - &TriggerData::Onchain(trigger), - block_state, - &proof_of_indexing, - &causality_region, - &self.inputs.debug_fork, - &self.metrics.subgraph, - self.inputs.instrument, - ) - .await - .map_err(|e| { - // This treats a `PossibleReorg` as an ordinary error which will fail the subgraph. 
- // This can cause an unnecessary subgraph failure, to fix it we need to figure out a - // way to revert the effect of `create_dynamic_data_sources` so we may return a - // clean context as in b21fa73b-6453-4340-99fb-1a78ec62efb1. - match e { - MappingError::PossibleReorg(e) | MappingError::Unknown(e) => { - BlockProcessingError::Unknown(e) + "1 trigger found in this block for the new data sources" + ); + } else if triggers.len() > 1 { + info!( + &logger, + "{} triggers found in this block for the new data sources", + triggers.len() + ); + } + + // Add entity operations for the new data sources to the block state + // and add runtimes for the data sources to the subgraph instance. + self.persist_dynamic_data_sources(&mut block_state, data_sources); + + // Process the triggers in each host in the same order the + // corresponding data sources have been created. + for trigger in triggers { + block_state = self + .ctx + .process_trigger_in_hosts( + &logger, + &runtime_hosts, + &block, + &TriggerData::Onchain(trigger), + block_state, + &proof_of_indexing, + &causality_region, + &self.inputs.debug_fork, + &self.metrics.subgraph, + self.inputs.instrument, + ) + .await + .map_err(|e| { + // This treats a `PossibleReorg` as an ordinary error which will fail the subgraph. + // This can cause an unnecessary subgraph failure, to fix it we need to figure out a + // way to revert the effect of `create_dynamic_data_sources` so we may return a + // clean context as in b21fa73b-6453-4340-99fb-1a78ec62efb1. + match e { + MappingError::PossibleReorg(e) | MappingError::Unknown(e) => { + BlockProcessingError::Unknown(e) + } } - } - })?; + })?; + } } } @@ -622,10 +642,20 @@ where ) -> Result { let action = match event { Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor))) => { + let _section = self + .metrics + .stream + .stopwatch + .start_section(PROCESS_BLOCK_SECTION_NAME); self.handle_process_block(block, cursor, cancel_handle) .await? 
} Some(Ok(BlockStreamEvent::Revert(revert_to_ptr, cursor))) => { + let _section = self + .metrics + .stream + .stopwatch + .start_section(HANDLE_REVERT_SECTION_NAME); self.handle_revert(revert_to_ptr, cursor).await? } // Log and drop the errors from the block_stream From 224e802a212f09d3063e7219174c251320806910 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 19:17:14 +0000 Subject: [PATCH 0124/2104] build(deps): bump tokio from 1.26.0 to 1.27.0 Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.26.0 to 1.27.0. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.26.0...tokio-1.27.0) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- Cargo.lock | 94 ++++++++++++++++++++++++++---------------------- graph/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 54 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eec33570c2c..a091937cf30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,7 +108,7 @@ checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -130,7 +130,7 @@ checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -141,7 +141,7 @@ checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -536,7 +536,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -892,7 +892,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d" dependencies = [ "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -916,7 +916,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 1.0.107", ] [[package]] @@ -927,7 +927,7 @@ checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -953,7 +953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", - "syn", + "syn 1.0.107", ] [[package]] @@ -972,7 +972,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn", + "syn 1.0.107", ] [[package]] @@ -1003,7 +1003,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -1022,7 +1022,7 @@ checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -1182,7 +1182,7 @@ checksum = "7dfca278e5f84b45519acaaff758ebfa01f18e96998bc24b8f1b722dd804b9bf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -1404,7 +1404,7 @@ checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -1507,7 +1507,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn", + "syn 1.0.107", "time 0.3.17", ] @@ -1794,7 +1794,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -2343,7 +2343,7 @@ checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -2705,7 +2705,7 @@ dependencies = [ "migrations_internals", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -2829,7 +2829,7 @@ 
dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.107", "synstructure", ] @@ -2973,7 +2973,7 @@ checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3033,7 +3033,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3147,7 +3147,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3258,7 +3258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9e07e3a46d0771a8a06b5f4441527802830b43e679ba12f44960f48dd4c6803" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.107", ] [[package]] @@ -3303,7 +3303,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.107", "version_check", ] @@ -3371,7 +3371,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.107", "tempfile", "which", ] @@ -3386,7 +3386,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3799,7 +3799,7 @@ checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3889,7 +3889,7 @@ checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -3963,7 +3963,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -4243,7 +4243,7 @@ dependencies = [ "heck 0.3.3", "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -4263,6 +4263,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -4277,7 +4288,7 @@ checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", "unicode-xid", ] @@ -4386,7 +4397,7 @@ checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -4471,14 +4482,13 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", @@ -4501,13 +4511,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.12", ] [[package]] @@ -4732,7 +4742,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -4849,7 +4859,7 @@ checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -4904,7 +4914,7 @@ checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", ] [[package]] @@ -5094,7 +5104,7 @@ dependencies = [ "once_cell", 
"proc-macro2", "quote", - "syn", + "syn 1.0.107", "wasm-bindgen-shared", ] @@ -5128,7 +5138,7 @@ checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.107", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 39aba9d540f..bde60f11dff 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -44,7 +44,7 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" -tokio = { version = "1.26.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.27.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.12", features = ["sync"] } tokio-retry = "0.3.0" url = "2.3.1" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 33a30a3a866..5a9e254e514 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,7 +22,7 @@ hyper = "0.14" serde = "1.0" serde_yaml = "0.8" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } -tokio = { version = "1.16.1", features = ["rt", "macros", "process"] } +tokio = { version = "1.27.0", features = ["rt", "macros", "process"] } uuid = { version = "1.3.0", features = ["v4"] } [dev-dependencies] From 1fe6c927dc47a1d591171100b52d0af73e349e65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 19:16:28 +0000 Subject: [PATCH 0125/2104] build(deps): bump hex-literal from 0.3.4 to 0.4.1 Bumps [hex-literal](https://github.com/RustCrypto/utils) from 0.3.4 to 0.4.1. - [Release notes](https://github.com/RustCrypto/utils/releases) - [Commits](https://github.com/RustCrypto/utils/compare/hex-literal-v0.3.4...hex-literal-v0.4.1) --- updated-dependencies: - dependency-name: hex-literal dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- store/test-store/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a091937cf30..7f857a637fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2095,9 +2095,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hmac" diff --git a/graph/Cargo.toml b/graph/Cargo.toml index bde60f11dff..96dfb595b0d 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -68,7 +68,7 @@ serde_plain = "1.0.1" [dev-dependencies] clap = { version = "3.2.23", features = ["derive", "env"] } maplit = "1.0.2" -hex-literal = "0.3" +hex-literal = "0.4" [build-dependencies] tonic-build = { workspace = true } diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index c03cdc6ca8c..b05e15f6cab 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -14,7 +14,7 @@ graph = { path = "../../graph" } graph-store-postgres = { path = "../postgres" } graph-chain-ethereum = { path = "../../chain/ethereum" } lazy_static = "1.1" -hex-literal = "0.3" +hex-literal = "0.4" diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2"] } serde = "1.0" prost-types = { workspace = true } From 33f5aff8ce0007ecb967c5521635920b527c73f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Apr 2023 22:56:05 +0000 Subject: [PATCH 0126/2104] build(deps): bump env_logger from 0.9.3 to 0.10.0 Bumps [env_logger](https://github.com/rust-cli/env_logger) from 0.9.3 to 0.10.0. 
- [Release notes](https://github.com/rust-cli/env_logger/releases) - [Changelog](https://github.com/rust-cli/env_logger/blob/main/CHANGELOG.md) - [Commits](https://github.com/rust-cli/env_logger/compare/v0.9.3...v0.10.0) --- updated-dependencies: - dependency-name: env_logger dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- Cargo.lock | 70 +++++++++++++++++++++++++++++++++++++++++++++---- node/Cargo.toml | 2 +- 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f857a637fc..80aa9e7d62e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1154,12 +1154,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ - "atty", "humantime 2.1.0", + "is-terminal", "log", "regex", "termcolor", @@ -1196,6 +1196,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "errno" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "errno-dragonfly" version = "0.1.1" @@ -1756,7 +1767,7 @@ version = "0.30.0" dependencies = [ "clap", "diesel", - "env_logger 0.9.3", + "env_logger 0.10.0", "futures 0.3.16", "git-testament", "graph", @@ -2087,6 +2098,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -2366,6 +2383,17 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "io-lifetimes" 
+version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "ipfs-api" version = "0.17.0" @@ -2425,6 +2453,18 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +[[package]] +name = "is-terminal" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "256017f749ab3117e93acb91063009e1f1bb56d03965b14c2c8df4eb02c524d8" +dependencies = [ + "hermit-abi 0.3.1", + "io-lifetimes", + "rustix", + "windows-sys 0.45.0", +] + [[package]] name = "isatty" version = "0.1.9" @@ -2604,6 +2644,12 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +[[package]] +name = "linux-raw-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" + [[package]] name = "lock_api" version = "0.4.6" @@ -3703,6 +3749,20 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.37.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2aae838e49b3d63e9274e1c01833cc8139d3fec468c3b84688c628f44b1ae11d" +dependencies = [ + "bitflags", + "errno 0.3.0", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", +] + [[package]] name = "rustls" version = "0.20.4" @@ -5220,7 +5280,7 @@ dependencies = [ "base64 0.13.1", "bincode", "directories-next", - "errno", + "errno 0.2.7", "file-per-thread-logger", "libc", "log", diff --git a/node/Cargo.toml b/node/Cargo.toml index 98ec4269db8..b001e7f9e5a 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -14,7 +14,7 @@ path = 
"src/bin/manager.rs" [dependencies] clap = { version = "3.2.23", features = ["derive", "env"] } -env_logger = "0.9.3" +env_logger = "0.10.0" git-testament = "0.2" graphql-parser = "0.4.0" futures = { version = "0.3.1", features = ["compat"] } From 400ac9679c075e70d68c9748f9232e4c5e690d54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 22:00:57 +0000 Subject: [PATCH 0127/2104] build(deps): bump openssl from 0.10.48 to 0.10.50 Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.48 to 0.10.50. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.48...openssl-v0.10.50) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Cargo.lock | 9 ++++----- store/postgres/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 80aa9e7d62e..9d8b9ca31f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2998,9 +2998,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.48" +version = "0.10.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3030,11 +3030,10 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.83" +version = "0.9.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" dependencies = [ - 
"autocfg", "cc", "libc", "pkg-config", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 90c977aa3b2..d85f2606386 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -22,7 +22,7 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" -openssl = "0.10.48" +openssl = "0.10.50" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" From ffef39576c22fad93653b440c7ab9c0b93299f70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:42:48 +0000 Subject: [PATCH 0128/2104] build(deps): bump proc-macro2 from 1.0.54 to 1.0.56 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.54 to 1.0.56. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.54...1.0.56) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- runtime/derive/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d8b9ca31f9..0d6cda5241b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3365,9 +3365,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.54" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] diff --git a/runtime/derive/Cargo.toml b/runtime/derive/Cargo.toml index c78a5441897..5272c94fe17 100644 --- a/runtime/derive/Cargo.toml +++ b/runtime/derive/Cargo.toml @@ -9,5 +9,5 @@ proc-macro = true [dependencies] syn = { version = "1.0.98", features = ["full"] } quote = "1.0" -proc-macro2 = "1.0.51" +proc-macro2 = "1.0.56" heck = "0.4" From ca3e4b93f197cc7d8f6778c6f1370205641d3134 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:43:04 +0000 Subject: [PATCH 0129/2104] build(deps): bump prost from 0.11.8 to 0.11.9 Bumps [prost](https://github.com/tokio-rs/prost) from 0.11.8 to 0.11.9. - [Release notes](https://github.com/tokio-rs/prost/releases) - [Commits](https://github.com/tokio-rs/prost/compare/v0.11.8...v0.11.9) --- updated-dependencies: - dependency-name: prost dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d6cda5241b..83c67e10fb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3391,9 +3391,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -3423,9 +3423,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", diff --git a/Cargo.toml b/Cargo.toml index 7e0f7294b1c..a79c476e4b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ repository = "https://github.com/graphprotocol/graph-node" license = "MIT OR Apache-2.0" [workspace.dependencies] -prost = "0.11.8" +prost = "0.11.9" prost-types = "0.11.8" tonic = { version = "0.8.3", features = ["tls-roots", "gzip"] } tonic-build = { version = "0.8.4", features = ["prost"] } From 1637874f8f7b9f0943d562ff43e9f7e08add7a9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:42:54 +0000 Subject: [PATCH 0130/2104] build(deps): bump async-recursion from 1.0.0 to 1.0.4 Bumps [async-recursion](https://github.com/dcchut/async-recursion) from 1.0.0 to 1.0.4. 
- [Release notes](https://github.com/dcchut/async-recursion/releases) - [Commits](https://github.com/dcchut/async-recursion/compare/v1.0.0...v1.0.4) --- updated-dependencies: - dependency-name: async-recursion dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Cargo.lock | 6 +++--- graphql/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83c67e10fb7..0f130622c51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,13 +102,13 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.12", ] [[package]] diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 3e247f66988..b7e9d3458e3 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -16,4 +16,4 @@ stable-hash = { version = "0.4.2" } defer = "0.1" parking_lot = "0.12" anyhow = "1.0" -async-recursion = "1.0.0" +async-recursion = "1.0.4" From 640d15da23c8f488ab5b6d96ed5998c0a469387f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:43:15 +0000 Subject: [PATCH 0131/2104] build(deps): bump uuid from 1.3.0 to 1.3.1 Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.3.0 to 1.3.1. - [Release notes](https://github.com/uuid-rs/uuid/releases) - [Commits](https://github.com/uuid-rs/uuid/compare/1.3.0...1.3.1) --- updated-dependencies: - dependency-name: uuid dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- chain/ethereum/Cargo.toml | 2 +- core/Cargo.toml | 2 +- runtime/wasm/Cargo.toml | 2 +- server/websocket/Cargo.toml | 2 +- store/postgres/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f130622c51..979b1633331 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5085,9 +5085,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" dependencies = [ "getrandom", ] diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index 4963b28ad79..48befcf1ffa 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -26,7 +26,7 @@ graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] base64 = "0.20.0" -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } [build-dependencies] tonic-build = { workspace = true } diff --git a/core/Cargo.toml b/core/Cargo.toml index fbc4e27c723..a7f27b9d887 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -36,4 +36,4 @@ ipfs-api-backend-hyper = "0.6" ipfs-api = { version = "0.17.0", features = [ "with-hyper-rustls", ], default-features = false } -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index bd95ac89e28..191d48ba620 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -14,7 +14,7 @@ bs58 = "0.4.0" graph-runtime-derive = { path = "../derive" } semver = "1.0.16" lazy_static = "1.4" -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } 
strum = "0.21.0" strum_macros = "0.21.1" bytes = "1.0" diff --git a/server/websocket/Cargo.toml b/server/websocket/Cargo.toml index 1b171b5f8bc..76b13559f3f 100644 --- a/server/websocket/Cargo.toml +++ b/server/websocket/Cargo.toml @@ -12,5 +12,5 @@ lazy_static = "1.2.0" serde = "1.0" serde_derive = "1.0" tokio-tungstenite = "0.17" -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } anyhow = "1.0" diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index d85f2606386..17af801f10a 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -26,7 +26,7 @@ openssl = "0.10.50" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } diesel_derives = "1.4.1" anyhow = "1.0.70" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 5a9e254e514..f64b82e87a0 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -23,7 +23,7 @@ serde = "1.0" serde_yaml = "0.8" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } tokio = { version = "1.27.0", features = ["rt", "macros", "process"] } -uuid = { version = "1.3.0", features = ["v4"] } +uuid = { version = "1.3.1", features = ["v4"] } [dev-dependencies] anyhow = "1.0.70" From c99229cdacd5d9681fb9a4f34f60d32c862b5eee Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 8 Apr 2022 18:27:09 -0700 Subject: [PATCH 0132/2104] core, graph, runtime: Add store.get_in_block --- core/src/subgraph/runner.rs | 17 ++- graph/src/components/store/entity_cache.rs | 33 ++++- graph/src/components/store/mod.rs | 2 +- runtime/wasm/src/host_exports.rs | 5 +- runtime/wasm/src/module/mod.rs | 135 ++++++++++++------- store/test-store/tests/graph/entity_cache.rs | 43 +++++- 6 files changed, 162 insertions(+), 73 deletions(-) diff --git a/core/src/subgraph/runner.rs 
b/core/src/subgraph/runner.rs index 455edaa55fb..510e9d195d1 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -6,7 +6,7 @@ use crate::subgraph::stream::new_block_stream; use atomic_refcell::AtomicRefCell; use graph::blockchain::block_stream::{BlockStreamEvent, BlockWithTriggers, FirehoseCursor}; use graph::blockchain::{Block, Blockchain, DataSource as _, TriggerFilter as _}; -use graph::components::store::{EmptyStore, EntityKey, StoredDynamicDataSource}; +use graph::components::store::{EmptyStore, EntityKey, GetScope, StoredDynamicDataSource}; use graph::components::{ store::ModificationsAndCache, subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, @@ -1034,14 +1034,13 @@ async fn update_proof_of_indexing( }; // Grab the current digest attribute on this entity - let prev_poi = - entity_cache - .get(&entity_key) - .map_err(Error::from)? - .map(|entity| match entity.get("digest") { - Some(Value::Bytes(b)) => b.clone(), - _ => panic!("Expected POI entity to have a digest and for it to be bytes"), - }); + let prev_poi = entity_cache + .get(&entity_key, GetScope::Store) + .map_err(Error::from)? + .map(|entity| match entity.get("digest") { + Some(Value::Bytes(b)) => b.clone(), + _ => panic!("Expected POI entity to have a digest and for it to be bytes"), + }); // Finish the POI stream, getting the new POI value. 
let updated_proof_of_indexing = stream.pause(prev_poi.as_deref()); diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index af618bb8aad..3535f8dd107 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -9,6 +9,14 @@ use crate::util::lfu_cache::LfuCache; use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest}; +/// The scope in which the `EntityCache` should perform a `get` operation +pub enum GetScope { + /// Get from all previously stored entities in the store + Store, + /// Get from the entities that have been stored during this block + InBlock, +} + /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. This struct tracks /// how entities are modified, and caches all entities looked up from the @@ -98,18 +106,29 @@ impl EntityCache { self.handler_updates.clear(); } - pub fn get(&mut self, eref: &EntityKey) -> Result, s::QueryExecutionError> { + pub fn get( + &mut self, + key: &EntityKey, + scope: GetScope, + ) -> Result, s::QueryExecutionError> { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. - let mut entity = self.current.get_entity(&*self.store, eref)?; + let mut entity = match scope { + GetScope::Store => self.current.get_entity(&*self.store, key)?, + GetScope::InBlock => None, + }; - // Always test the cache consistency in debug mode. - debug_assert!(entity == self.store.get(eref).unwrap()); + // Always test the cache consistency in debug mode. 
The test only + // makes sense when we were actually asked to read from the store + debug_assert!(match scope { + GetScope::Store => entity == self.store.get(key).unwrap(), + GetScope::InBlock => true, + }); - if let Some(op) = self.updates.get(eref).cloned() { + if let Some(op) = self.updates.get(key).cloned() { entity = op.apply_to(entity) } - if let Some(op) = self.handler_updates.get(eref).cloned() { + if let Some(op) = self.handler_updates.get(key).cloned() { entity = op.apply_to(entity) } Ok(entity) @@ -183,7 +202,7 @@ impl EntityCache { // lookup in the database and check again with an entity that merges // the existing entity with the changes if !is_valid { - let entity = self.get(&key)?.ok_or_else(|| { + let entity = self.get(&key, GetScope::Store)?.ok_or_else(|| { anyhow!( "Failed to read entity {}[{}] back from cache", key.entity_type, diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index a1199ac22ae..17bb533b419 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -2,7 +2,7 @@ mod entity_cache; mod err; mod traits; -pub use entity_cache::{EntityCache, ModificationsAndCache}; +pub use entity_cache::{EntityCache, GetScope, ModificationsAndCache}; use diesel::types::{FromSql, ToSql}; pub use err::StoreError; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index e943749b332..ef2ac1c09c5 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -9,7 +9,7 @@ use wasmtime::Trap; use web3::types::H160; use graph::blockchain::Blockchain; -use graph::components::store::{EnsLookup, LoadRelatedRequest}; +use graph::components::store::{EnsLookup, GetScope, LoadRelatedRequest}; use graph::components::store::{EntityKey, EntityType}; use graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, @@ -225,6 +225,7 @@ impl HostExports { entity_type: String, entity_id: String, gas: &GasCounter, + scope: 
GetScope, ) -> Result, anyhow::Error> { let store_key = EntityKey { entity_type: EntityType::new(entity_type), @@ -233,7 +234,7 @@ impl HostExports { }; self.check_entity_type_access(&store_key.entity_type)?; - let result = state.entity_cache.get(&store_key)?; + let result = state.entity_cache.get(&store_key, scope)?; gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; Ok(result) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 94eda62d807..cbcc4eea3e8 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -9,6 +9,7 @@ use std::time::Instant; use anyhow::anyhow; use anyhow::Error; +use graph::components::store::GetScope; use graph::slog::SendSyncRefUnwindSafeKV; use never::Never; use semver::Version; @@ -535,6 +536,13 @@ impl WasmInstance { id, field ); + link!( + "store.get_in_block", + store_get_in_block, + "host_export_store_get_in_block", + entity, + id + ); link!( "store.set", store_set, @@ -910,6 +918,71 @@ impl WasmInstanceContext { experimental_features, }) } + + fn store_get_scoped( + &mut self, + gas: &GasCounter, + entity_ptr: AscPtr, + id_ptr: AscPtr, + scope: GetScope, + ) -> Result, HostExportError> { + let _timer = self + .host_metrics + .cheap_clone() + .time_host_fn_execution_region("store_get"); + + let entity_type: String = asc_get(self, entity_ptr, gas)?; + let id: String = asc_get(self, id_ptr, gas)?; + let entity_option = self.ctx.host_exports.store_get( + &mut self.ctx.state, + entity_type.clone(), + id.clone(), + gas, + scope, + )?; + + if self.ctx.instrument { + debug!(self.ctx.logger, "store_get"; + "type" => &entity_type, + "id" => &id, + "found" => entity_option.is_some()); + } + + let ret = match entity_option { + Some(entity) => { + let _section = self + .host_metrics + .stopwatch + .start_section("store_get_asc_new"); + asc_new(self, &entity.sorted(), gas)? 
+ } + None => match &self.ctx.debug_fork { + Some(fork) => { + let entity_option = fork.fetch(entity_type, id).map_err(|e| { + HostExportError::Unknown(anyhow!( + "store_get: failed to fetch entity from the debug fork: {}", + e + )) + })?; + match entity_option { + Some(entity) => { + let _section = self + .host_metrics + .stopwatch + .start_section("store_get_asc_new"); + let entity = asc_new(self, &entity.sorted(), gas)?; + self.store_set(gas, entity_ptr, id_ptr, entity)?; + entity + } + None => AscPtr::null(), + } + } + None => AscPtr::null(), + }, + }; + + Ok(ret) + } } // Implementation of externals. @@ -1012,59 +1085,17 @@ impl WasmInstanceContext { entity_ptr: AscPtr, id_ptr: AscPtr, ) -> Result, HostExportError> { - let _timer = self - .host_metrics - .cheap_clone() - .time_host_fn_execution_region("store_get"); - - let entity_type: String = asc_get(self, entity_ptr, gas)?; - let id: String = asc_get(self, id_ptr, gas)?; - let entity_option = self.ctx.host_exports.store_get( - &mut self.ctx.state, - entity_type.clone(), - id.clone(), - gas, - )?; - if self.ctx.instrument { - debug!(self.ctx.logger, "store_get"; - "type" => &entity_type, - "id" => &id, - "found" => entity_option.is_some()); - } - let ret = match entity_option { - Some(entity) => { - let _section = self - .host_metrics - .stopwatch - .start_section("store_get_asc_new"); - asc_new(self, &entity.sorted(), gas)? 
- } - None => match &self.ctx.debug_fork { - Some(fork) => { - let entity_option = fork.fetch(entity_type, id).map_err(|e| { - HostExportError::Unknown(anyhow!( - "store_get: failed to fetch entity from the debug fork: {}", - e - )) - })?; - match entity_option { - Some(entity) => { - let _section = self - .host_metrics - .stopwatch - .start_section("store_get_asc_new"); - let entity = asc_new(self, &entity.sorted(), gas)?; - self.store_set(gas, entity_ptr, id_ptr, entity)?; - entity - } - None => AscPtr::null(), - } - } - None => AscPtr::null(), - }, - }; + self.store_get_scoped(gas, entity_ptr, id_ptr, GetScope::Store) + } - Ok(ret) + /// function store.get_in_block(entity: string, id: string): Entity | null + pub fn store_get_in_block( + &mut self, + gas: &GasCounter, + entity_ptr: AscPtr, + id_ptr: AscPtr, + ) -> Result, HostExportError> { + self.store_get_scoped(gas, entity_ptr, id_ptr, GetScope::InBlock) } /// function store.loadRelated(entity_type: string, id: string, field: string): Array diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index d0fc7dd3b26..d284a98d107 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -1,7 +1,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, LoadRelatedRequest, - ReadStore, StoredDynamicDataSource, WritableStore, + DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, GetScope, + LoadRelatedRequest, ReadStore, StoredDynamicDataSource, WritableStore, }; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; @@ -752,3 +752,42 @@ fn check_for_delete_async_related() { assert_eq!(result, expeted_vec); }); } + +#[test] +fn scoped_get() { + run_store_test(|mut cache, _store, _deployment, _writable| async move { + // Key for 
an existing entity that is in the store + let key1 = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let wallet1 = create_wallet_entity("1", "1", 67); + + // Create a new entity that is not in the store + let wallet5 = create_wallet_entity("5", "5", 100); + let key5 = EntityKey::data(WALLET.to_owned(), "5".to_owned()); + cache.set(key5.clone(), wallet5.clone()).unwrap(); + + // For the new entity, we can retrieve it with either scope + let act5 = cache.get(&key5, GetScope::InBlock).unwrap(); + assert_eq!(Some(&wallet5), act5.as_ref()); + let act5 = cache.get(&key5, GetScope::Store).unwrap(); + assert_eq!(Some(&wallet5), act5.as_ref()); + + // For an entity in the store, we can not get it `InBlock` but with + // `Store` + let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + assert_eq!(None, act1); + let act1 = cache.get(&key1, GetScope::Store).unwrap(); + assert_eq!(Some(&wallet1), act1.as_ref()); + // Even after reading from the store, the entity is not visible with + // `InBlock` + let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + assert_eq!(None, act1); + // But if it gets updated, it becomes visible with either scope + let mut wallet1 = wallet1; + wallet1.set("balance", 70); + cache.set(key1.clone(), wallet1.clone()).unwrap(); + let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + assert_eq!(Some(&wallet1), act1.as_ref()); + let act1 = cache.get(&key1, GetScope::Store).unwrap(); + assert_eq!(Some(&wallet1), act1.as_ref()); + }); +} From 9f4a1821146b18f6f49165305e9a8c0795120fad Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 14 Apr 2023 15:16:08 -0700 Subject: [PATCH 0133/2104] core: Log how many entries the cache had at each block --- core/src/subgraph/runner.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 510e9d195d1..e0bc04d1b91 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -202,15 +202,9 @@ 
where "block_hash" => format!("{}", block_ptr.hash) )); - if triggers.len() == 1 { - debug!(&logger, "1 candidate trigger in this block"); - } else { - debug!( - &logger, - "{} candidate triggers in this block", - triggers.len() - ); - } + debug!(logger, "Start processing block"; + "triggers" => triggers.len(), + "cached_entities" => self.state.entity_lfu_cache.len()); let proof_of_indexing = if self.inputs.store.supports_proof_of_indexing().await? { Some(Arc::new(AtomicRefCell::new(ProofOfIndexing::new( From 2988570bccad617c754164017d1f935fc89e65a3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 1 Apr 2023 14:03:50 -0700 Subject: [PATCH 0134/2104] graph, graphql: Move graphql::schema to graph::schema --- Cargo.lock | 1 - graph/src/lib.rs | 2 ++ {graphql => graph}/src/schema/api.rs | 8 +++--- {graphql => graph}/src/schema/ast.rs | 26 +++++++++---------- {graphql => graph}/src/schema/meta.graphql | 0 {graphql => graph}/src/schema/mod.rs | 2 +- graphql/examples/schema.rs | 2 +- graphql/src/execution/ast.rs | 3 +-- graphql/src/execution/execution.rs | 4 +-- graphql/src/execution/query.rs | 5 ++-- graphql/src/introspection/resolver.rs | 2 +- graphql/src/introspection/schema.rs | 2 +- graphql/src/lib.rs | 4 --- graphql/src/store/prefetch.rs | 2 +- graphql/src/store/query.rs | 2 +- graphql/src/store/resolver.rs | 6 ++--- graphql/src/subscription/mod.rs | 3 +-- graphql/src/values/coercion.rs | 2 +- store/postgres/Cargo.toml | 8 ++++-- store/postgres/src/deployment_store.rs | 2 +- .../test-store/tests/graphql/introspection.rs | 5 ++-- 21 files changed, 45 insertions(+), 46 deletions(-) rename {graphql => graph}/src/schema/api.rs (99%) rename {graphql => graph}/src/schema/ast.rs (96%) rename {graphql => graph}/src/schema/meta.graphql (100%) rename {graphql => graph}/src/schema/mod.rs (75%) diff --git a/Cargo.lock b/Cargo.lock index 979b1633331..384067d2009 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1935,7 +1935,6 @@ dependencies = [ "git-testament", "graph", 
"graph-core", - "graph-graphql", "graphql-parser", "hex", "itertools", diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 1a2d6f6e05c..23992e0f96a 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -30,6 +30,8 @@ pub mod substreams; pub mod endpoint; +pub mod schema; + /// Helpers for parsing environment variables. pub mod env; diff --git a/graphql/src/schema/api.rs b/graph/src/schema/api.rs similarity index 99% rename from graphql/src/schema/api.rs rename to graph/src/schema/api.rs index fc9186e2d6d..9e549a53efe 100644 --- a/graphql/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -6,12 +6,12 @@ use lazy_static::lazy_static; use crate::schema::ast; -use graph::data::{ +use crate::data::{ graphql::ext::{DirectiveExt, DocumentExt, ValueExt}, schema::{META_FIELD_NAME, META_FIELD_TYPE, SCHEMA_TYPE_NAME}, }; -use graph::prelude::s::{Value, *}; -use graph::prelude::*; +use crate::prelude::s::{Value, *}; +use crate::prelude::*; use thiserror::Error; #[derive(Error, Debug)] @@ -867,7 +867,7 @@ fn add_field_arguments( #[cfg(test)] mod tests { - use graph::data::graphql::DocumentExt; + use crate::data::graphql::DocumentExt; use graphql_parser::schema::*; use super::api_schema; diff --git a/graphql/src/schema/ast.rs b/graph/src/schema/ast.rs similarity index 96% rename from graphql/src/schema/ast.rs rename to graph/src/schema/ast.rs index c78904fbe69..e57a9d58089 100644 --- a/graphql/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -1,18 +1,16 @@ -use graph::cheap_clone::CheapClone; use graphql_parser::Pos; use lazy_static::lazy_static; use std::ops::Deref; use std::str::FromStr; use std::sync::Arc; -use graph::data::graphql::ext::DirectiveFinder; -use graph::data::graphql::{DocumentExt, ObjectOrInterface}; -use graph::prelude::anyhow::anyhow; -use graph::prelude::{s, Error, ValueType}; +use crate::cheap_clone::CheapClone; +use crate::data::graphql::ext::DirectiveFinder; +use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectOrInterface}; +use 
crate::prelude::anyhow::anyhow; +use crate::prelude::{s, Error, ValueType}; -use crate::query::ast as qast; - -pub(crate) enum FilterOp { +pub enum FilterOp { Not, GreaterThan, LessThan, @@ -39,7 +37,7 @@ pub(crate) enum FilterOp { } /// Split a "name_eq" style name into an attribute ("name") and a filter op (`Equal`). -pub(crate) fn parse_field_as_filter(key: &str) -> (String, FilterOp) { +pub fn parse_field_as_filter(key: &str) -> (String, FilterOp) { let (suffix, op) = match key { k if k.ends_with("_not") => ("_not", FilterOp::Not), k if k.ends_with("_gt") => ("_gt", FilterOp::GreaterThan), @@ -389,7 +387,7 @@ pub fn get_derived_from_field<'a>( field_definition: &'a s::Field, ) -> Option<&'a s::Field> { get_derived_from_directive(field_definition) - .and_then(|directive| qast::get_argument_value(&directive.arguments, "field")) + .and_then(|directive| directive.argument("field")) .and_then(|value| match value { s::Value::String(s) => Some(s), _ => None, @@ -407,9 +405,9 @@ pub fn is_list(field_type: &s::Type) -> bool { #[test] fn entity_validation() { - use graph::components::store::EntityKey; - use graph::data::store; - use graph::prelude::{DeploymentHash, Entity}; + use crate::components::store::EntityKey; + use crate::data::store; + use crate::prelude::{DeploymentHash, Entity}; fn make_thing(name: &str) -> Entity { let mut thing = Entity::new(); @@ -441,7 +439,7 @@ fn entity_validation() { }"; let subgraph = DeploymentHash::new("doesntmatter").unwrap(); let schema = - graph::prelude::Schema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); + crate::prelude::Schema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); let id = thing.id().unwrap_or("none".to_owned()); let key = EntityKey::data("Thing".to_owned(), id.clone()); diff --git a/graphql/src/schema/meta.graphql b/graph/src/schema/meta.graphql similarity index 100% rename from graphql/src/schema/meta.graphql rename to graph/src/schema/meta.graphql diff --git 
a/graphql/src/schema/mod.rs b/graph/src/schema/mod.rs similarity index 75% rename from graphql/src/schema/mod.rs rename to graph/src/schema/mod.rs index 6df51907471..85544cfb7ac 100644 --- a/graphql/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -4,4 +4,4 @@ pub mod api; /// Utilities for working with GraphQL schema ASTs. pub mod ast; -pub use self::api::{api_schema, APISchemaError}; +pub use api::{api_schema, APISchemaError}; diff --git a/graphql/examples/schema.rs b/graphql/examples/schema.rs index 0bf77f7a7bc..2d4ee14e346 100644 --- a/graphql/examples/schema.rs +++ b/graphql/examples/schema.rs @@ -3,7 +3,7 @@ use std::env; use std::fs; use std::process::exit; -use graph_graphql::schema::api::api_schema; +use graph::schema::api::api_schema; pub fn usage(msg: &str) -> ! { println!("{}", msg); diff --git a/graphql/src/execution/ast.rs b/graphql/src/execution/ast.rs index 2a0c19e6d36..464836c1566 100644 --- a/graphql/src/execution/ast.rs +++ b/graphql/src/execution/ast.rs @@ -4,11 +4,10 @@ use graph::{ components::store::EntityType, data::graphql::ObjectOrInterface, prelude::{anyhow, q, r, s, ApiSchema, QueryExecutionError, ValueMap}, + schema::ast::ObjectType, }; use graphql_parser::Pos; -use crate::schema::ast::ObjectType; - /// A selection set is a table that maps object types to the fields that /// should be selected for objects of that type. The types are always /// concrete object types, never interface or union types. 
When a diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 8f4d0259538..dcf6cf51e1e 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -15,13 +15,13 @@ use graph::data::graphql::*; use graph::data::query::CacheStatus; use graph::env::CachedSubgraphIds; use graph::prelude::*; +use graph::schema::ast as sast; use graph::util::{lfu_cache::LfuCache, stable_hash_glue::impl_stable_hash}; use super::QueryHash; use crate::execution::ast as a; use crate::introspection::{is_introspection_field, INTROSPECTION_QUERY_TYPE}; use crate::prelude::*; -use crate::schema::ast as sast; lazy_static! { // Sharded query results cache for recent blocks by network. @@ -288,7 +288,7 @@ pub(crate) async fn execute_root_selection_set_uncached( execute_selection_set_to_map( &ictx, ctx.query.selection_set.as_ref(), - &INTROSPECTION_QUERY_TYPE, + &*INTROSPECTION_QUERY_TYPE, None, ) .await?, diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index 9a9b702e1af..6574c86f673 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -19,12 +19,13 @@ use graph::prelude::{ info, o, q, r, s, warn, BlockNumber, CheapClone, DeploymentHash, GraphQLMetrics, Logger, TryFromValue, ENV_VARS, }; +use graph::schema::api::ErrorPolicy; +use graph::schema::ast::{self as sast}; use crate::execution::ast as a; +use crate::execution::get_field; use crate::query::{ast as qast, ext::BlockConstraint}; -use crate::schema::ast::{self as sast}; use crate::values::coercion; -use crate::{execution::get_field, schema::api::ErrorPolicy}; lazy_static! 
{ static ref GRAPHQL_VALIDATION_PLAN: ValidationPlan = diff --git a/graphql/src/introspection/resolver.rs b/graphql/src/introspection/resolver.rs index ff5b6bf49ce..60ebf2631e8 100644 --- a/graphql/src/introspection/resolver.rs +++ b/graphql/src/introspection/resolver.rs @@ -8,7 +8,7 @@ use graph::prelude::*; use crate::execution::ast as a; use crate::prelude::*; -use crate::schema::ast as sast; +use graph::schema::ast as sast; type TypeObjectsMap = BTreeMap; diff --git a/graphql/src/introspection/schema.rs b/graphql/src/introspection/schema.rs index 97379af3a42..303c46f36d5 100644 --- a/graphql/src/introspection/schema.rs +++ b/graphql/src/introspection/schema.rs @@ -8,7 +8,7 @@ use graph::prelude::s::Document; use lazy_static::lazy_static; -use crate::schema::ast as sast; +use graph::schema::ast as sast; const INTROSPECTION_SCHEMA: &str = " scalar Boolean diff --git a/graphql/src/lib.rs b/graphql/src/lib.rs index 310f58a98d4..b167427a369 100644 --- a/graphql/src/lib.rs +++ b/graphql/src/lib.rs @@ -1,8 +1,5 @@ pub extern crate graphql_parser; -/// Utilities for working with GraphQL schemas. -pub mod schema; - /// Utilities for schema introspection. 
pub mod introspection; @@ -32,7 +29,6 @@ pub mod prelude { pub use super::execution::{ast as a, ExecutionContext, Query, Resolver}; pub use super::introspection::IntrospectionResolver; pub use super::query::{execute_query, ext::BlockConstraint, QueryExecutionOptions}; - pub use super::schema::{api_schema, APISchemaError}; pub use super::store::StoreResolver; pub use super::subscription::SubscriptionExecutionOptions; pub use super::values::MaybeCoercible; diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 7ee678e7d00..55e4ef028ae 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -12,6 +12,7 @@ use std::collections::BTreeMap; use std::rc::Rc; use std::time::Instant; +use graph::schema::ast as sast; use graph::{components::store::EntityType, data::graphql::*}; use graph::{ data::graphql::ext::DirectiveFinder, @@ -24,7 +25,6 @@ use graph::{ use crate::execution::{ast as a, ExecutionContext, Resolver}; use crate::metrics::GraphQLMetrics; -use crate::schema::ast as sast; use crate::store::query::build_query; use crate::store::StoreResolver; diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 9913a0b13b4..04373926a67 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -6,10 +6,10 @@ use graph::data::graphql::TypeExt as _; use graph::data::value::Object; use graph::data::value::Value as DataValue; use graph::prelude::*; +use graph::schema::ast::{self as sast, FilterOp}; use graph::{components::store::EntityType, data::graphql::ObjectOrInterface}; use crate::execution::ast as a; -use crate::schema::ast::{self as sast, FilterOp}; use super::prefetch::SelectedAttributes; diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index ff35d7b7838..575ee7342e4 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -9,14 +9,14 @@ use graph::data::{ schema::META_FIELD_TYPE, }; use graph::prelude::*; +use 
graph::schema::api::ErrorPolicy; +use graph::schema::ast as sast; use graph::{components::store::*, data::schema::BLOCK_FIELD_TYPE}; use crate::execution::ast as a; use crate::metrics::GraphQLMetrics; +use crate::prelude::*; use crate::query::ext::BlockConstraint; -use crate::schema::ast as sast; -use crate::{prelude::*, schema::api::ErrorPolicy}; - use crate::store::query::collect_entities_from_query_field; /// A resolver that fetches entities from a `Store`. diff --git a/graphql/src/subscription/mod.rs b/graphql/src/subscription/mod.rs index ebb597a83e2..f231fafb53b 100644 --- a/graphql/src/subscription/mod.rs +++ b/graphql/src/subscription/mod.rs @@ -2,14 +2,13 @@ use std::result::Result; use std::time::{Duration, Instant}; use graph::components::store::UnitStream; -use graph::{components::store::SubscriptionManager, prelude::*}; +use graph::{components::store::SubscriptionManager, prelude::*, schema::api::ErrorPolicy}; use crate::metrics::GraphQLMetrics; use crate::{ execution::ast as a, execution::*, prelude::{BlockConstraint, StoreResolver}, - schema::api::ErrorPolicy, }; /// Options available for subscription execution. 
diff --git a/graphql/src/values/coercion.rs b/graphql/src/values/coercion.rs index 7f4f8cedd50..257a70c67f9 100644 --- a/graphql/src/values/coercion.rs +++ b/graphql/src/values/coercion.rs @@ -1,6 +1,6 @@ -use crate::schema; use graph::prelude::s::{EnumType, InputValue, ScalarType, Type, TypeDefinition}; use graph::prelude::{q, r, QueryExecutionError}; +use graph::schema; use std::collections::BTreeMap; use std::convert::TryFrom; diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 17af801f10a..0494ad5bd1a 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -7,7 +7,12 @@ edition.workspace = true async-trait = "0.1.50" blake3 = "1.3" derive_more = { version = "0.99.17" } -diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2"] } +diesel = { version = "1.4.8", features = [ + "postgres", + "serde_json", + "numeric", + "r2d2", +] } # We use diesel-dynamic-schema straight from git as the project has not # made a release as a crate yet diesel-dynamic-schema = { git = "https://github.com/diesel-rs/diesel-dynamic-schema", rev = "a8ec4fb1" } @@ -16,7 +21,6 @@ diesel_migrations = "1.3.0" fallible-iterator = "0.2.0" graph = { path = "../../graph" } graph-core = { path = "../../core" } -graph-graphql = { path = "../../graphql" } Inflector = "0.11.3" lazy_static = "1.1" lru_time_cache = "0.11" diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index ab8956c7a75..dbcfba7239e 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -43,7 +43,7 @@ use graph::prelude::{ Logger, QueryExecutionError, Schema, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, ENV_VARS, }; -use graph_graphql::prelude::api_schema; +use graph::schema::api_schema; use web3::types::Address; use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; diff --git a/store/test-store/tests/graphql/introspection.rs 
b/store/test-store/tests/graphql/introspection.rs index 43ba9bff433..bc5f86dc3d4 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -6,9 +6,10 @@ use graph::prelude::{ async_trait, o, r, s, slog, tokio, ApiSchema, DeploymentHash, Logger, Query, QueryExecutionError, QueryResult, Schema, }; +use graph::schema::api_schema; + use graph_graphql::prelude::{ - a, api_schema, execute_query, ExecutionContext, Query as PreparedQuery, QueryExecutionOptions, - Resolver, + a, execute_query, ExecutionContext, Query as PreparedQuery, QueryExecutionOptions, Resolver, }; use test_store::graphql_metrics; use test_store::LOAD_MANAGER; From d4654288f57864066bc71a826624c6fba822436d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 1 Apr 2023 14:05:48 -0700 Subject: [PATCH 0135/2104] store: Remove graph-core depencency --- Cargo.lock | 1 - store/postgres/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 384067d2009..58e0ca2a4a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1934,7 +1934,6 @@ dependencies = [ "fallible-iterator", "git-testament", "graph", - "graph-core", "graphql-parser", "hex", "itertools", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 0494ad5bd1a..f3bf0f4283a 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -20,7 +20,6 @@ diesel-derive-enum = { version = "1.1", features = ["postgres"] } diesel_migrations = "1.3.0" fallible-iterator = "0.2.0" graph = { path = "../../graph" } -graph-core = { path = "../../core" } Inflector = "0.11.3" lazy_static = "1.1" lru_time_cache = "0.11" From 52c3bda4a43e8becde3560c8b9b61ddfb42b80d1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 12 Apr 2023 11:14:46 -0700 Subject: [PATCH 0136/2104] all: Introduce InputSchema for subgraph schemas We use Schema just as a basic utility, and differentiate in the rest of the code between input and api schemas --- 
graph/src/components/server/index_node.rs | 4 +- graph/src/components/store/entity_cache.rs | 5 +- graph/src/components/store/mod.rs | 7 +- graph/src/components/store/traits.rs | 9 +- graph/src/data/store/mod.rs | 33 +++--- graph/src/data/subgraph/features.rs | 7 +- graph/src/data/subgraph/mod.rs | 14 +-- graph/src/data/subgraph/schema.rs | 2 +- graph/src/schema/ast.rs | 4 +- graph/src/schema/input_schema.rs | 104 ++++++++++++++++++ graph/src/schema/mod.rs | 4 + server/index-node/src/explorer.rs | 2 +- store/postgres/examples/layout.rs | 5 +- store/postgres/src/deployment.rs | 11 +- store/postgres/src/deployment_store.rs | 17 ++- store/postgres/src/fork.rs | 32 ++---- store/postgres/src/relational.rs | 20 ++-- store/postgres/src/relational/ddl_tests.rs | 4 +- store/postgres/src/relational/query_tests.rs | 7 +- store/postgres/src/subgraph_store.rs | 15 +-- store/postgres/src/writable.rs | 9 +- store/test-store/src/store.rs | 3 +- store/test-store/tests/graph/entity_cache.rs | 12 +- store/test-store/tests/graphql/query.rs | 7 +- store/test-store/tests/postgres/graft.rs | 6 +- store/test-store/tests/postgres/relational.rs | 5 +- .../tests/postgres/relational_bytes.rs | 7 +- store/test-store/tests/postgres/store.rs | 8 +- store/test-store/tests/postgres/subgraph.rs | 6 +- store/test-store/tests/postgres/writable.rs | 6 +- 30 files changed, 244 insertions(+), 131 deletions(-) create mode 100644 graph/src/schema/input_schema.rs diff --git a/graph/src/components/server/index_node.rs b/graph/src/components/server/index_node.rs index eddf1fa51a8..7afdef5e8ed 100644 --- a/graph/src/components/server/index_node.rs +++ b/graph/src/components/server/index_node.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use futures::prelude::*; -use crate::prelude::{BlockNumber, Schema}; +use crate::{prelude::BlockNumber, schema::InputSchema}; /// This is only needed to support the explorer API. 
#[derive(Debug)] @@ -15,7 +15,7 @@ pub struct VersionInfo { pub failed: bool, pub description: Option, pub repository: Option, - pub schema: Arc, + pub schema: Arc, pub network: String, } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 3535f8dd107..e1340d36ec7 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -4,7 +4,8 @@ use std::fmt::{self, Debug}; use std::sync::Arc; use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; -use crate::prelude::{Schema, ENV_VARS}; +use crate::prelude::ENV_VARS; +use crate::schema::InputSchema; use crate::util::lfu_cache::LfuCache; use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest}; @@ -41,7 +42,7 @@ pub struct EntityCache { /// The store is only used to read entities. pub store: Arc, - schema: Arc, + schema: Arc, } impl Debug for EntityCache { diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 17bb533b419..e84dc59a13b 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -28,6 +28,7 @@ use crate::data::store::scalar::Bytes; use crate::data::store::*; use crate::data::value::Word; use crate::data_source::CausalityRegion; +use crate::schema::InputSchema; use crate::{constraint_violation, prelude::*}; /// The type name of an entity. This is the string that is used in the @@ -1152,11 +1153,11 @@ impl fmt::Display for DeploymentSchemaVersion { /// A `ReadStore` that is always empty. 
pub struct EmptyStore { - schema: Arc, + schema: Arc, } impl EmptyStore { - pub fn new(schema: Arc) -> Self { + pub fn new(schema: Arc) -> Self { EmptyStore { schema } } } @@ -1177,7 +1178,7 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> Arc { self.schema.cheap_clone() } } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index dc376b7f65b..b3ae512acab 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -9,6 +9,7 @@ use crate::data::query::Trace; use crate::data::subgraph::status; use crate::data::value::Word; use crate::data::{query::QueryTarget, subgraph::schema::*}; +use crate::schema::InputSchema; pub trait SubscriptionManager: Send + Sync + 'static { /// Subscribe to changes for specific subgraphs and entities. @@ -67,7 +68,7 @@ pub trait SubgraphStore: Send + Sync + 'static { fn create_subgraph_deployment( &self, name: SubgraphName, - schema: &Schema, + schema: &InputSchema, deployment: DeploymentCreate, node_id: NodeId, network: String, @@ -111,7 +112,7 @@ pub trait SubgraphStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Return the GraphQL schema supplied by the user - fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result, StoreError>; + fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result, StoreError>; /// Return the GraphQL schema that was derived from the user's schema by /// adding a root query type etc. to it @@ -192,7 +193,7 @@ pub trait ReadStore: Send + Sync + 'static { query_derived: &DerivedEntityQuery, ) -> Result, StoreError>; - fn input_schema(&self) -> Arc; + fn input_schema(&self) -> Arc; } // This silly impl is needed until https://github.com/rust-lang/rust/issues/65991 is stable. 
@@ -215,7 +216,7 @@ impl ReadStore for Arc { (**self).get_derived(entity_derived) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> Arc { (**self).input_schema() } } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 8d91b9933ea..dd934bb9f46 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,8 +1,9 @@ use crate::{ components::store::{DeploymentLocator, EntityKey, EntityType}, data::graphql::ObjectTypeExt, - prelude::{anyhow::Context, q, r, s, CacheWeight, QueryExecutionError, Schema}, + prelude::{anyhow::Context, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, + schema::InputSchema, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; use anyhow::{anyhow, Error}; @@ -18,7 +19,7 @@ use std::{borrow::Cow, collections::HashMap}; use strum::AsStaticRef as _; use strum_macros::AsStaticStr; -use super::graphql::{ext::DirectiveFinder, DocumentExt as _, TypeExt as _}; +use super::graphql::{ext::DirectiveFinder, TypeExt as _}; /// Custom scalars in GraphQL. pub mod scalar; @@ -694,12 +695,12 @@ impl Entity { /// Validate that this entity matches the object type definition in the /// schema. 
An entity that passes these checks can be stored /// successfully in the subgraph's database schema - pub fn validate(&self, schema: &Schema, key: &EntityKey) -> Result<(), anyhow::Error> { - fn scalar_value_type(schema: &Schema, field_type: &s::Type) -> ValueType { + pub fn validate(&self, schema: &InputSchema, key: &EntityKey) -> Result<(), anyhow::Error> { + fn scalar_value_type(schema: &InputSchema, field_type: &s::Type) -> ValueType { use s::TypeDefinition as t; match field_type { s::Type::NamedType(name) => ValueType::from_str(name).unwrap_or_else(|_| { - match schema.document.get_named_type(name) { + match schema.get_named_type(name) { Some(t::Object(obj_type)) => { let id = obj_type.field("id").expect("all object types have an id"); scalar_value_type(schema, &id.field_type) @@ -710,8 +711,7 @@ impl Entity { // therefore enough to use the id type of one of // the implementors match schema - .types_for_interface() - .get(&EntityType::new(intf.name.clone())) + .types_for_interface(intf) .expect("interface type names are known") .first() { @@ -745,16 +745,12 @@ impl Entity { // type for them, and validation would therefore fail return Ok(()); } - let object_type_definitions = schema.document.get_object_type_definitions(); - let object_type = object_type_definitions - .iter() - .find(|object_type| key.entity_type.as_str() == object_type.name) - .with_context(|| { - format!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, key.entity_id, key.entity_type - ) - })?; + let object_type = schema.find_object_type(&key.entity_type).with_context(|| { + format!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, key.entity_id, key.entity_type + ) + })?; for field in &object_type.fields { let is_derived = field.is_derived(); @@ -917,8 +913,7 @@ fn entity_validation() { cruft: Cruft! 
@derivedFrom(field: \"thing\") }"; let subgraph = DeploymentHash::new("doesntmatter").unwrap(); - let schema = - crate::prelude::Schema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); + let schema = InputSchema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); let id = thing.id().unwrap_or("none".to_owned()); let key = EntityKey::data("Thing".to_owned(), id.clone()); diff --git a/graph/src/data/subgraph/features.rs b/graph/src/data/subgraph/features.rs index c86d6eb84c7..da6b00d1ce6 100644 --- a/graph/src/data/subgraph/features.rs +++ b/graph/src/data/subgraph/features.rs @@ -12,8 +12,9 @@ use crate::{ blockchain::Blockchain, - data::{graphql::DocumentExt, schema::Schema, subgraph::SubgraphManifest}, + data::subgraph::SubgraphManifest, prelude::{Deserialize, Serialize}, + schema::InputSchema, }; use itertools::Itertools; use std::{collections::BTreeSet, fmt, str::FromStr}; @@ -110,8 +111,8 @@ fn detect_grafting(manifest: &SubgraphManifest) -> Option Option { - match schema.document.get_fulltext_directives() { +fn detect_full_text_search(schema: &InputSchema) -> Option { + match schema.get_fulltext_directives() { Ok(directives) => (!directives.is_empty()).then_some(SubgraphFeature::FullTextSearch), Err(_) => { diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 7ab0ac7588f..c52bd40e470 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -31,11 +31,8 @@ use crate::{ store::{StoreError, SubgraphStore}, }, data::{ - graphql::TryFromValue, - query::QueryExecutionError, - schema::{Schema, SchemaValidationError}, - store::Entity, - subgraph::features::validate_subgraph_features, + graphql::TryFromValue, query::QueryExecutionError, schema::SchemaValidationError, + store::Entity, subgraph::features::validate_subgraph_features, }, data_source::{ offchain::OFFCHAIN_KINDS, DataSource, DataSourceTemplate, UnresolvedDataSource, @@ -43,6 +40,7 @@ use crate::{ }, ensure, prelude::{r, 
CheapClone, ENV_VARS}, + schema::InputSchema, }; use crate::prelude::{impl_slog_value, BlockNumber, Deserialize, Serialize}; @@ -385,12 +383,12 @@ impl UnresolvedSchema { id: DeploymentHash, resolver: &Arc, logger: &Logger, - ) -> Result { + ) -> Result { let schema_bytes = resolver .cat(logger, &self.file) .await .with_context(|| format!("failed to resolve schema {}", &self.file.link))?; - Schema::parse(&String::from_utf8(schema_bytes)?, id) + InputSchema::parse(&String::from_utf8(schema_bytes)?, id) } } @@ -503,7 +501,7 @@ pub type UnresolvedSubgraphManifest = BaseSubgraphManifest< /// SubgraphManifest validated with IPFS links resolved pub type SubgraphManifest = - BaseSubgraphManifest, DataSourceTemplate>; + BaseSubgraphManifest, DataSourceTemplate>; /// Unvalidated SubgraphManifest pub struct UnvalidatedSubgraphManifest(SubgraphManifest); diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index e877ecb5c6d..185f8227a4f 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -194,7 +194,7 @@ impl SubgraphManifestEntity { description: manifest.description.clone(), repository: manifest.repository.clone(), features: manifest.features.iter().map(|f| f.to_string()).collect(), - schema: manifest.schema.document.clone().to_string(), + schema: manifest.schema.document_string(), raw_yaml: Some(raw_yaml), entities_with_causality_region, history_blocks: BLOCK_NUMBER_MAX, diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index e57a9d58089..80f5433ef28 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -408,6 +408,7 @@ fn entity_validation() { use crate::components::store::EntityKey; use crate::data::store; use crate::prelude::{DeploymentHash, Entity}; + use crate::schema::InputSchema; fn make_thing(name: &str) -> Entity { let mut thing = Entity::new(); @@ -438,8 +439,7 @@ fn entity_validation() { cruft: Cruft! 
@derivedFrom(field: \"thing\") }"; let subgraph = DeploymentHash::new("doesntmatter").unwrap(); - let schema = - crate::prelude::Schema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); + let schema = InputSchema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); let id = thing.id().unwrap_or("none".to_owned()); let key = EntityKey::data("Thing".to_owned(), id.clone()); diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs new file mode 100644 index 00000000000..5ea0314c5d0 --- /dev/null +++ b/graph/src/schema/input_schema.rs @@ -0,0 +1,104 @@ +use std::collections::BTreeMap; + +use anyhow::Error; + +use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; +use crate::data::graphql::DocumentExt; +use crate::data::schema::{FulltextDefinition, Schema, SchemaValidationError}; +use crate::data::store; +use crate::prelude::{s, ApiSchema, DeploymentHash}; +use crate::schema::api_schema; + +#[derive(Clone, Debug, PartialEq)] +pub struct InputSchema { + schema: Schema, +} + +impl InputSchema { + pub fn new(id: DeploymentHash, document: s::Document) -> Result { + let schema = Schema::new(id, document)?; + Ok(Self { schema }) + } + + pub fn parse(raw: &str, id: DeploymentHash) -> Result { + let schema = Schema::parse(raw, id)?; + + Ok(Self { schema }) + } + + pub fn api_schema(&self) -> Result { + let mut schema = self.schema.clone(); + schema.document = api_schema(&self.schema.document)?; + schema.add_subgraph_id_directives(schema.id.clone()); + ApiSchema::from_api_schema(schema) + } + + pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &s::Field), Error> { + self.schema.get_field_related(key) + } + + pub fn id_value(&self, key: &EntityKey) -> Result { + self.schema.id_value(key) + } + + pub fn is_immutable(&self, entity_type: &EntityType) -> bool { + self.schema.is_immutable(entity_type) + } + + pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { + 
self.schema.document.get_named_type(name) + } + + pub fn types_for_interface(&self, intf: &s::InterfaceType) -> Option<&Vec> { + self.schema + .types_for_interface + .get(&EntityType::new(intf.name.clone())) + } + + pub fn find_object_type(&self, entity_type: &EntityType) -> Option<&s::ObjectType> { + self.schema + .document + .definitions + .iter() + .filter_map(|d| match d { + s::Definition::TypeDefinition(s::TypeDefinition::Object(t)) => Some(t), + _ => None, + }) + .find(|object_type| entity_type.as_str() == object_type.name) + } + + pub fn get_enum_definitions(&self) -> Vec<&s::EnumType> { + self.schema.document.get_enum_definitions() + } + + pub fn get_object_type_definitions(&self) -> Vec<&s::ObjectType> { + self.schema.document.get_object_type_definitions() + } + + pub fn interface_types(&self) -> &BTreeMap> { + &self.schema.types_for_interface + } + + pub fn entity_fulltext_definitions( + &self, + entity: &str, + ) -> Result, anyhow::Error> { + Schema::entity_fulltext_definitions(entity, &self.schema.document) + } + + pub fn id(&self) -> &DeploymentHash { + &self.schema.id + } + + pub fn document_string(&self) -> String { + self.schema.document.to_string() + } + + pub fn get_fulltext_directives(&self) -> Result, Error> { + self.schema.document.get_fulltext_directives() + } + + pub(crate) fn validate(&self) -> Result<(), Vec> { + self.schema.validate() + } +} diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 85544cfb7ac..8c03499107c 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -4,4 +4,8 @@ pub mod api; /// Utilities for working with GraphQL schema ASTs. 
pub mod ast; +mod input_schema; + pub use api::{api_schema, APISchemaError}; + +pub use input_schema::InputSchema; diff --git a/server/index-node/src/explorer.rs b/server/index-node/src/explorer.rs index 6d65630e9ad..5c7d0215b40 100644 --- a/server/index-node/src/explorer.rs +++ b/server/index-node/src/explorer.rs @@ -98,7 +98,7 @@ where failed: vi.failed, description: vi.description.as_deref(), repository: vi.repository.as_deref(), - schema: vi.schema.document.to_string(), + schema: vi.schema.document_string(), network: vi.network.as_str() }; Ok(as_http_response(&value)) diff --git a/store/postgres/examples/layout.rs b/store/postgres/examples/layout.rs index 94daa9657f4..f166e891630 100644 --- a/store/postgres/examples/layout.rs +++ b/store/postgres/examples/layout.rs @@ -2,11 +2,12 @@ extern crate clap; extern crate graph_store_postgres; use clap::{arg, Command}; +use graph::schema::InputSchema; use std::collections::BTreeSet; use std::process::exit; use std::{fs, sync::Arc}; -use graph::prelude::{DeploymentHash, Schema}; +use graph::prelude::DeploymentHash; use graph_store_postgres::{ command_support::{Catalog, Column, ColumnType, Layout, Namespace}, layout_for_tests::make_dummy_site, @@ -137,7 +138,7 @@ pub fn main() { let subgraph = DeploymentHash::new("Qmasubgraph").unwrap(); let schema = ensure(fs::read_to_string(schema), "Can not read schema file"); let schema = ensure( - Schema::parse(&schema, subgraph.clone()), + InputSchema::parse(&schema, subgraph.clone()), "Failed to parse schema", ); let namespace = ensure( diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 865722b5f92..03f8b16eb0c 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -18,8 +18,9 @@ use graph::{ components::store::EntityType, prelude::{ anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr, - DeploymentHash, DeploymentState, Schema, StoreError, + DeploymentHash, DeploymentState, 
StoreError, }, + schema::InputSchema, }; use graph::{ data::subgraph::{ @@ -294,19 +295,19 @@ pub fn debug_fork( } } -pub fn schema(conn: &PgConnection, site: &Site) -> Result<(Schema, bool), StoreError> { +pub fn schema(conn: &PgConnection, site: &Site) -> Result<(InputSchema, bool), StoreError> { use subgraph_manifest as sm; let (s, use_bytea_prefix) = sm::table .select((sm::schema, sm::use_bytea_prefix)) .filter(sm::id.eq(site.id)) .first::<(String, bool)>(conn)?; - Schema::parse(s.as_str(), site.deployment.clone()) + InputSchema::parse(s.as_str(), site.deployment.clone()) .map_err(StoreError::Unknown) .map(|schema| (schema, use_bytea_prefix)) } pub struct ManifestInfo { - pub input_schema: Schema, + pub input_schema: InputSchema, pub description: Option, pub repository: Option, pub spec_version: String, @@ -332,7 +333,7 @@ impl ManifestInfo { )) .filter(sm::id.eq(site.id)) .first(conn)?; - let input_schema = Schema::parse(s.as_str(), site.deployment.clone())?; + let input_schema = InputSchema::parse(s.as_str(), site.deployment.clone())?; // Using the features field to store the instrument flag is a bit // backhanded, but since this will be used very rarely, should not diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index dbcfba7239e..2e422638ddc 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -40,10 +40,10 @@ use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_OBJECT} use graph::prelude::{ anyhow, debug, info, o, warn, web3, ApiSchema, AttributeNames, BlockNumber, BlockPtr, CheapClone, DeploymentHash, DeploymentState, Entity, EntityModification, EntityQuery, Error, - Logger, QueryExecutionError, Schema, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, - Value, ENV_VARS, + Logger, QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, + ENV_VARS, }; -use graph::schema::api_schema; +use 
graph::schema::InputSchema; use web3::types::Address; use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; @@ -74,7 +74,7 @@ pub enum ReplicaId { #[derive(Clone)] pub(crate) struct SubgraphInfo { /// The schema as supplied by the user - pub(crate) input: Arc, + pub(crate) input: Arc, /// The schema we derive from `input` with `graphql::schema::api::api_schema` pub(crate) api: HashMap>, /// The block number at which this subgraph was grafted onto @@ -177,7 +177,7 @@ impl DeploymentStore { pub(crate) fn create_deployment( &self, - schema: &Schema, + schema: &InputSchema, deployment: DeploymentCreate, site: Arc, graft_base: Option>, @@ -578,11 +578,8 @@ impl DeploymentStore { for version in VERSIONS.iter() { let api_version = ApiVersion::from_version(version).expect("Invalid API version"); - let mut schema = manifest_info.input_schema.clone(); - schema.document = - api_schema(&schema.document).map_err(|e| StoreError::Unknown(e.into()))?; - schema.add_subgraph_id_directives(site.deployment.clone()); - api.insert(api_version, Arc::new(ApiSchema::from_api_schema(schema)?)); + let schema = manifest_info.input_schema.api_schema()?; + api.insert(api_version, Arc::new(schema)); } let spec_version = diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 9bc34814220..3cae590bf2f 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -4,17 +4,18 @@ use std::{ sync::{Arc, Mutex}, }; +use graph::schema::InputSchema; use graph::{ block_on, - components::store::SubgraphFork as SubgraphForkTrait, + components::store::{EntityType, SubgraphFork as SubgraphForkTrait}, data::graphql::ext::DirectiveFinder, prelude::{ info, r::Value as RValue, reqwest, - s::{Definition, Field, ObjectType, TypeDefinition}, - serde_json, Attribute, DeploymentHash, Entity, Logger, Schema, Serialize, StoreError, - Value, ValueType, + s::{Field, ObjectType}, + serde_json, Attribute, DeploymentHash, Entity, Logger, Serialize, StoreError, Value, + 
ValueType, }, url::Url, }; @@ -41,7 +42,7 @@ struct Variables { pub(crate) struct SubgraphFork { client: reqwest::Client, endpoint: Url, - schema: Arc, + schema: Arc, fetched_ids: Mutex>, logger: Logger, } @@ -91,7 +92,7 @@ impl SubgraphFork { pub(crate) fn new( base: Url, id: DeploymentHash, - schema: Arc, + schema: Arc, logger: Logger, ) -> Result { Ok(Self { @@ -130,19 +131,8 @@ impl SubgraphFork { } fn get_fields_of(&self, entity_type: &str) -> Result<&Vec, StoreError> { - let entity: Option<&ObjectType> = - self.schema - .document - .definitions - .iter() - .find_map(|def| match def { - Definition::TypeDefinition(TypeDefinition::Object(o)) - if o.name == entity_type => - { - Some(o) - } - _ => None, - }); + let entity_type = EntityType::new(entity_type.to_string()); + let entity: Option<&ObjectType> = self.schema.find_object_type(&entity_type); if entity.is_none() { return Err(StoreError::ForkFailure(format!( @@ -255,8 +245,8 @@ mod tests { DeploymentHash::new("test").unwrap() } - fn test_schema() -> Arc { - let schema = Schema::new( + fn test_schema() -> Arc { + let schema = InputSchema::new( DeploymentHash::new("test").unwrap(), parse_schema::( r#"type Gravatar @entity { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 4e578db1ec1..a228a30d9d4 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -30,6 +30,7 @@ use graph::data::query::Trace; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; +use graph::schema::InputSchema; use graph::slog::warn; use inflector::Inflector; use lazy_static::lazy_static; @@ -50,8 +51,8 @@ use crate::{ }, }; use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; -use graph::data::graphql::ext::{DirectiveFinder, DocumentExt, ObjectTypeExt}; -use graph::data::schema::{FulltextConfig, FulltextDefinition, Schema, SCHEMA_TYPE_NAME}; +use 
graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; +use graph::data::schema::{FulltextConfig, FulltextDefinition, SCHEMA_TYPE_NAME}; use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::{POI_OBJECT, POI_TABLE}; use graph::prelude::{ @@ -255,10 +256,13 @@ impl Layout { /// Generate a layout for a relational schema for entities in the /// GraphQL schema `schema`. The name of the database schema in which /// the subgraph's tables live is in `site`. - pub fn new(site: Arc, schema: &Schema, catalog: Catalog) -> Result { + pub fn new( + site: Arc, + schema: &InputSchema, + catalog: Catalog, + ) -> Result { // Extract enum types let enums: EnumMap = schema - .document .get_enum_definitions() .iter() .map( @@ -280,7 +284,6 @@ impl Layout { // List of all object types that are not __SCHEMA__ let object_types = schema - .document .get_object_type_definitions() .into_iter() .filter(|obj_type| obj_type.name != SCHEMA_TYPE_NAME) @@ -288,7 +291,7 @@ impl Layout { // For interfaces, check that all implementors use the same IdType // and build a list of name/IdType pairs - let id_types_for_interface = schema.types_for_interface.iter().map(|(interface, types)| { + let id_types_for_interface = schema.interface_types().iter().map(|(interface, types)| { types .iter() .map(IdType::try_from) @@ -327,7 +330,8 @@ impl Layout { Table::new( obj_type, &catalog, - Schema::entity_fulltext_definitions(&obj_type.name, &schema.document) + schema + .entity_fulltext_definitions(&obj_type.name) .map_err(|_| StoreError::FulltextSearchNonDeterministic)?, &enums, &id_types, @@ -427,7 +431,7 @@ impl Layout { pub fn create_relational_schema( conn: &PgConnection, site: Arc, - schema: &Schema, + schema: &InputSchema, entities_with_causality_region: BTreeSet, ) -> Result { let catalog = Catalog::for_creation(site.cheap_clone(), entities_with_causality_region); diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index 
72a3b8bed7b..487de02ae56 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -9,7 +9,7 @@ const ID_TYPE: ColumnType = ColumnType::String; fn test_layout(gql: &str) -> Layout { let subgraph = DeploymentHash::new("subgraph").unwrap(); - let schema = Schema::parse(gql, subgraph.clone()).expect("Test schema invalid"); + let schema = InputSchema::parse(gql, subgraph.clone()).expect("Test schema invalid"); let namespace = Namespace::new("sgd0815".to_owned()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); let catalog = Catalog::for_tests(site.clone(), BTreeSet::from_iter(["FileThing".into()])) @@ -201,7 +201,7 @@ const THING_GQL: &str = r#" bigInt: BigInt, color: Color, } - + type FileThing @entity { id: ID! } diff --git a/store/postgres/src/relational/query_tests.rs b/store/postgres/src/relational/query_tests.rs index acb4610b301..34f179fb538 100644 --- a/store/postgres/src/relational/query_tests.rs +++ b/store/postgres/src/relational/query_tests.rs @@ -3,7 +3,8 @@ use std::{collections::BTreeSet, sync::Arc}; use diesel::{debug_query, pg::Pg}; use graph::{ components::store::EntityType, - prelude::{r, serde_json as json, DeploymentHash, EntityFilter, Schema}, + prelude::{r, serde_json as json, DeploymentHash, EntityFilter}, + schema::InputSchema, }; use crate::{ @@ -30,7 +31,7 @@ fn gql_value_from_bytes() { fn test_layout(gql: &str) -> Layout { let subgraph = DeploymentHash::new("subgraph").unwrap(); - let schema = Schema::parse(gql, subgraph.clone()).expect("Test schema invalid"); + let schema = InputSchema::parse(gql, subgraph.clone()).expect("Test schema invalid"); let namespace = Namespace::new("sgd0815".to_owned()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); let catalog = @@ -41,7 +42,7 @@ fn test_layout(gql: &str) -> Layout { #[track_caller] fn filter_contains(filter: EntityFilter, sql: &str) { const SCHEMA: &str = " - type 
Thing @entity { + type Thing @entity { id: Bytes!, address: Bytes!, name: String diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 7c27c6eace8..4d10456694d 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -27,9 +27,10 @@ use graph::{ prelude::{ anyhow, futures03::future::join_all, lazy_static, o, web3::types::Address, ApiSchema, ApiVersion, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, - MetricsRegistry, NodeId, PartialBlockPtr, Schema, StoreError, SubgraphDeploymentEntity, + MetricsRegistry, NodeId, PartialBlockPtr, StoreError, SubgraphDeploymentEntity, SubgraphName, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }, + schema::InputSchema, url::Url, util::timed_cache::TimedCache, }; @@ -500,7 +501,7 @@ impl SubgraphStoreInner { fn create_deployment_internal( &self, name: SubgraphName, - schema: &Schema, + schema: &InputSchema, deployment: DeploymentCreate, node_id: NodeId, network_name: String, @@ -512,7 +513,7 @@ impl SubgraphStoreInner { #[cfg(not(debug_assertions))] assert!(!replace); - self.evict(&schema.id)?; + self.evict(schema.id())?; let graft_base = deployment .graft_base @@ -537,7 +538,7 @@ impl SubgraphStoreInner { Some(src_layout) => src_layout.site.schema_version, }; let conn = self.primary_conn()?; - let site = conn.allocate_site(shard, &schema.id, network_name, schema_version)?; + let site = conn.allocate_site(shard, schema.id(), network_name, schema_version)?; let node_id = conn.assigned_node(&site)?.unwrap_or(node_id); (site, node_id) }; @@ -680,7 +681,7 @@ impl SubgraphStoreInner { pub fn create_deployment_replace( &self, name: SubgraphName, - schema: &Schema, + schema: &InputSchema, deployment: DeploymentCreate, node_id: NodeId, network_name: String, @@ -1219,7 +1220,7 @@ impl SubgraphStoreTrait for SubgraphStore { fn create_subgraph_deployment( &self, name: SubgraphName, - schema: &Schema, + schema: 
&InputSchema, deployment: DeploymentCreate, node_id: NodeId, network_name: String, @@ -1287,7 +1288,7 @@ impl SubgraphStoreTrait for SubgraphStore { Ok(changes) } - fn input_schema(&self, id: &DeploymentHash) -> Result, StoreError> { + fn input_schema(&self, id: &DeploymentHash) -> Result, StoreError> { let (store, site) = self.store(id)?; let info = store.subgraph_info(&site)?; Ok(info.input) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index cda37b79aeb..1b03dd60fd6 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -9,9 +9,10 @@ use graph::constraint_violation; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ - BlockNumber, Entity, MetricsRegistry, Schema, SubgraphDeploymentEntity, SubgraphStore as _, + BlockNumber, Entity, MetricsRegistry, SubgraphDeploymentEntity, SubgraphStore as _, BLOCK_NUMBER_MAX, }; +use graph::schema::InputSchema; use graph::slog::info; use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; @@ -69,7 +70,7 @@ struct SyncStore { store: WritableSubgraphStore, writable: Arc, site: Arc, - input_schema: Arc, + input_schema: Arc, } impl SyncStore { @@ -367,7 +368,7 @@ impl SyncStore { .await } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> Arc { self.input_schema.clone() } } @@ -1166,7 +1167,7 @@ impl ReadStore for WritableStore { self.writer.get_derived(key) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> Arc { self.store.input_schema() } } diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index c55f8599fc5..a136c1fae5c 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -6,6 +6,7 @@ use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError}; use graph::data_source::CausalityRegion; use graph::log; use graph::prelude::{QueryStoreManager as _, SubgraphStore as _, *}; +use graph::schema::InputSchema; use 
graph::semver::Version; use graph::{ blockchain::block_stream::FirehoseCursor, blockchain::ChainIdentifier, @@ -151,7 +152,7 @@ pub async fn create_subgraph( schema: &str, base: Option<(DeploymentHash, BlockPtr)>, ) -> Result { - let schema = Schema::parse(schema, subgraph_id.clone()).unwrap(); + let schema = InputSchema::parse(schema, subgraph_id.clone()).unwrap(); let manifest = SubgraphManifest:: { id: subgraph_id.clone(), diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index d284a98d107..414ab08678c 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -6,6 +6,7 @@ use graph::components::store::{ use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; use graph::prelude::*; +use graph::schema::InputSchema; use graph::{ components::store::{DeploymentId, DeploymentLocator}, prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, @@ -27,8 +28,8 @@ lazy_static! { static ref SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("entity_cache").unwrap(); static ref DEPLOYMENT: DeploymentLocator = DeploymentLocator::new(DeploymentId::new(-12), SUBGRAPH_ID.clone()); - static ref SCHEMA: Arc = Arc::new( - Schema::parse( + static ref SCHEMA: Arc = Arc::new( + InputSchema::parse( " type Band @entity { id: ID! @@ -72,7 +73,7 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> Arc { SCHEMA.clone() } } @@ -387,8 +388,9 @@ lazy_static! 
{ static ref LOAD_RELATED_ID_STRING: String = String::from("loadrelatedsubgraph"); static ref LOAD_RELATED_ID: DeploymentHash = DeploymentHash::new(LOAD_RELATED_ID_STRING.as_str()).unwrap(); - static ref LOAD_RELATED_SUBGRAPH: Schema = - Schema::parse(ACCOUNT_GQL, LOAD_RELATED_ID.clone()).expect("Failed to parse user schema"); + static ref LOAD_RELATED_SUBGRAPH: InputSchema = + InputSchema::parse(ACCOUNT_GQL, LOAD_RELATED_ID.clone()) + .expect("Failed to parse user schema"); static ref TEST_BLOCK_1_PTR: BlockPtr = ( H256::from(hex!( "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13" diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 1f4c02a97ae..8cc8787c20d 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -2,6 +2,7 @@ use graph::components::store::EntityKey; use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; use graph::prelude::SubscriptionResult; +use graph::schema::InputSchema; use graphql_parser::Pos; use std::iter::FromIterator; use std::sync::atomic::{AtomicBool, Ordering}; @@ -24,7 +25,7 @@ use graph::{ futures03::stream::StreamExt, lazy_static, o, q, r, serde_json, slog, BlockPtr, DeploymentHash, Entity, EntityOperation, FutureExtension, GraphQlRunner as _, Logger, NodeId, Query, QueryError, QueryExecutionError, QueryResult, QueryStoreManager, - QueryVariables, Schema, SubgraphManifest, SubgraphName, SubgraphStore, + QueryVariables, SubgraphManifest, SubgraphName, SubgraphStore, SubgraphVersionSwitchingMode, Subscription, SubscriptionError, }, semver::Version, @@ -147,7 +148,7 @@ async fn setup( } } -fn test_schema(id: DeploymentHash, id_type: IdType) -> Schema { +fn test_schema(id: DeploymentHash, id_type: IdType) -> InputSchema { const SCHEMA: &str = " type _Schema_ @@ -288,7 +289,7 @@ fn test_schema(id: DeploymentHash, id_type: IdType) -> Schema { } "; - Schema::parse(&SCHEMA.replace("@ID@", id_type.as_str()), 
id).expect("Test schema invalid") + InputSchema::parse(&SCHEMA.replace("@ID@", id_type.as_str()), id).expect("Test schema invalid") } async fn insert_test_entities( diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index c401afeaa2e..190406e63f7 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -1,4 +1,5 @@ use graph::blockchain::block_stream::FirehoseCursor; +use graph::schema::InputSchema; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; use std::{marker::PhantomData, str::FromStr}; @@ -76,8 +77,9 @@ const USER: &str = "User"; lazy_static! { static ref TEST_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("testsubgraph").unwrap(); - static ref TEST_SUBGRAPH_SCHEMA: Schema = - Schema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()).expect("Failed to parse user schema"); + static ref TEST_SUBGRAPH_SCHEMA: InputSchema = + InputSchema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()) + .expect("Failed to parse user schema"); static ref BLOCKS: Vec = vec![ "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f", "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13", diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index ca68338933e..7c3b5fa4f1e 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -6,9 +6,10 @@ use graph::data::store::scalar; use graph::entity; use graph::prelude::{ o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, - EntityOrder, EntityQuery, Logger, Schema, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, + EntityOrder, EntityQuery, Logger, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; use graph::prelude::{BlockNumber, MetricsRegistry}; +use graph::schema::InputSchema; use graph_store_postgres::layout_for_tests::set_account_like; use 
graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; @@ -417,7 +418,7 @@ fn insert_pets(conn: &PgConnection, layout: &Layout) { } fn create_schema(conn: &PgConnection) -> Layout { - let schema = Schema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap(); + let schema = InputSchema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap(); let site = make_dummy_site( THINGS_SUBGRAPH_ID.clone(), NAMESPACE.clone(), diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index e28314826d1..e495daea0ac 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -5,6 +5,7 @@ use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data_source::CausalityRegion; use graph::prelude::{EntityQuery, MetricsRegistry}; +use graph::schema::InputSchema; use hex_literal::hex; use lazy_static::lazy_static; use std::borrow::Cow; @@ -14,8 +15,8 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::prelude::{ o, slog, web3::types::H256, AttributeNames, ChildMultiplicity, DeploymentHash, Entity, - EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, Schema, StopwatchMetrics, - Value, WindowAttribute, BLOCK_NUMBER_MAX, + EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, StopwatchMetrics, Value, + WindowAttribute, BLOCK_NUMBER_MAX, }; use graph::{ components::store::EntityType, @@ -118,7 +119,7 @@ fn insert_thing(conn: &PgConnection, layout: &Layout, id: &str, name: &str) { } fn create_schema(conn: &PgConnection) -> Layout { - let schema = Schema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap(); + let schema = InputSchema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap(); let query = format!("create schema {}", NAMESPACE.as_str()); conn.batch_execute(&query).unwrap(); diff --git a/store/test-store/tests/postgres/store.rs 
b/store/test-store/tests/postgres/store.rs index abbef4c3b3b..7fbacb9c102 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -2,6 +2,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::DeploymentCreate; +use graph::schema::InputSchema; use graph_chain_ethereum::{Mapping, MappingABI}; use hex_literal::hex; use lazy_static::lazy_static; @@ -64,8 +65,9 @@ lazy_static! { static ref TEST_SUBGRAPH_ID_STRING: String = String::from("testsubgraph"); static ref TEST_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new(TEST_SUBGRAPH_ID_STRING.as_str()).unwrap(); - static ref TEST_SUBGRAPH_SCHEMA: Schema = - Schema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()).expect("Failed to parse user schema"); + static ref TEST_SUBGRAPH_SCHEMA: InputSchema = + InputSchema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()) + .expect("Failed to parse user schema"); static ref TEST_BLOCK_0_PTR: BlockPtr = ( H256::from(hex!( "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f" @@ -1269,7 +1271,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { run_test(|store, _, _| async move { let subgraph_id = DeploymentHash::new("EntityChangeTestSubgraph").unwrap(); let schema = - Schema::parse(USER_GQL, subgraph_id.clone()).expect("Failed to parse user schema"); + InputSchema::parse(USER_GQL, subgraph_id.clone()).expect("Failed to parse user schema"); let manifest = SubgraphManifest:: { id: subgraph_id.clone(), spec_version: Version::new(1, 0, 0), diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index a2ea1a5932f..e0e58d8ea72 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -10,13 +10,13 @@ use graph::{ prelude::EntityChange, prelude::EntityChangeOperation, prelude::QueryStoreManager, - 
prelude::Schema, prelude::SubgraphManifest, prelude::SubgraphName, prelude::SubgraphVersionSwitchingMode, prelude::UnfailOutcome, prelude::{futures03, StoreEvent}, prelude::{CheapClone, DeploymentHash, NodeId, SubgraphStore as _}, + schema::InputSchema, semver::Version, }; use graph_store_postgres::layout_for_tests::Connection as Primary; @@ -133,7 +133,7 @@ fn create_subgraph() { ) -> (DeploymentLocator, HashSet) { let name = SubgraphName::new(SUBGRAPH_NAME.to_string()).unwrap(); let id = DeploymentHash::new(id.to_string()).unwrap(); - let schema = Schema::parse(SUBGRAPH_GQL, id.clone()).unwrap(); + let schema = InputSchema::parse(SUBGRAPH_GQL, id.clone()).unwrap(); let manifest = SubgraphManifest:: { id, @@ -472,7 +472,7 @@ fn version_info() { Some("repo for versionInfoSubgraph"), vi.repository.as_deref() ); - assert_eq!(NAME, vi.schema.id.as_str()); + assert_eq!(NAME, vi.schema.id().as_str()); assert_eq!(Some(1), vi.latest_ethereum_block_number); assert_eq!(NETWORK_NAME, vi.network.as_str()); // We set the head for the network to null in the test framework diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 1c0733e0574..6f5364d2935 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -1,5 +1,6 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::subgraph::schema::DeploymentCreate; +use graph::schema::InputSchema; use lazy_static::lazy_static; use std::marker::PhantomData; use test_store::*; @@ -25,8 +26,9 @@ lazy_static! 
{ static ref TEST_SUBGRAPH_ID_STRING: String = String::from("writableSubgraph"); static ref TEST_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new(TEST_SUBGRAPH_ID_STRING.as_str()).unwrap(); - static ref TEST_SUBGRAPH_SCHEMA: Schema = - Schema::parse(SCHEMA_GQL, TEST_SUBGRAPH_ID.clone()).expect("Failed to parse user schema"); + static ref TEST_SUBGRAPH_SCHEMA: InputSchema = + InputSchema::parse(SCHEMA_GQL, TEST_SUBGRAPH_ID.clone()) + .expect("Failed to parse user schema"); } /// Inserts test data into the store. From 4f5207c435894d72b909cbbf6d57e0c78142e987 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 1 Apr 2023 17:39:05 -0700 Subject: [PATCH 0137/2104] graph: Move some methods from Schema to ApiSchema --- graph/src/data/schema.rs | 154 +---------------------------- graph/src/schema/input_schema.rs | 163 ++++++++++++++++++++++++++++--- 2 files changed, 153 insertions(+), 164 deletions(-) diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index dc0f026f77b..e5ff0ea4907 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,8 +1,8 @@ use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; +use crate::components::store::EntityType; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; -use crate::data::store::{self, ValueType}; +use crate::data::store::ValueType; use crate::data::subgraph::DeploymentHash; use crate::prelude::{ anyhow, lazy_static, @@ -25,7 +25,6 @@ use std::str::FromStr; use std::sync::Arc; use super::graphql::ObjectOrInterface; -use super::store::scalar; pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; @@ -478,8 +477,6 @@ pub struct Schema { // Maps an interface name to the list of entities that implement it. 
pub types_for_interface: BTreeMap>, - - immutable_types: HashSet, } impl Schema { @@ -491,14 +488,12 @@ impl Schema { // `Schema` is always fully valid pub fn new(id: DeploymentHash, document: s::Document) -> Result { let (interfaces_for_type, types_for_interface) = Self::collect_interfaces(&document)?; - let immutable_types = Self::collect_immutable_types(&document); let mut schema = Schema { id: id.clone(), document, interfaces_for_type, types_for_interface, - immutable_types, }; schema.add_subgraph_id_directives(id); @@ -506,116 +501,6 @@ impl Schema { Ok(schema) } - /// Construct a value for the entity type's id attribute - pub fn id_value(&self, key: &EntityKey) -> Result { - let base_type = self - .document - .get_object_type_definition(key.entity_type.as_str()) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, - key.entity_id, - key.entity_type - ) - })? - .field("id") - .unwrap() - .field_type - .get_base_type(); - - match base_type { - "ID" | "String" => Ok(store::Value::String(key.entity_id.to_string())), - "Bytes" => Ok(store::Value::Bytes(scalar::Bytes::from_str( - &key.entity_id, - )?)), - s => { - return Err(anyhow!( - "Entity type {} uses illegal type {} for id column", - key.entity_type, - s - )) - } - } - } - - /// Returns the field that has the relationship with the key requested - /// This works as a reverse search for the Field related to the query - /// - /// example: - /// - /// type Account @entity { - /// wallets: [Wallet!]! @derivedFrom(field: "account") - /// } - /// type Wallet { - /// account: Account! - /// balance: Int! 
- /// } - /// - /// When asked to load the related entities from "Account" in the field "wallets" - /// This function will return the type "Wallet" with the field "account" - pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &Field), Error> { - let field = self - .document - .get_object_type_definition(key.entity_type.as_str()) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, - key.entity_id, - key.entity_type, - ) - })? - .field(&key.entity_field) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown field `{}`", - key.entity_type, - key.entity_id, - key.entity_field, - ) - })?; - if field.is_derived() { - let derived_from = field.find_directive("derivedFrom").unwrap(); - let base_type = field.field_type.get_base_type(); - let field_name = derived_from.argument("field").unwrap(); - - let field = self - .document - .get_object_type_definition(base_type) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, - key.entity_id, - key.entity_type, - ) - })? 
- .field(field_name.as_str().unwrap()) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown field `{}`", - key.entity_type, - key.entity_id, - key.entity_field, - ) - })?; - - Ok((base_type, field)) - } else { - Err(anyhow!( - "Entity {}[{}]: field `{}` is not derived", - key.entity_type, - key.entity_id, - key.entity_field, - )) - } - } - - pub fn is_immutable(&self, entity_type: &EntityType) -> bool { - self.immutable_types.contains(entity_type) - } - fn collect_interfaces( document: &s::Document, ) -> Result< @@ -669,16 +554,6 @@ impl Schema { Ok((interfaces_for_type, types_for_interface)) } - fn collect_immutable_types(document: &s::Document) -> HashSet { - HashSet::from_iter( - document - .get_object_type_definitions() - .into_iter() - .filter(|obj_type| obj_type.is_immutable()) - .map(Into::into), - ) - } - pub fn parse(raw: &str, id: DeploymentHash) -> Result { let document = graphql_parser::parse_schema(raw)?.into_static(); @@ -1281,31 +1156,6 @@ impl Schema { .into_iter() .find(|object_type| object_type.name.eq(SCHEMA_TYPE_NAME)) } - - pub fn entity_fulltext_definitions( - entity: &str, - document: &Document, - ) -> Result, anyhow::Error> { - Ok(document - .get_fulltext_directives()? 
- .into_iter() - .filter(|directive| match directive.argument("include") { - Some(Value::List(includes)) if !includes.is_empty() => { - includes.iter().any(|include| match include { - Value::Object(include) => match include.get("entity") { - Some(Value::String(fulltext_entity)) if fulltext_entity == entity => { - true - } - _ => false, - }, - _ => false, - }) - } - _ => false, - }) - .map(FulltextDefinition::from) - .collect()) - } } #[test] diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 5ea0314c5d0..02ca15cca63 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -1,29 +1,47 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; +use std::str::FromStr; -use anyhow::Error; +use anyhow::{anyhow, Error}; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; -use crate::data::graphql::DocumentExt; +use crate::data::graphql::ext::DirectiveFinder; +use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; use crate::data::schema::{FulltextDefinition, Schema, SchemaValidationError}; -use crate::data::store; +use crate::data::store::{self, scalar}; +use crate::prelude::q::Value; use crate::prelude::{s, ApiSchema, DeploymentHash}; use crate::schema::api_schema; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { schema: Schema, + immutable_types: HashSet, } impl InputSchema { + fn create(schema: Schema) -> Self { + let immutable_types = HashSet::from_iter( + schema + .document + .get_object_type_definitions() + .into_iter() + .filter(|obj_type| obj_type.is_immutable()) + .map(Into::into), + ); + Self { + schema, + immutable_types, + } + } pub fn new(id: DeploymentHash, document: s::Document) -> Result { let schema = Schema::new(id, document)?; - Ok(Self { schema }) + Ok(Self::create(schema)) } pub fn parse(raw: &str, id: DeploymentHash) -> Result { let schema = Schema::parse(raw, id)?; - Ok(Self { schema }) + 
Ok(Self::create(schema)) } pub fn api_schema(&self) -> Result { @@ -33,16 +51,117 @@ impl InputSchema { ApiSchema::from_api_schema(schema) } + /// Returns the field that has the relationship with the key requested + /// This works as a reverse search for the Field related to the query + /// + /// example: + /// + /// type Account @entity { + /// wallets: [Wallet!]! @derivedFrom(field: "account") + /// } + /// type Wallet { + /// account: Account! + /// balance: Int! + /// } + /// + /// When asked to load the related entities from "Account" in the field "wallets" + /// This function will return the type "Wallet" with the field "account" pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &s::Field), Error> { - self.schema.get_field_related(key) - } - + let field = self + .schema + .document + .get_object_type_definition(key.entity_type.as_str()) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, + key.entity_id, + key.entity_type, + ) + })? + .field(&key.entity_field) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown field `{}`", + key.entity_type, + key.entity_id, + key.entity_field, + ) + })?; + if field.is_derived() { + let derived_from = field.find_directive("derivedFrom").unwrap(); + let base_type = field.field_type.get_base_type(); + let field_name = derived_from.argument("field").unwrap(); + + let field = self + .schema + .document + .get_object_type_definition(base_type) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, + key.entity_id, + key.entity_type, + ) + })? 
+ .field(field_name.as_str().unwrap()) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown field `{}`", + key.entity_type, + key.entity_id, + key.entity_field, + ) + })?; + + Ok((base_type, field)) + } else { + Err(anyhow!( + "Entity {}[{}]: field `{}` is not derived", + key.entity_type, + key.entity_id, + key.entity_field, + )) + } + } + + /// Construct a value for the entity type's id attribute pub fn id_value(&self, key: &EntityKey) -> Result { - self.schema.id_value(key) + let base_type = self + .schema + .document + .get_object_type_definition(key.entity_type.as_str()) + .ok_or_else(|| { + anyhow!( + "Entity {}[{}]: unknown entity type `{}`", + key.entity_type, + key.entity_id, + key.entity_type + ) + })? + .field("id") + .unwrap() + .field_type + .get_base_type(); + + match base_type { + "ID" | "String" => Ok(store::Value::String(key.entity_id.to_string())), + "Bytes" => Ok(store::Value::Bytes(scalar::Bytes::from_str( + &key.entity_id, + )?)), + s => { + return Err(anyhow!( + "Entity type {} uses illegal type {} for id column", + key.entity_type, + s + )) + } + } } pub fn is_immutable(&self, entity_type: &EntityType) -> bool { - self.schema.is_immutable(entity_type) + self.immutable_types.contains(entity_type) } pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { @@ -83,7 +202,27 @@ impl InputSchema { &self, entity: &str, ) -> Result, anyhow::Error> { - Schema::entity_fulltext_definitions(entity, &self.schema.document) + Ok(self + .schema + .document + .get_fulltext_directives()? 
+ .into_iter() + .filter(|directive| match directive.argument("include") { + Some(Value::List(includes)) if !includes.is_empty() => { + includes.iter().any(|include| match include { + Value::Object(include) => match include.get("entity") { + Some(Value::String(fulltext_entity)) if fulltext_entity == entity => { + true + } + _ => false, + }, + _ => false, + }) + } + _ => false, + }) + .map(FulltextDefinition::from) + .collect()) } pub fn id(&self) -> &DeploymentHash { From 3c3357757a4768681d0caf5f0f212fc020798e05 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 7 Apr 2023 18:59:04 -0700 Subject: [PATCH 0138/2104] graph, store: Move fulltext types to graph::schema --- graph/src/data/schema.rs | 151 +--------------------- graph/src/schema/fulltext.rs | 155 +++++++++++++++++++++++ graph/src/schema/input_schema.rs | 4 +- graph/src/schema/mod.rs | 2 + store/postgres/src/relational.rs | 4 +- store/postgres/src/relational_queries.rs | 3 +- 6 files changed, 165 insertions(+), 154 deletions(-) create mode 100644 graph/src/schema/fulltext.rs diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index e5ff0ea4907..5137265a23b 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -9,6 +9,7 @@ use crate::prelude::{ q::Value, s::{self, Definition, InterfaceType, ObjectType, TypeDefinition, *}, }; +use crate::schema::{FulltextAlgorithm, FulltextLanguage}; use anyhow::{Context, Error}; use graphql_parser::{self, Pos}; @@ -102,156 +103,6 @@ pub enum SchemaValidationError { FulltextIncludedFieldInvalid(String), } -#[derive(Clone, Debug, PartialEq)] -pub enum FulltextLanguage { - Simple, - Danish, - Dutch, - English, - Finnish, - French, - German, - Hungarian, - Italian, - Norwegian, - Portugese, - Romanian, - Russian, - Spanish, - Swedish, - Turkish, -} - -impl TryFrom<&str> for FulltextLanguage { - type Error = String; - fn try_from(language: &str) -> Result { - match language { - "simple" => Ok(FulltextLanguage::Simple), - "da" => 
Ok(FulltextLanguage::Danish), - "nl" => Ok(FulltextLanguage::Dutch), - "en" => Ok(FulltextLanguage::English), - "fi" => Ok(FulltextLanguage::Finnish), - "fr" => Ok(FulltextLanguage::French), - "de" => Ok(FulltextLanguage::German), - "hu" => Ok(FulltextLanguage::Hungarian), - "it" => Ok(FulltextLanguage::Italian), - "no" => Ok(FulltextLanguage::Norwegian), - "pt" => Ok(FulltextLanguage::Portugese), - "ro" => Ok(FulltextLanguage::Romanian), - "ru" => Ok(FulltextLanguage::Russian), - "es" => Ok(FulltextLanguage::Spanish), - "sv" => Ok(FulltextLanguage::Swedish), - "tr" => Ok(FulltextLanguage::Turkish), - invalid => Err(format!( - "Provided language for fulltext search is invalid: {}", - invalid - )), - } - } -} - -impl FulltextLanguage { - /// Return the language as a valid SQL string. The string is safe to - /// directly use verbatim in a query, i.e., doesn't require being passed - /// through a bind variable - pub fn as_sql(&self) -> &'static str { - match self { - Self::Simple => "'simple'", - Self::Danish => "'danish'", - Self::Dutch => "'dutch'", - Self::English => "'english'", - Self::Finnish => "'finnish'", - Self::French => "'french'", - Self::German => "'german'", - Self::Hungarian => "'hungarian'", - Self::Italian => "'italian'", - Self::Norwegian => "'norwegian'", - Self::Portugese => "'portugese'", - Self::Romanian => "'romanian'", - Self::Russian => "'russian'", - Self::Spanish => "'spanish'", - Self::Swedish => "'swedish'", - Self::Turkish => "'turkish'", - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub enum FulltextAlgorithm { - Rank, - ProximityRank, -} - -impl TryFrom<&str> for FulltextAlgorithm { - type Error = String; - fn try_from(algorithm: &str) -> Result { - match algorithm { - "rank" => Ok(FulltextAlgorithm::Rank), - "proximityRank" => Ok(FulltextAlgorithm::ProximityRank), - invalid => Err(format!( - "The provided fulltext search algorithm {} is invalid. 
It must be one of: rank, proximityRank", - invalid, - )), - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct FulltextConfig { - pub language: FulltextLanguage, - pub algorithm: FulltextAlgorithm, -} - -pub struct FulltextDefinition { - pub config: FulltextConfig, - pub included_fields: HashSet, - pub name: String, -} - -impl From<&s::Directive> for FulltextDefinition { - // Assumes the input is a Fulltext Directive that has already been validated because it makes - // liberal use of unwrap() where specific types are expected - fn from(directive: &Directive) -> Self { - let name = directive.argument("name").unwrap().as_str().unwrap(); - - let algorithm = FulltextAlgorithm::try_from( - directive.argument("algorithm").unwrap().as_enum().unwrap(), - ) - .unwrap(); - - let language = - FulltextLanguage::try_from(directive.argument("language").unwrap().as_enum().unwrap()) - .unwrap(); - - let included_entity_list = directive.argument("include").unwrap().as_list().unwrap(); - // Currently fulltext query fields are limited to 1 entity, so we just take the first (and only) included Entity - let included_entity = included_entity_list.first().unwrap().as_object().unwrap(); - let included_field_values = included_entity.get("fields").unwrap().as_list().unwrap(); - let included_fields: HashSet = included_field_values - .iter() - .map(|field| { - field - .as_object() - .unwrap() - .get("name") - .unwrap() - .as_str() - .unwrap() - .into() - }) - .collect(); - - FulltextDefinition { - config: FulltextConfig { - language, - algorithm, - }, - included_fields, - name: name.into(), - } - } -} - #[derive(Debug)] pub struct ApiSchema { schema: Schema, diff --git a/graph/src/schema/fulltext.rs b/graph/src/schema/fulltext.rs new file mode 100644 index 00000000000..074e843dce9 --- /dev/null +++ b/graph/src/schema/fulltext.rs @@ -0,0 +1,155 @@ +use std::collections::HashSet; +use std::convert::TryFrom; + +use crate::data::graphql::{DirectiveExt, ValueExt}; +use crate::prelude::s; 
+ +#[derive(Clone, Debug, PartialEq)] +pub enum FulltextLanguage { + Simple, + Danish, + Dutch, + English, + Finnish, + French, + German, + Hungarian, + Italian, + Norwegian, + Portugese, + Romanian, + Russian, + Spanish, + Swedish, + Turkish, +} + +impl TryFrom<&str> for FulltextLanguage { + type Error = String; + fn try_from(language: &str) -> Result { + match language { + "simple" => Ok(FulltextLanguage::Simple), + "da" => Ok(FulltextLanguage::Danish), + "nl" => Ok(FulltextLanguage::Dutch), + "en" => Ok(FulltextLanguage::English), + "fi" => Ok(FulltextLanguage::Finnish), + "fr" => Ok(FulltextLanguage::French), + "de" => Ok(FulltextLanguage::German), + "hu" => Ok(FulltextLanguage::Hungarian), + "it" => Ok(FulltextLanguage::Italian), + "no" => Ok(FulltextLanguage::Norwegian), + "pt" => Ok(FulltextLanguage::Portugese), + "ro" => Ok(FulltextLanguage::Romanian), + "ru" => Ok(FulltextLanguage::Russian), + "es" => Ok(FulltextLanguage::Spanish), + "sv" => Ok(FulltextLanguage::Swedish), + "tr" => Ok(FulltextLanguage::Turkish), + invalid => Err(format!( + "Provided language for fulltext search is invalid: {}", + invalid + )), + } + } +} + +impl FulltextLanguage { + /// Return the language as a valid SQL string. 
The string is safe to + /// directly use verbatim in a query, i.e., doesn't require being passed + /// through a bind variable + pub fn as_sql(&self) -> &'static str { + match self { + Self::Simple => "'simple'", + Self::Danish => "'danish'", + Self::Dutch => "'dutch'", + Self::English => "'english'", + Self::Finnish => "'finnish'", + Self::French => "'french'", + Self::German => "'german'", + Self::Hungarian => "'hungarian'", + Self::Italian => "'italian'", + Self::Norwegian => "'norwegian'", + Self::Portugese => "'portugese'", + Self::Romanian => "'romanian'", + Self::Russian => "'russian'", + Self::Spanish => "'spanish'", + Self::Swedish => "'swedish'", + Self::Turkish => "'turkish'", + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum FulltextAlgorithm { + Rank, + ProximityRank, +} + +impl TryFrom<&str> for FulltextAlgorithm { + type Error = String; + fn try_from(algorithm: &str) -> Result { + match algorithm { + "rank" => Ok(FulltextAlgorithm::Rank), + "proximityRank" => Ok(FulltextAlgorithm::ProximityRank), + invalid => Err(format!( + "The provided fulltext search algorithm {} is invalid. 
It must be one of: rank, proximityRank", + invalid, + )), + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct FulltextConfig { + pub language: FulltextLanguage, + pub algorithm: FulltextAlgorithm, +} + +pub struct FulltextDefinition { + pub config: FulltextConfig, + pub included_fields: HashSet, + pub name: String, +} + +impl From<&s::Directive> for FulltextDefinition { + // Assumes the input is a Fulltext Directive that has already been validated because it makes + // liberal use of unwrap() where specific types are expected + fn from(directive: &s::Directive) -> Self { + let name = directive.argument("name").unwrap().as_str().unwrap(); + + let algorithm = FulltextAlgorithm::try_from( + directive.argument("algorithm").unwrap().as_enum().unwrap(), + ) + .unwrap(); + + let language = + FulltextLanguage::try_from(directive.argument("language").unwrap().as_enum().unwrap()) + .unwrap(); + + let included_entity_list = directive.argument("include").unwrap().as_list().unwrap(); + // Currently fulltext query fields are limited to 1 entity, so we just take the first (and only) included Entity + let included_entity = included_entity_list.first().unwrap().as_object().unwrap(); + let included_field_values = included_entity.get("fields").unwrap().as_list().unwrap(); + let included_fields: HashSet = included_field_values + .iter() + .map(|field| { + field + .as_object() + .unwrap() + .get("name") + .unwrap() + .as_str() + .unwrap() + .into() + }) + .collect(); + + FulltextDefinition { + config: FulltextConfig { + language, + algorithm, + }, + included_fields, + name: name.into(), + } + } +} diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 02ca15cca63..2693531a4d5 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -6,12 +6,14 @@ use anyhow::{anyhow, Error}; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use 
crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::schema::{FulltextDefinition, Schema, SchemaValidationError}; +use crate::data::schema::{Schema, SchemaValidationError}; use crate::data::store::{self, scalar}; use crate::prelude::q::Value; use crate::prelude::{s, ApiSchema, DeploymentHash}; use crate::schema::api_schema; +use super::fulltext::FulltextDefinition; + #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { schema: Schema, diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 8c03499107c..127232e1218 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -4,8 +4,10 @@ pub mod api; /// Utilities for working with GraphQL schema ASTs. pub mod ast; +mod fulltext; mod input_schema; pub use api::{api_schema, APISchemaError}; +pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index a228a30d9d4..ccb5aa4b2de 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -30,7 +30,7 @@ use graph::data::query::Trace; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; -use graph::schema::InputSchema; +use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema}; use graph::slog::warn; use inflector::Inflector; use lazy_static::lazy_static; @@ -52,7 +52,7 @@ use crate::{ }; use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; -use graph::data::schema::{FulltextConfig, FulltextDefinition, SCHEMA_TYPE_NAME}; +use graph::data::schema::SCHEMA_TYPE_NAME; use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::{POI_OBJECT, POI_TABLE}; use graph::prelude::{ diff --git a/store/postgres/src/relational_queries.rs 
b/store/postgres/src/relational_queries.rs index d8a09433e76..13cde73c039 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -20,9 +20,10 @@ use graph::prelude::{ EntityFilter, EntityLink, EntityOrder, EntityOrderByChild, EntityOrderByChildInfo, EntityRange, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, ENV_VARS, }; +use graph::schema::FulltextAlgorithm; use graph::{ components::store::{AttributeNames, EntityType}, - data::{schema::FulltextAlgorithm, store::scalar}, + data::store::scalar, }; use itertools::Itertools; use std::borrow::Cow; From d5c932cecc7b482ce3097a0b6f145a025a2251d0 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 7 Apr 2023 19:08:57 -0700 Subject: [PATCH 0139/2104] all: Move ApiSchema into graph::schema --- graph/src/components/store/traits.rs | 2 +- graph/src/data/schema.rs | 224 +----------------- graph/src/lib.rs | 2 +- graph/src/schema/api.rs | 217 +++++++++++++++++ graph/src/schema/input_schema.rs | 3 +- .../{data => schema}/introspection.graphql | 0 graph/src/schema/mod.rs | 3 +- graphql/examples/schema.rs | 2 +- graphql/src/execution/ast.rs | 4 +- graphql/src/execution/query.rs | 4 +- graphql/src/execution/resolver.rs | 3 +- graphql/src/store/prefetch.rs | 6 +- graphql/src/store/query.rs | 6 +- graphql/src/store/resolver.rs | 4 +- graphql/src/subscription/mod.rs | 3 +- server/index-node/src/schema.rs | 2 +- store/postgres/src/deployment_store.rs | 9 +- store/postgres/src/query_store.rs | 1 + store/postgres/src/subgraph_store.rs | 6 +- .../test-store/tests/graphql/introspection.rs | 6 +- 20 files changed, 256 insertions(+), 251 deletions(-) rename graph/src/{data => schema}/introspection.graphql (100%) diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index b3ae512acab..6d36c582353 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -9,7 +9,7 @@ use crate::data::query::Trace; 
use crate::data::subgraph::status; use crate::data::value::Word; use crate::data::{query::QueryTarget, subgraph::schema::*}; -use crate::schema::InputSchema; +use crate::schema::{ApiSchema, InputSchema}; pub trait SubscriptionManager: Send + Sync + 'static { /// Subscribe to changes for specific subgraphs and entities. diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 5137265a23b..a199ea1ca2b 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -1,31 +1,27 @@ -use crate::cheap_clone::CheapClone; use crate::components::store::EntityType; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::ValueType; use crate::data::subgraph::DeploymentHash; use crate::prelude::{ - anyhow, lazy_static, + anyhow, q::Value, s::{self, Definition, InterfaceType, ObjectType, TypeDefinition, *}, }; use crate::schema::{FulltextAlgorithm, FulltextLanguage}; -use anyhow::{Context, Error}; +use anyhow::Error; use graphql_parser::{self, Pos}; use inflector::Inflector; use itertools::Itertools; use serde::{Deserialize, Serialize}; use thiserror::Error; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashSet}; use std::convert::TryFrom; use std::fmt; use std::iter::FromIterator; use std::str::FromStr; -use std::sync::Arc; - -use super::graphql::ObjectOrInterface; pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; @@ -103,220 +99,6 @@ pub enum SchemaValidationError { FulltextIncludedFieldInvalid(String), } -#[derive(Debug)] -pub struct ApiSchema { - schema: Schema, - - // Root types for the api schema. - pub query_type: Arc, - pub subscription_type: Option>, - object_types: HashMap>, -} - -impl ApiSchema { - /// `api_schema` will typically come from `fn api_schema` in the graphql - /// crate. - /// - /// In addition, the API schema has an introspection schema mixed into - /// `api_schema`. 
In particular, the `Query` type has fields called - /// `__schema` and `__type` - pub fn from_api_schema(mut api_schema: Schema) -> Result { - add_introspection_schema(&mut api_schema.document); - - let query_type = api_schema - .document - .get_root_query_type() - .context("no root `Query` in the schema")? - .clone(); - let subscription_type = api_schema - .document - .get_root_subscription_type() - .cloned() - .map(Arc::new); - - let object_types = HashMap::from_iter( - api_schema - .document - .get_object_type_definitions() - .into_iter() - .map(|obj_type| (obj_type.name.clone(), Arc::new(obj_type.clone()))), - ); - - Ok(Self { - schema: api_schema, - query_type: Arc::new(query_type), - subscription_type, - object_types, - }) - } - - pub fn document(&self) -> &s::Document { - &self.schema.document - } - - pub fn id(&self) -> &DeploymentHash { - &self.schema.id - } - - pub fn schema(&self) -> &Schema { - &self.schema - } - - pub fn types_for_interface(&self) -> &BTreeMap> { - &self.schema.types_for_interface - } - - /// Returns `None` if the type implements no interfaces. - pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { - self.schema.interfaces_for_type(type_name) - } - - /// Return an `Arc` around the `ObjectType` from our internal cache - /// - /// # Panics - /// If `obj_type` is not part of this schema, this function panics - pub fn object_type(&self, obj_type: &ObjectType) -> Arc { - self.object_types - .get(&obj_type.name) - .expect("ApiSchema.object_type is only used with existing types") - .cheap_clone() - } - - pub fn get_named_type(&self, name: &str) -> Option<&TypeDefinition> { - self.schema.document.get_named_type(name) - } - - /// Returns true if the given type is an input type. - /// - /// Uses the algorithm outlined on - /// https://facebook.github.io/graphql/draft/#IsInputType(). 
- pub fn is_input_type(&self, t: &s::Type) -> bool { - match t { - s::Type::NamedType(name) => { - let named_type = self.get_named_type(name); - named_type.map_or(false, |type_def| match type_def { - s::TypeDefinition::Scalar(_) - | s::TypeDefinition::Enum(_) - | s::TypeDefinition::InputObject(_) => true, - _ => false, - }) - } - s::Type::ListType(inner) => self.is_input_type(inner), - s::Type::NonNullType(inner) => self.is_input_type(inner), - } - } - - pub fn get_root_query_type_def(&self) -> Option<&s::TypeDefinition> { - self.schema - .document - .definitions - .iter() - .find_map(|d| match d { - s::Definition::TypeDefinition(def @ s::TypeDefinition::Object(_)) => match def { - s::TypeDefinition::Object(t) if t.name == "Query" => Some(def), - _ => None, - }, - _ => None, - }) - } - - pub fn object_or_interface(&self, name: &str) -> Option> { - if name.starts_with("__") { - INTROSPECTION_SCHEMA.object_or_interface(name) - } else { - self.schema.document.object_or_interface(name) - } - } - - /// Returns the type definition that a field type corresponds to. - pub fn get_type_definition_from_field<'a>( - &'a self, - field: &s::Field, - ) -> Option<&'a s::TypeDefinition> { - self.get_type_definition_from_type(&field.field_type) - } - - /// Returns the type definition for a type. - pub fn get_type_definition_from_type<'a>( - &'a self, - t: &s::Type, - ) -> Option<&'a s::TypeDefinition> { - match t { - s::Type::NamedType(name) => self.get_named_type(name), - s::Type::ListType(inner) => self.get_type_definition_from_type(inner), - s::Type::NonNullType(inner) => self.get_type_definition_from_type(inner), - } - } - - #[cfg(debug_assertions)] - pub fn definitions(&self) -> impl Iterator> { - self.schema.document.definitions.iter() - } -} - -lazy_static! 
{ - static ref INTROSPECTION_SCHEMA: Document = { - let schema = include_str!("introspection.graphql"); - parse_schema(schema).expect("the schema `introspection.graphql` is invalid") - }; -} - -fn add_introspection_schema(schema: &mut Document) { - fn introspection_fields() -> Vec { - // Generate fields for the root query fields in an introspection schema, - // the equivalent of the fields of the `Query` type: - // - // type Query { - // __schema: __Schema! - // __type(name: String!): __Type - // } - - let type_args = vec![InputValue { - position: Pos::default(), - description: None, - name: "name".to_string(), - value_type: Type::NonNullType(Box::new(Type::NamedType("String".to_string()))), - default_value: None, - directives: vec![], - }]; - - vec![ - Field { - position: Pos::default(), - description: None, - name: "__schema".to_string(), - arguments: vec![], - field_type: Type::NonNullType(Box::new(Type::NamedType("__Schema".to_string()))), - directives: vec![], - }, - Field { - position: Pos::default(), - description: None, - name: "__type".to_string(), - arguments: type_args, - field_type: Type::NamedType("__Type".to_string()), - directives: vec![], - }, - ] - } - - schema - .definitions - .extend(INTROSPECTION_SCHEMA.definitions.iter().cloned()); - - let query_type = schema - .definitions - .iter_mut() - .filter_map(|d| match d { - Definition::TypeDefinition(TypeDefinition::Object(t)) if t.name == "Query" => Some(t), - _ => None, - }) - .peekable() - .next() - .expect("no root `Query` in the schema"); - query_type.fields.append(&mut introspection_fields()); -} - /// A validated and preprocessed GraphQL schema for a subgraph. 
#[derive(Clone, Debug, PartialEq)] pub struct Schema { diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 23992e0f96a..7e1c4f36832 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -148,7 +148,7 @@ pub mod prelude { pub use crate::data::query::{ Query, QueryError, QueryExecutionError, QueryResult, QueryTarget, QueryVariables, }; - pub use crate::data::schema::{ApiSchema, Schema}; + pub use crate::data::schema::Schema; pub use crate::data::store::ethereum::*; pub use crate::data::store::scalar::{BigDecimal, BigInt, BigIntSign}; pub use crate::data::store::{ diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 9e549a53efe..7d86c1b89ce 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -1,9 +1,12 @@ +use std::collections::{BTreeMap, HashMap}; use std::str::FromStr; use graphql_parser::{schema::TypeDefinition, Pos}; use inflector::Inflector; use lazy_static::lazy_static; +use crate::components::store::EntityType; +use crate::data::graphql::ObjectOrInterface; use crate::schema::ast; use crate::data::{ @@ -71,6 +74,220 @@ impl TryFrom<&r::Value> for ErrorPolicy { } } +#[derive(Debug)] +pub struct ApiSchema { + schema: Schema, + + // Root types for the api schema. + pub query_type: Arc, + pub subscription_type: Option>, + object_types: HashMap>, +} + +impl ApiSchema { + /// `api_schema` will typically come from `fn api_schema` in the graphql + /// crate. + /// + /// In addition, the API schema has an introspection schema mixed into + /// `api_schema`. In particular, the `Query` type has fields called + /// `__schema` and `__type` + pub fn from_api_schema(mut api_schema: Schema) -> Result { + add_introspection_schema(&mut api_schema.document); + + let query_type = api_schema + .document + .get_root_query_type() + .context("no root `Query` in the schema")? 
+ .clone(); + let subscription_type = api_schema + .document + .get_root_subscription_type() + .cloned() + .map(Arc::new); + + let object_types = HashMap::from_iter( + api_schema + .document + .get_object_type_definitions() + .into_iter() + .map(|obj_type| (obj_type.name.clone(), Arc::new(obj_type.clone()))), + ); + + Ok(Self { + schema: api_schema, + query_type: Arc::new(query_type), + subscription_type, + object_types, + }) + } + + pub fn document(&self) -> &s::Document { + &self.schema.document + } + + pub fn id(&self) -> &DeploymentHash { + &self.schema.id + } + + pub fn schema(&self) -> &Schema { + &self.schema + } + + pub fn types_for_interface(&self) -> &BTreeMap> { + &self.schema.types_for_interface + } + + /// Returns `None` if the type implements no interfaces. + pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { + self.schema.interfaces_for_type(type_name) + } + + /// Return an `Arc` around the `ObjectType` from our internal cache + /// + /// # Panics + /// If `obj_type` is not part of this schema, this function panics + pub fn object_type(&self, obj_type: &ObjectType) -> Arc { + self.object_types + .get(&obj_type.name) + .expect("ApiSchema.object_type is only used with existing types") + .cheap_clone() + } + + pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { + self.schema.document.get_named_type(name) + } + + /// Returns true if the given type is an input type. + /// + /// Uses the algorithm outlined on + /// https://facebook.github.io/graphql/draft/#IsInputType(). 
+ pub fn is_input_type(&self, t: &s::Type) -> bool { + match t { + s::Type::NamedType(name) => { + let named_type = self.get_named_type(name); + named_type.map_or(false, |type_def| match type_def { + s::TypeDefinition::Scalar(_) + | s::TypeDefinition::Enum(_) + | s::TypeDefinition::InputObject(_) => true, + _ => false, + }) + } + s::Type::ListType(inner) => self.is_input_type(inner), + s::Type::NonNullType(inner) => self.is_input_type(inner), + } + } + + pub fn get_root_query_type_def(&self) -> Option<&s::TypeDefinition> { + self.schema + .document + .definitions + .iter() + .find_map(|d| match d { + s::Definition::TypeDefinition(def @ s::TypeDefinition::Object(_)) => match def { + s::TypeDefinition::Object(t) if t.name == "Query" => Some(def), + _ => None, + }, + _ => None, + }) + } + + pub fn object_or_interface(&self, name: &str) -> Option> { + if name.starts_with("__") { + INTROSPECTION_SCHEMA.object_or_interface(name) + } else { + self.schema.document.object_or_interface(name) + } + } + + /// Returns the type definition that a field type corresponds to. + pub fn get_type_definition_from_field<'a>( + &'a self, + field: &s::Field, + ) -> Option<&'a s::TypeDefinition> { + self.get_type_definition_from_type(&field.field_type) + } + + /// Returns the type definition for a type. + pub fn get_type_definition_from_type<'a>( + &'a self, + t: &s::Type, + ) -> Option<&'a s::TypeDefinition> { + match t { + s::Type::NamedType(name) => self.get_named_type(name), + s::Type::ListType(inner) => self.get_type_definition_from_type(inner), + s::Type::NonNullType(inner) => self.get_type_definition_from_type(inner), + } + } + + #[cfg(debug_assertions)] + pub fn definitions(&self) -> impl Iterator> { + self.schema.document.definitions.iter() + } +} + +lazy_static! 
{ + static ref INTROSPECTION_SCHEMA: Document = { + let schema = include_str!("introspection.graphql"); + parse_schema(schema).expect("the schema `introspection.graphql` is invalid") + }; +} + +fn add_introspection_schema(schema: &mut Document) { + fn introspection_fields() -> Vec { + // Generate fields for the root query fields in an introspection schema, + // the equivalent of the fields of the `Query` type: + // + // type Query { + // __schema: __Schema! + // __type(name: String!): __Type + // } + + let type_args = vec![InputValue { + position: Pos::default(), + description: None, + name: "name".to_string(), + value_type: Type::NonNullType(Box::new(Type::NamedType("String".to_string()))), + default_value: None, + directives: vec![], + }]; + + vec![ + Field { + position: Pos::default(), + description: None, + name: "__schema".to_string(), + arguments: vec![], + field_type: Type::NonNullType(Box::new(Type::NamedType("__Schema".to_string()))), + directives: vec![], + }, + Field { + position: Pos::default(), + description: None, + name: "__type".to_string(), + arguments: type_args, + field_type: Type::NamedType("__Type".to_string()), + directives: vec![], + }, + ] + } + + schema + .definitions + .extend(INTROSPECTION_SCHEMA.definitions.iter().cloned()); + + let query_type = schema + .definitions + .iter_mut() + .filter_map(|d| match d { + Definition::TypeDefinition(TypeDefinition::Object(t)) if t.name == "Query" => Some(t), + _ => None, + }) + .peekable() + .next() + .expect("no root `Query` in the schema"); + query_type.fields.append(&mut introspection_fields()); +} + /// Derives a full-fledged GraphQL API schema from an input schema. 
/// /// The input schema should only have type/enum/interface/union definitions diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 2693531a4d5..b89d28e678e 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -9,10 +9,11 @@ use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, Va use crate::data::schema::{Schema, SchemaValidationError}; use crate::data::store::{self, scalar}; use crate::prelude::q::Value; -use crate::prelude::{s, ApiSchema, DeploymentHash}; +use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; use super::fulltext::FulltextDefinition; +use super::ApiSchema; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { diff --git a/graph/src/data/introspection.graphql b/graph/src/schema/introspection.graphql similarity index 100% rename from graph/src/data/introspection.graphql rename to graph/src/schema/introspection.graphql diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 127232e1218..13e01b4114f 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -1,5 +1,5 @@ /// Generate full-fledged API schemas from existing GraphQL schemas. -pub mod api; +mod api; /// Utilities for working with GraphQL schema ASTs. pub mod ast; @@ -9,5 +9,6 @@ mod input_schema; pub use api::{api_schema, APISchemaError}; +pub use api::{ApiSchema, ErrorPolicy}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; diff --git a/graphql/examples/schema.rs b/graphql/examples/schema.rs index 2d4ee14e346..c23ae9de502 100644 --- a/graphql/examples/schema.rs +++ b/graphql/examples/schema.rs @@ -3,7 +3,7 @@ use std::env; use std::fs; use std::process::exit; -use graph::schema::api::api_schema; +use graph::schema::api_schema; pub fn usage(msg: &str) -> ! 
{ println!("{}", msg); diff --git a/graphql/src/execution/ast.rs b/graphql/src/execution/ast.rs index 464836c1566..20b08ec0e24 100644 --- a/graphql/src/execution/ast.rs +++ b/graphql/src/execution/ast.rs @@ -3,8 +3,8 @@ use std::collections::HashSet; use graph::{ components::store::EntityType, data::graphql::ObjectOrInterface, - prelude::{anyhow, q, r, s, ApiSchema, QueryExecutionError, ValueMap}, - schema::ast::ObjectType, + prelude::{anyhow, q, r, s, QueryExecutionError, ValueMap}, + schema::{ast::ObjectType, ApiSchema}, }; use graphql_parser::Pos; diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index 6574c86f673..a23a870ced7 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -1,5 +1,6 @@ use graph::data::graphql::DocumentExt as _; use graph::data::value::Object; +use graph::schema::ApiSchema; use graphql_parser::Pos; use graphql_tools::validation::rules::*; use graphql_tools::validation::validate::{validate, ValidationPlan}; @@ -14,13 +15,12 @@ use std::{collections::hash_map::DefaultHasher, convert::TryFrom}; use graph::data::graphql::{ext::TypeExt, ObjectOrInterface}; use graph::data::query::QueryExecutionError; use graph::data::query::{Query as GraphDataQuery, QueryVariables}; -use graph::data::schema::ApiSchema; use graph::prelude::{ info, o, q, r, s, warn, BlockNumber, CheapClone, DeploymentHash, GraphQLMetrics, Logger, TryFromValue, ENV_VARS, }; -use graph::schema::api::ErrorPolicy; use graph::schema::ast::{self as sast}; +use graph::schema::ErrorPolicy; use crate::execution::ast as a; use crate::execution::get_field; diff --git a/graphql/src/execution/resolver.rs b/graphql/src/execution/resolver.rs index f601b6f8f36..a95267f0850 100644 --- a/graphql/src/execution/resolver.rs +++ b/graphql/src/execution/resolver.rs @@ -1,6 +1,7 @@ use graph::components::store::UnitStream; use graph::data::query::Trace; -use graph::prelude::{async_trait, s, tokio, ApiSchema, Error, QueryExecutionError}; +use 
graph::prelude::{async_trait, s, tokio, Error, QueryExecutionError}; +use graph::schema::ApiSchema; use graph::{ data::graphql::ObjectOrInterface, prelude::{r, QueryResult}, diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 55e4ef028ae..fcacaba8852 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -12,13 +12,13 @@ use std::collections::BTreeMap; use std::rc::Rc; use std::time::Instant; -use graph::schema::ast as sast; +use graph::schema::{ast as sast, ApiSchema}; use graph::{components::store::EntityType, data::graphql::*}; use graph::{ data::graphql::ext::DirectiveFinder, prelude::{ - s, ApiSchema, AttributeNames, ChildMultiplicity, EntityCollection, EntityFilter, - EntityLink, EntityOrder, EntityWindow, ParentLink, QueryExecutionError, StoreError, + s, AttributeNames, ChildMultiplicity, EntityCollection, EntityFilter, EntityLink, + EntityOrder, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value as StoreValue, WindowAttribute, ENV_VARS, }, }; diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 04373926a67..a7305547db5 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -7,6 +7,7 @@ use graph::data::value::Object; use graph::data::value::Value as DataValue; use graph::prelude::*; use graph::schema::ast::{self as sast, FilterOp}; +use graph::schema::ApiSchema; use graph::{components::store::EntityType, data::graphql::ObjectOrInterface}; use crate::execution::ast as a; @@ -749,13 +750,14 @@ mod tests { components::store::EntityType, data::value::Object, prelude::{ - r, ApiSchema, AttributeNames, DeploymentHash, EntityCollection, EntityFilter, - EntityRange, Schema, Value, ValueType, BLOCK_NUMBER_MAX, + r, AttributeNames, DeploymentHash, EntityCollection, EntityFilter, EntityRange, Schema, + Value, ValueType, BLOCK_NUMBER_MAX, }, prelude::{ s::{self, Directive, Field, InputValue, ObjectType, Type, Value as SchemaValue}, EntityOrder, }, + 
schema::ApiSchema, }; use graphql_parser::Pos; use std::{collections::BTreeMap, iter::FromIterator, sync::Arc}; diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 575ee7342e4..61e24431972 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -9,8 +9,8 @@ use graph::data::{ schema::META_FIELD_TYPE, }; use graph::prelude::*; -use graph::schema::api::ErrorPolicy; -use graph::schema::ast as sast; +use graph::schema::ErrorPolicy; +use graph::schema::{ast as sast, ApiSchema}; use graph::{components::store::*, data::schema::BLOCK_FIELD_TYPE}; use crate::execution::ast as a; diff --git a/graphql/src/subscription/mod.rs b/graphql/src/subscription/mod.rs index f231fafb53b..c12d9d904bf 100644 --- a/graphql/src/subscription/mod.rs +++ b/graphql/src/subscription/mod.rs @@ -2,7 +2,8 @@ use std::result::Result; use std::time::{Duration, Instant}; use graph::components::store::UnitStream; -use graph::{components::store::SubscriptionManager, prelude::*, schema::api::ErrorPolicy}; +use graph::schema::ApiSchema; +use graph::{components::store::SubscriptionManager, prelude::*, schema::ErrorPolicy}; use crate::metrics::GraphQLMetrics; use crate::{ diff --git a/server/index-node/src/schema.rs b/server/index-node/src/schema.rs index bc3852770f1..d2593adff45 100644 --- a/server/index-node/src/schema.rs +++ b/server/index-node/src/schema.rs @@ -1,4 +1,4 @@ -use graph::prelude::*; +use graph::{prelude::*, schema::ApiSchema}; lazy_static! 
{ pub static ref SCHEMA: Arc = { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 2e422638ddc..1ee4ad343d1 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -38,12 +38,11 @@ use graph::components::subgraph::{ProofOfIndexingFinisher, ProofOfIndexingVersio use graph::constraint_violation; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_OBJECT}; use graph::prelude::{ - anyhow, debug, info, o, warn, web3, ApiSchema, AttributeNames, BlockNumber, BlockPtr, - CheapClone, DeploymentHash, DeploymentState, Entity, EntityModification, EntityQuery, Error, - Logger, QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, - ENV_VARS, + anyhow, debug, info, o, warn, web3, AttributeNames, BlockNumber, BlockPtr, CheapClone, + DeploymentHash, DeploymentState, Entity, EntityModification, EntityQuery, Error, Logger, + QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, ENV_VARS, }; -use graph::schema::InputSchema; +use graph::schema::{ApiSchema, InputSchema}; use web3::types::Address; use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index ccc5142d77d..18cb765878d 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -5,6 +5,7 @@ use graph::components::store::QueryStore as QueryStoreTrait; use graph::data::query::Trace; use graph::data::value::Word; use graph::prelude::*; +use graph::schema::ApiSchema; use crate::primary::Site; diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 4d10456694d..7ae6c7ef71c 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -25,12 +25,12 @@ use graph::{ data::subgraph::{schema::DeploymentCreate, status}, prelude::StoreEvent, prelude::{ - anyhow, 
futures03::future::join_all, lazy_static, o, web3::types::Address, ApiSchema, - ApiVersion, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, + anyhow, futures03::future::join_all, lazy_static, o, web3::types::Address, ApiVersion, + BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, MetricsRegistry, NodeId, PartialBlockPtr, StoreError, SubgraphDeploymentEntity, SubgraphName, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }, - schema::InputSchema, + schema::{ApiSchema, InputSchema}, url::Url, util::timed_cache::TimedCache, }; diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index bc5f86dc3d4..dba67b8165f 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -3,10 +3,10 @@ use std::sync::Arc; use graph::data::graphql::{object, object_value, ObjectOrInterface}; use graph::data::query::Trace; use graph::prelude::{ - async_trait, o, r, s, slog, tokio, ApiSchema, DeploymentHash, Logger, Query, - QueryExecutionError, QueryResult, Schema, + async_trait, o, r, s, slog, tokio, DeploymentHash, Logger, Query, QueryExecutionError, + QueryResult, Schema, }; -use graph::schema::api_schema; +use graph::schema::{api_schema, ApiSchema}; use graph_graphql::prelude::{ a, execute_query, ExecutionContext, Query as PreparedQuery, QueryExecutionOptions, Resolver, From cf0c56288b7873f427b87d50a133dca755662cfe Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 7 Apr 2023 20:03:54 -0700 Subject: [PATCH 0140/2104] all: Move remainder of graph::data::schema into graph::schema --- graph/src/data/graphql/ext.rs | 2 +- graph/src/data/graphql/object_or_interface.rs | 2 +- graph/src/data/mod.rs | 3 - graph/src/data/schema.rs | 1077 ----------------- graph/src/data/subgraph/mod.rs | 6 +- graph/src/lib.rs | 1 - graph/src/schema/api.rs | 9 +- graph/src/schema/input_schema.rs | 3 +- 
graph/src/schema/mod.rs | 1077 +++++++++++++++++ graphql/src/execution/execution.rs | 3 +- graphql/src/introspection/resolver.rs | 2 +- graphql/src/store/query.rs | 6 +- graphql/src/store/resolver.rs | 11 +- server/index-node/src/schema.rs | 5 +- store/postgres/src/relational.rs | 3 +- .../test-store/tests/graphql/introspection.rs | 4 +- 16 files changed, 1104 insertions(+), 1110 deletions(-) delete mode 100644 graph/src/data/schema.rs diff --git a/graph/src/data/graphql/ext.rs b/graph/src/data/graphql/ext.rs index c25cdbc0e88..ac7c956d97e 100644 --- a/graph/src/data/graphql/ext.rs +++ b/graph/src/data/graphql/ext.rs @@ -1,10 +1,10 @@ use super::ObjectOrInterface; -use crate::data::schema::{META_FIELD_TYPE, SCHEMA_TYPE_NAME}; use crate::prelude::s::{ Definition, Directive, Document, EnumType, Field, InterfaceType, ObjectType, Type, TypeDefinition, Value, }; use crate::prelude::ENV_VARS; +use crate::schema::{META_FIELD_TYPE, SCHEMA_TYPE_NAME}; use std::collections::{BTreeMap, HashMap}; pub trait ObjectTypeExt { diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index 7764769a1a0..dfefdfad2c6 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs @@ -1,4 +1,4 @@ -use crate::prelude::Schema; +use crate::schema::Schema; use crate::{components::store::EntityType, prelude::s}; use std::cmp::Ordering; use std::collections::BTreeMap; diff --git a/graph/src/data/mod.rs b/graph/src/data/mod.rs index b308c75bb29..45f085c96fa 100644 --- a/graph/src/data/mod.rs +++ b/graph/src/data/mod.rs @@ -4,9 +4,6 @@ pub mod subgraph; /// Data types for dealing with GraphQL queries. pub mod query; -/// Data types for dealing with GraphQL schemas. -pub mod schema; - /// Data types for dealing with storing entities. 
pub mod store; diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs deleted file mode 100644 index a199ea1ca2b..00000000000 --- a/graph/src/data/schema.rs +++ /dev/null @@ -1,1077 +0,0 @@ -use crate::components::store::EntityType; -use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; -use crate::data::graphql::ObjectTypeExt; -use crate::data::store::ValueType; -use crate::data::subgraph::DeploymentHash; -use crate::prelude::{ - anyhow, - q::Value, - s::{self, Definition, InterfaceType, ObjectType, TypeDefinition, *}, -}; -use crate::schema::{FulltextAlgorithm, FulltextLanguage}; - -use anyhow::Error; -use graphql_parser::{self, Pos}; -use inflector::Inflector; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -use std::collections::{BTreeMap, HashSet}; -use std::convert::TryFrom; -use std::fmt; -use std::iter::FromIterator; -use std::str::FromStr; - -pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; - -pub const META_FIELD_TYPE: &str = "_Meta_"; -pub const META_FIELD_NAME: &str = "_meta"; - -pub const BLOCK_FIELD_TYPE: &str = "_Block_"; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Strings(Vec); - -impl fmt::Display for Strings { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let s = self.0.join(", "); - write!(f, "{}", s) - } -} - -#[derive(Debug, Error, PartialEq, Eq)] -pub enum SchemaValidationError { - #[error("Interface `{0}` not defined")] - InterfaceUndefined(String), - - #[error("@entity directive missing on the following types: `{0}`")] - EntityDirectivesMissing(Strings), - - #[error( - "Entity type `{0}` does not satisfy interface `{1}` because it is missing \ - the following fields: {2}" - )] - InterfaceFieldsMissing(String, String, Strings), // (type, interface, missing_fields) - #[error("Implementors of interface `{0}` use different id types `{1}`. 
They must all use the same type")] - InterfaceImplementorsMixId(String, String), - #[error("Field `{1}` in type `{0}` has invalid @derivedFrom: {2}")] - InvalidDerivedFrom(String, String, String), // (type, field, reason) - #[error("The following type names are reserved: `{0}`")] - UsageOfReservedTypes(Strings), - #[error("_Schema_ type is only for @fulltext and must not have any fields")] - SchemaTypeWithFields, - #[error("The _Schema_ type only allows @fulltext directives")] - InvalidSchemaTypeDirectives, - #[error("Type `{0}`, field `{1}`: type `{2}` is not defined")] - FieldTypeUnknown(String, String, String), // (type_name, field_name, field_type) - #[error("Imported type `{0}` does not exist in the `{1}` schema")] - ImportedTypeUndefined(String, String), // (type_name, schema) - #[error("Fulltext directive name undefined")] - FulltextNameUndefined, - #[error("Fulltext directive name overlaps with type: {0}")] - FulltextNameConflict(String), - #[error("Fulltext directive name overlaps with an existing entity field or a top-level query field: {0}")] - FulltextNameCollision(String), - #[error("Fulltext language is undefined")] - FulltextLanguageUndefined, - #[error("Fulltext language is invalid: {0}")] - FulltextLanguageInvalid(String), - #[error("Fulltext algorithm is undefined")] - FulltextAlgorithmUndefined, - #[error("Fulltext algorithm is invalid: {0}")] - FulltextAlgorithmInvalid(String), - #[error("Fulltext include is invalid")] - FulltextIncludeInvalid, - #[error("Fulltext directive requires an 'include' list")] - FulltextIncludeUndefined, - #[error("Fulltext 'include' list must contain an object")] - FulltextIncludeObjectMissing, - #[error( - "Fulltext 'include' object must contain 'entity' (String) and 'fields' (List) attributes" - )] - FulltextIncludeEntityMissingOrIncorrectAttributes, - #[error("Fulltext directive includes an entity not found on the subgraph schema")] - FulltextIncludedEntityNotFound, - #[error("Fulltext include field must have a 
'name' attribute")] - FulltextIncludedFieldMissingRequiredProperty, - #[error("Fulltext entity field, {0}, not found or not a string")] - FulltextIncludedFieldInvalid(String), -} - -/// A validated and preprocessed GraphQL schema for a subgraph. -#[derive(Clone, Debug, PartialEq)] -pub struct Schema { - pub id: DeploymentHash, - pub document: s::Document, - - // Maps type name to implemented interfaces. - pub interfaces_for_type: BTreeMap>, - - // Maps an interface name to the list of entities that implement it. - pub types_for_interface: BTreeMap>, -} - -impl Schema { - /// Create a new schema. The document must already have been validated - // - // TODO: The way some validation is expected to be done beforehand, and - // some is done here makes it incredibly murky whether a `Schema` is - // fully validated. The code should be changed to make sure that a - // `Schema` is always fully valid - pub fn new(id: DeploymentHash, document: s::Document) -> Result { - let (interfaces_for_type, types_for_interface) = Self::collect_interfaces(&document)?; - - let mut schema = Schema { - id: id.clone(), - document, - interfaces_for_type, - types_for_interface, - }; - - schema.add_subgraph_id_directives(id); - - Ok(schema) - } - - fn collect_interfaces( - document: &s::Document, - ) -> Result< - ( - BTreeMap>, - BTreeMap>, - ), - SchemaValidationError, - > { - // Initialize with an empty vec for each interface, so we don't - // miss interfaces that have no implementors. 
- let mut types_for_interface = - BTreeMap::from_iter(document.definitions.iter().filter_map(|d| match d { - Definition::TypeDefinition(TypeDefinition::Interface(t)) => { - Some((EntityType::from(t), vec![])) - } - _ => None, - })); - let mut interfaces_for_type = BTreeMap::<_, Vec<_>>::new(); - - for object_type in document.get_object_type_definitions() { - for implemented_interface in object_type.implements_interfaces.clone() { - let interface_type = document - .definitions - .iter() - .find_map(|def| match def { - Definition::TypeDefinition(TypeDefinition::Interface(i)) - if i.name.eq(&implemented_interface) => - { - Some(i.clone()) - } - _ => None, - }) - .ok_or_else(|| { - SchemaValidationError::InterfaceUndefined(implemented_interface.clone()) - })?; - - Self::validate_interface_implementation(object_type, &interface_type)?; - - interfaces_for_type - .entry(EntityType::from(object_type)) - .or_default() - .push(interface_type); - types_for_interface - .get_mut(&EntityType::new(implemented_interface)) - .unwrap() - .push(object_type.clone()); - } - } - - Ok((interfaces_for_type, types_for_interface)) - } - - pub fn parse(raw: &str, id: DeploymentHash) -> Result { - let document = graphql_parser::parse_schema(raw)?.into_static(); - - Schema::new(id, document).map_err(Into::into) - } - - /// Returned map has one an entry for each interface in the schema. - pub fn types_for_interface(&self) -> &BTreeMap> { - &self.types_for_interface - } - - /// Returns `None` if the type implements no interfaces. - pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { - self.interfaces_for_type.get(type_name) - } - - // Adds a @subgraphId(id: ...) directive to object/interface/enum types in the schema. 
- pub fn add_subgraph_id_directives(&mut self, id: DeploymentHash) { - for definition in self.document.definitions.iter_mut() { - let subgraph_id_argument = (String::from("id"), s::Value::String(id.to_string())); - - let subgraph_id_directive = s::Directive { - name: "subgraphId".to_string(), - position: Pos::default(), - arguments: vec![subgraph_id_argument], - }; - - if let Definition::TypeDefinition(ref mut type_definition) = definition { - let (name, directives) = match type_definition { - TypeDefinition::Object(object_type) => { - (&object_type.name, &mut object_type.directives) - } - TypeDefinition::Interface(interface_type) => { - (&interface_type.name, &mut interface_type.directives) - } - TypeDefinition::Enum(enum_type) => (&enum_type.name, &mut enum_type.directives), - TypeDefinition::Scalar(scalar_type) => { - (&scalar_type.name, &mut scalar_type.directives) - } - TypeDefinition::InputObject(input_object_type) => { - (&input_object_type.name, &mut input_object_type.directives) - } - TypeDefinition::Union(union_type) => { - (&union_type.name, &mut union_type.directives) - } - }; - - if !name.eq(SCHEMA_TYPE_NAME) - && !directives - .iter() - .any(|directive| directive.name.eq("subgraphId")) - { - directives.push(subgraph_id_directive); - } - }; - } - } - - pub fn validate(&self) -> Result<(), Vec> { - let mut errors: Vec = [ - self.validate_schema_types(), - self.validate_derived_from(), - self.validate_schema_type_has_no_fields(), - self.validate_directives_on_schema_type(), - self.validate_reserved_types_usage(), - self.validate_interface_id_type(), - ] - .into_iter() - .filter(Result::is_err) - // Safe unwrap due to the filter above - .map(Result::unwrap_err) - .collect(); - - errors.append(&mut self.validate_fields()); - errors.append(&mut self.validate_fulltext_directives()); - - if errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } - - fn validate_schema_type_has_no_fields(&self) -> Result<(), SchemaValidationError> { - match self - 
.subgraph_schema_object_type() - .and_then(|subgraph_schema_type| { - if !subgraph_schema_type.fields.is_empty() { - Some(SchemaValidationError::SchemaTypeWithFields) - } else { - None - } - }) { - Some(err) => Err(err), - None => Ok(()), - } - } - - fn validate_directives_on_schema_type(&self) -> Result<(), SchemaValidationError> { - match self - .subgraph_schema_object_type() - .and_then(|subgraph_schema_type| { - if subgraph_schema_type - .directives - .iter() - .filter(|directive| !directive.name.eq("fulltext")) - .next() - .is_some() - { - Some(SchemaValidationError::InvalidSchemaTypeDirectives) - } else { - None - } - }) { - Some(err) => Err(err), - None => Ok(()), - } - } - - fn validate_fulltext_directives(&self) -> Vec { - self.subgraph_schema_object_type() - .map_or(vec![], |subgraph_schema_type| { - subgraph_schema_type - .directives - .iter() - .filter(|directives| directives.name.eq("fulltext")) - .fold(vec![], |mut errors, fulltext| { - errors.extend(self.validate_fulltext_directive_name(fulltext).into_iter()); - errors.extend( - self.validate_fulltext_directive_language(fulltext) - .into_iter(), - ); - errors.extend( - self.validate_fulltext_directive_algorithm(fulltext) - .into_iter(), - ); - errors.extend( - self.validate_fulltext_directive_includes(fulltext) - .into_iter(), - ); - errors - }) - }) - } - - fn validate_fulltext_directive_name(&self, fulltext: &Directive) -> Vec { - let name = match fulltext.argument("name") { - Some(Value::String(name)) => name, - _ => return vec![SchemaValidationError::FulltextNameUndefined], - }; - - let local_types: Vec<&ObjectType> = self - .document - .get_object_type_definitions() - .into_iter() - .collect(); - - // Validate that the fulltext field doesn't collide with any top-level Query fields - // generated for entity types. The field name conversions should always align with those used - // to create the field names in `graphql::schema::api::query_fields_for_type()`. 
- if local_types.iter().any(|typ| { - typ.fields.iter().any(|field| { - name == &field.name.as_str().to_camel_case() - || name == &field.name.to_plural().to_camel_case() - || field.name.eq(name) - }) - }) { - return vec![SchemaValidationError::FulltextNameCollision( - name.to_string(), - )]; - } - - // Validate that each fulltext directive has a distinct name - if self - .subgraph_schema_object_type() - .unwrap() - .directives - .iter() - .filter(|directive| directive.name.eq("fulltext")) - .filter_map(|fulltext| { - // Collect all @fulltext directives with the same name - match fulltext.argument("name") { - Some(Value::String(n)) if name.eq(n) => Some(n.as_str()), - _ => None, - } - }) - .count() - > 1 - { - vec![SchemaValidationError::FulltextNameConflict( - name.to_string(), - )] - } else { - vec![] - } - } - - fn validate_fulltext_directive_language( - &self, - fulltext: &Directive, - ) -> Vec { - let language = match fulltext.argument("language") { - Some(Value::Enum(language)) => language, - _ => return vec![SchemaValidationError::FulltextLanguageUndefined], - }; - match FulltextLanguage::try_from(language.as_str()) { - Ok(_) => vec![], - Err(_) => vec![SchemaValidationError::FulltextLanguageInvalid( - language.to_string(), - )], - } - } - - fn validate_fulltext_directive_algorithm( - &self, - fulltext: &Directive, - ) -> Vec { - let algorithm = match fulltext.argument("algorithm") { - Some(Value::Enum(algorithm)) => algorithm, - _ => return vec![SchemaValidationError::FulltextAlgorithmUndefined], - }; - match FulltextAlgorithm::try_from(algorithm.as_str()) { - Ok(_) => vec![], - Err(_) => vec![SchemaValidationError::FulltextAlgorithmInvalid( - algorithm.to_string(), - )], - } - } - - fn validate_fulltext_directive_includes( - &self, - fulltext: &Directive, - ) -> Vec { - // Only allow fulltext directive on local types - let local_types: Vec<&ObjectType> = self - .document - .get_object_type_definitions() - .into_iter() - .collect(); - - // Validate that each 
entity in fulltext.include exists - let includes = match fulltext.argument("include") { - Some(Value::List(includes)) if !includes.is_empty() => includes, - _ => return vec![SchemaValidationError::FulltextIncludeUndefined], - }; - - for include in includes { - match include.as_object() { - None => return vec![SchemaValidationError::FulltextIncludeObjectMissing], - Some(include_entity) => { - let (entity, fields) = - match (include_entity.get("entity"), include_entity.get("fields")) { - (Some(Value::String(entity)), Some(Value::List(fields))) => { - (entity, fields) - } - _ => return vec![SchemaValidationError::FulltextIncludeEntityMissingOrIncorrectAttributes], - }; - - // Validate the included entity type is one of the local types - let entity_type = match local_types - .iter() - .cloned() - .find(|typ| typ.name[..].eq(entity)) - { - None => return vec![SchemaValidationError::FulltextIncludedEntityNotFound], - Some(t) => t.clone(), - }; - - for field_value in fields { - let field_name = match field_value { - Value::Object(field_map) => match field_map.get("name") { - Some(Value::String(name)) => name, - _ => return vec![SchemaValidationError::FulltextIncludedFieldMissingRequiredProperty], - }, - _ => return vec![SchemaValidationError::FulltextIncludeEntityMissingOrIncorrectAttributes], - }; - - // Validate the included field is a String field on the local entity types specified - if !&entity_type - .fields - .iter() - .any(|field| { - let base_type: &str = field.field_type.get_base_type(); - matches!(ValueType::from_str(base_type), Ok(ValueType::String) if field.name.eq(field_name)) - }) - { - return vec![SchemaValidationError::FulltextIncludedFieldInvalid( - field_name.clone(), - )]; - }; - } - } - } - } - // Fulltext include validations all passed, so we return an empty vector - vec![] - } - - fn validate_fields(&self) -> Vec { - let local_types = self.document.get_object_and_interface_type_fields(); - let local_enums = self - .document - .get_enum_definitions() 
- .iter() - .map(|enu| enu.name.clone()) - .collect::>(); - local_types - .iter() - .fold(vec![], |errors, (type_name, fields)| { - fields.iter().fold(errors, |mut errors, field| { - let base = field.field_type.get_base_type(); - if ValueType::is_scalar(base) { - return errors; - } - if local_types.contains_key(base) { - return errors; - } - if local_enums.iter().any(|enu| enu.eq(base)) { - return errors; - } - errors.push(SchemaValidationError::FieldTypeUnknown( - type_name.to_string(), - field.name.to_string(), - base.to_string(), - )); - errors - }) - }) - } - - /// Checks if the schema is using types that are reserved - /// by `graph-node` - fn validate_reserved_types_usage(&self) -> Result<(), SchemaValidationError> { - let document = &self.document; - let object_types: Vec<_> = document - .get_object_type_definitions() - .into_iter() - .map(|obj_type| &obj_type.name) - .collect(); - - let interface_types: Vec<_> = document - .get_interface_type_definitions() - .into_iter() - .map(|iface_type| &iface_type.name) - .collect(); - - // TYPE_NAME_filter types for all object and interface types - let mut filter_types: Vec = object_types - .iter() - .chain(interface_types.iter()) - .map(|type_name| format!("{}_filter", type_name)) - .collect(); - - // TYPE_NAME_orderBy types for all object and interface types - let mut order_by_types: Vec<_> = object_types - .iter() - .chain(interface_types.iter()) - .map(|type_name| format!("{}_orderBy", type_name)) - .collect(); - - let mut reserved_types: Vec = vec![ - // The built-in scalar types - "Boolean".into(), - "ID".into(), - "Int".into(), - "BigDecimal".into(), - "String".into(), - "Bytes".into(), - "BigInt".into(), - // Reserved Query and Subscription types - "Query".into(), - "Subscription".into(), - ]; - - reserved_types.append(&mut filter_types); - reserved_types.append(&mut order_by_types); - - // `reserved_types` will now only contain - // the reserved types that the given schema *is* using. 
- // - // That is, if the schema is compliant and not using any reserved - // types, then it'll become an empty vector - reserved_types.retain(|reserved_type| document.get_named_type(reserved_type).is_some()); - - if reserved_types.is_empty() { - Ok(()) - } else { - Err(SchemaValidationError::UsageOfReservedTypes(Strings( - reserved_types, - ))) - } - } - - fn validate_schema_types(&self) -> Result<(), SchemaValidationError> { - let types_without_entity_directive = self - .document - .get_object_type_definitions() - .iter() - .filter(|t| t.find_directive("entity").is_none() && !t.name.eq(SCHEMA_TYPE_NAME)) - .map(|t| t.name.clone()) - .collect::>(); - if types_without_entity_directive.is_empty() { - Ok(()) - } else { - Err(SchemaValidationError::EntityDirectivesMissing(Strings( - types_without_entity_directive, - ))) - } - } - - fn validate_derived_from(&self) -> Result<(), SchemaValidationError> { - // Helper to construct a DerivedFromInvalid - fn invalid( - object_type: &ObjectType, - field_name: &str, - reason: &str, - ) -> SchemaValidationError { - SchemaValidationError::InvalidDerivedFrom( - object_type.name.clone(), - field_name.to_owned(), - reason.to_owned(), - ) - } - - let type_definitions = self.document.get_object_type_definitions(); - let object_and_interface_type_fields = self.document.get_object_and_interface_type_fields(); - - // Iterate over all derived fields in all entity types; include the - // interface types that the entity with the `@derivedFrom` implements - // and the `field` argument of @derivedFrom directive - for (object_type, interface_types, field, target_field) in type_definitions - .clone() - .iter() - .flat_map(|object_type| { - object_type - .fields - .iter() - .map(move |field| (object_type, field)) - }) - .filter_map(|(object_type, field)| { - field.find_directive("derivedFrom").map(|directive| { - ( - object_type, - object_type - .implements_interfaces - .iter() - .filter(|iface| { - // Any interface that has `field` can be used 
- // as the type of the field - self.document - .find_interface(iface) - .map(|iface| { - iface - .fields - .iter() - .any(|ifield| ifield.name.eq(&field.name)) - }) - .unwrap_or(false) - }) - .collect::>(), - field, - directive.argument("field"), - ) - }) - }) - { - // Turn `target_field` into the string name of the field - let target_field = target_field.ok_or_else(|| { - invalid( - object_type, - &field.name, - "the @derivedFrom directive must have a `field` argument", - ) - })?; - let target_field = match target_field { - Value::String(s) => s, - _ => { - return Err(invalid( - object_type, - &field.name, - "the @derivedFrom `field` argument must be a string", - )) - } - }; - - // Check that the type we are deriving from exists - let target_type_name = field.field_type.get_base_type(); - let target_fields = object_and_interface_type_fields - .get(target_type_name) - .ok_or_else(|| { - invalid( - object_type, - &field.name, - "type must be an existing entity or interface", - ) - })?; - - // Check that the type we are deriving from has a field with the - // right name and type - let target_field = target_fields - .iter() - .find(|field| field.name.eq(target_field)) - .ok_or_else(|| { - let msg = format!( - "field `{}` does not exist on type `{}`", - target_field, target_type_name - ); - invalid(object_type, &field.name, &msg) - })?; - - // The field we are deriving from has to point back to us; as an - // exception, we allow deriving from the `id` of another type. - // For that, we will wind up comparing the `id`s of the two types - // when we query, and just assume that that's ok. 
- let target_field_type = target_field.field_type.get_base_type(); - if target_field_type != object_type.name - && target_field_type != "ID" - && !interface_types - .iter() - .any(|iface| target_field_type.eq(iface.as_str())) - { - fn type_signatures(name: &str) -> Vec { - vec![ - format!("{}", name), - format!("{}!", name), - format!("[{}!]", name), - format!("[{}!]!", name), - ] - } - - let mut valid_types = type_signatures(&object_type.name); - valid_types.extend( - interface_types - .iter() - .flat_map(|iface| type_signatures(iface)), - ); - let valid_types = valid_types.join(", "); - - let msg = format!( - "field `{tf}` on type `{tt}` must have one of the following types: {valid_types}", - tf = target_field.name, - tt = target_type_name, - valid_types = valid_types, - ); - return Err(invalid(object_type, &field.name, &msg)); - } - } - Ok(()) - } - - /// Validate that `object` implements `interface`. - fn validate_interface_implementation( - object: &ObjectType, - interface: &InterfaceType, - ) -> Result<(), SchemaValidationError> { - // Check that all fields in the interface exist in the object with same name and type. 
- let mut missing_fields = vec![]; - for i in &interface.fields { - if !object - .fields - .iter() - .any(|o| o.name.eq(&i.name) && o.field_type.eq(&i.field_type)) - { - missing_fields.push(i.to_string().trim().to_owned()); - } - } - if !missing_fields.is_empty() { - Err(SchemaValidationError::InterfaceFieldsMissing( - object.name.clone(), - interface.name.clone(), - Strings(missing_fields), - )) - } else { - Ok(()) - } - } - - fn validate_interface_id_type(&self) -> Result<(), SchemaValidationError> { - for (intf, obj_types) in &self.types_for_interface { - let id_types: HashSet<&str> = HashSet::from_iter( - obj_types - .iter() - .filter_map(|obj_type| obj_type.field("id")) - .map(|f| f.field_type.get_base_type()) - .map(|name| if name == "ID" { "String" } else { name }), - ); - if id_types.len() > 1 { - return Err(SchemaValidationError::InterfaceImplementorsMixId( - intf.to_string(), - id_types.iter().join(", "), - )); - } - } - Ok(()) - } - - fn subgraph_schema_object_type(&self) -> Option<&ObjectType> { - self.document - .get_object_type_definitions() - .into_iter() - .find(|object_type| object_type.name.eq(SCHEMA_TYPE_NAME)) - } -} - -#[test] -fn non_existing_interface() { - let schema = "type Foo implements Bar @entity { foo: Int }"; - let res = Schema::parse(schema, DeploymentHash::new("dummy").unwrap()); - let error = res - .unwrap_err() - .downcast::() - .unwrap(); - assert_eq!( - error, - SchemaValidationError::InterfaceUndefined("Bar".to_owned()) - ); -} - -#[test] -fn invalid_interface_implementation() { - let schema = " - interface Foo { - x: Int, - y: Int - } - - type Bar implements Foo @entity { - x: Boolean - } - "; - let res = Schema::parse(schema, DeploymentHash::new("dummy").unwrap()); - assert_eq!( - res.unwrap_err().to_string(), - "Entity type `Bar` does not satisfy interface `Foo` because it is missing \ - the following fields: x: Int, y: Int", - ); -} - -#[test] -fn interface_implementations_id_type() { - fn check_schema(bar_id: &str, baz_id: 
&str, ok: bool) { - let schema = format!( - "interface Foo {{ x: Int }} - type Bar implements Foo @entity {{ - id: {bar_id}! - x: Int - }} - - type Baz implements Foo @entity {{ - id: {baz_id}! - x: Int - }}" - ); - let schema = Schema::parse(&schema, DeploymentHash::new("dummy").unwrap()).unwrap(); - let res = schema.validate(); - if ok { - assert!(matches!(res, Ok(_))); - } else { - assert!(matches!(res, Err(_))); - assert!(matches!( - res.unwrap_err()[0], - SchemaValidationError::InterfaceImplementorsMixId(_, _) - )); - } - } - check_schema("ID", "ID", true); - check_schema("ID", "String", true); - check_schema("ID", "Bytes", false); - check_schema("Bytes", "String", false); -} - -#[test] -fn test_derived_from_validation() { - const OTHER_TYPES: &str = " -type B @entity { id: ID! } -type C @entity { id: ID! } -type D @entity { id: ID! } -type E @entity { id: ID! } -type F @entity { id: ID! } -type G @entity { id: ID! a: BigInt } -type H @entity { id: ID! a: A! } -# This sets up a situation where we need to allow `Transaction.from` to -# point to an interface because of `Account.txn` -type Transaction @entity { from: Address! } -interface Address { txn: Transaction! @derivedFrom(field: \"from\") } -type Account implements Address @entity { id: ID!, txn: Transaction! 
@derivedFrom(field: \"from\") }"; - - fn validate(field: &str, errmsg: &str) { - let raw = format!("type A @entity {{ id: ID!\n {} }}\n{}", field, OTHER_TYPES); - - let document = graphql_parser::parse_schema(&raw) - .expect("Failed to parse raw schema") - .into_static(); - let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); - match schema.validate_derived_from() { - Err(ref e) => match e { - SchemaValidationError::InvalidDerivedFrom(_, _, msg) => assert_eq!(errmsg, msg), - _ => panic!("expected variant SchemaValidationError::DerivedFromInvalid"), - }, - Ok(_) => { - if errmsg != "ok" { - panic!("expected validation for `{}` to fail", field) - } - } - } - } - - validate( - "b: B @derivedFrom(field: \"a\")", - "field `a` does not exist on type `B`", - ); - validate( - "c: [C!]! @derivedFrom(field: \"a\")", - "field `a` does not exist on type `C`", - ); - validate( - "d: D @derivedFrom", - "the @derivedFrom directive must have a `field` argument", - ); - validate( - "e: E @derivedFrom(attr: \"a\")", - "the @derivedFrom directive must have a `field` argument", - ); - validate( - "f: F @derivedFrom(field: 123)", - "the @derivedFrom `field` argument must be a string", - ); - validate( - "g: G @derivedFrom(field: \"a\")", - "field `a` on type `G` must have one of the following types: A, A!, [A!], [A!]!", - ); - validate("h: H @derivedFrom(field: \"a\")", "ok"); - validate( - "i: NotAType @derivedFrom(field: \"a\")", - "type must be an existing entity or interface", - ); - validate("j: B @derivedFrom(field: \"id\")", "ok"); -} - -#[test] -fn test_reserved_type_with_fields() { - const ROOT_SCHEMA: &str = " -type _Schema_ { id: ID! 
}"; - - let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); - assert_eq!( - schema - .validate_schema_type_has_no_fields() - .expect_err("Expected validation to fail due to fields defined on the reserved type"), - SchemaValidationError::SchemaTypeWithFields - ) -} - -#[test] -fn test_reserved_type_directives() { - const ROOT_SCHEMA: &str = " -type _Schema_ @illegal"; - - let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); - assert_eq!( - schema.validate_directives_on_schema_type().expect_err( - "Expected validation to fail due to extra imports defined on the reserved type" - ), - SchemaValidationError::InvalidSchemaTypeDirectives - ) -} - -#[test] -fn test_enums_pass_field_validation() { - const ROOT_SCHEMA: &str = r#" -enum Color { - RED - GREEN -} - -type A @entity { - id: ID! 
- color: Color -}"#; - - let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); - let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); - assert_eq!(schema.validate_fields().len(), 0); -} - -#[test] -fn test_reserved_types_validation() { - let reserved_types = [ - // Built-in scalars - "Boolean", - "ID", - "Int", - "BigDecimal", - "String", - "Bytes", - "BigInt", - // Reserved keywords - "Query", - "Subscription", - ]; - - let dummy_hash = DeploymentHash::new("dummy").unwrap(); - - for reserved_type in reserved_types { - let schema = format!("type {} @entity {{ _: Boolean }}\n", reserved_type); - - let schema = Schema::parse(&schema, dummy_hash.clone()).unwrap(); - - let errors = schema.validate().unwrap_err(); - for error in errors { - assert!(matches!( - error, - SchemaValidationError::UsageOfReservedTypes(_) - )) - } - } -} - -#[test] -fn test_reserved_filter_and_group_by_types_validation() { - const SCHEMA: &str = r#" - type Gravatar @entity { - _: Boolean - } - type Gravatar_filter @entity { - _: Boolean - } - type Gravatar_orderBy @entity { - _: Boolean - } - "#; - - let dummy_hash = DeploymentHash::new("dummy").unwrap(); - - let schema = Schema::parse(SCHEMA, dummy_hash).unwrap(); - - let errors = schema.validate().unwrap_err(); - - // The only problem in the schema is the usage of reserved types - assert_eq!(errors.len(), 1); - - assert!(matches!( - &errors[0], - SchemaValidationError::UsageOfReservedTypes(Strings(_)) - )); - - // We know this will match due to the assertion above - match &errors[0] { - SchemaValidationError::UsageOfReservedTypes(Strings(reserved_types)) => { - let expected_types: Vec = - vec!["Gravatar_filter".into(), "Gravatar_orderBy".into()]; - assert_eq!(reserved_types, &expected_types); - } - _ => unreachable!(), - } -} - -#[test] -fn test_fulltext_directive_validation() { - const SCHEMA: &str = r#" -type _Schema_ @fulltext( - name: "metadata" - language: en - 
algorithm: rank - include: [ - { - entity: "Gravatar", - fields: [ - { name: "displayName"}, - { name: "imageUrl"}, - ] - } - ] -) -type Gravatar @entity { - id: ID! - owner: Bytes! - displayName: String! - imageUrl: String! -}"#; - - let document = graphql_parser::parse_schema(SCHEMA).expect("Failed to parse schema"); - let schema = Schema::new(DeploymentHash::new("id1").unwrap(), document).unwrap(); - - assert_eq!(schema.validate_fulltext_directives(), vec![]); -} diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index c52bd40e470..a8b18fe67ef 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -31,8 +31,8 @@ use crate::{ store::{StoreError, SubgraphStore}, }, data::{ - graphql::TryFromValue, query::QueryExecutionError, schema::SchemaValidationError, - store::Entity, subgraph::features::validate_subgraph_features, + graphql::TryFromValue, query::QueryExecutionError, store::Entity, + subgraph::features::validate_subgraph_features, }, data_source::{ offchain::OFFCHAIN_KINDS, DataSource, DataSourceTemplate, UnresolvedDataSource, @@ -40,7 +40,7 @@ use crate::{ }, ensure, prelude::{r, CheapClone, ENV_VARS}, - schema::InputSchema, + schema::{InputSchema, SchemaValidationError}, }; use crate::prelude::{impl_slog_value, BlockNumber, Deserialize, Serialize}; diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 7e1c4f36832..05e7e300a37 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -148,7 +148,6 @@ pub mod prelude { pub use crate::data::query::{ Query, QueryError, QueryExecutionError, QueryResult, QueryTarget, QueryVariables, }; - pub use crate::data::schema::Schema; pub use crate::data::store::ethereum::*; pub use crate::data::store::scalar::{BigDecimal, BigInt, BigIntSign}; pub use crate::data::store::{ diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 7d86c1b89ce..e3022553bdf 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -7,16 +7,15 @@ use 
lazy_static::lazy_static; use crate::components::store::EntityType; use crate::data::graphql::ObjectOrInterface; -use crate::schema::ast; +use crate::schema::{ast, META_FIELD_NAME, META_FIELD_TYPE}; -use crate::data::{ - graphql::ext::{DirectiveExt, DocumentExt, ValueExt}, - schema::{META_FIELD_NAME, META_FIELD_TYPE, SCHEMA_TYPE_NAME}, -}; +use crate::data::graphql::ext::{DirectiveExt, DocumentExt, ValueExt}; use crate::prelude::s::{Value, *}; use crate::prelude::*; use thiserror::Error; +use super::{Schema, SCHEMA_TYPE_NAME}; + #[derive(Error, Debug)] pub enum APISchemaError { #[error("type {0} already exists in the input schema")] diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index b89d28e678e..1fa1147e6d5 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -6,14 +6,13 @@ use anyhow::{anyhow, Error}; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::schema::{Schema, SchemaValidationError}; use crate::data::store::{self, scalar}; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; use super::fulltext::FulltextDefinition; -use super::ApiSchema; +use super::{ApiSchema, Schema, SchemaValidationError}; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 13e01b4114f..5d4a3a0789a 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -1,3 +1,27 @@ +use crate::components::store::EntityType; +use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; +use crate::data::graphql::ObjectTypeExt; +use crate::data::store::ValueType; +use crate::data::subgraph::DeploymentHash; +use crate::prelude::{ + anyhow, + q::Value, + s::{self, Definition, 
InterfaceType, ObjectType, TypeDefinition, *}, +}; + +use anyhow::Error; +use graphql_parser::{self, Pos}; +use inflector::Inflector; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use std::collections::{BTreeMap, HashSet}; +use std::convert::TryFrom; +use std::fmt; +use std::iter::FromIterator; +use std::str::FromStr; + /// Generate full-fledged API schemas from existing GraphQL schemas. mod api; @@ -12,3 +36,1056 @@ pub use api::{api_schema, APISchemaError}; pub use api::{ApiSchema, ErrorPolicy}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; + +pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; + +pub const META_FIELD_TYPE: &str = "_Meta_"; +pub const META_FIELD_NAME: &str = "_meta"; + +pub const BLOCK_FIELD_TYPE: &str = "_Block_"; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Strings(Vec); + +impl fmt::Display for Strings { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let s = self.0.join(", "); + write!(f, "{}", s) + } +} + +#[derive(Debug, Error, PartialEq, Eq)] +pub enum SchemaValidationError { + #[error("Interface `{0}` not defined")] + InterfaceUndefined(String), + + #[error("@entity directive missing on the following types: `{0}`")] + EntityDirectivesMissing(Strings), + + #[error( + "Entity type `{0}` does not satisfy interface `{1}` because it is missing \ + the following fields: {2}" + )] + InterfaceFieldsMissing(String, String, Strings), // (type, interface, missing_fields) + #[error("Implementors of interface `{0}` use different id types `{1}`. 
They must all use the same type")] + InterfaceImplementorsMixId(String, String), + #[error("Field `{1}` in type `{0}` has invalid @derivedFrom: {2}")] + InvalidDerivedFrom(String, String, String), // (type, field, reason) + #[error("The following type names are reserved: `{0}`")] + UsageOfReservedTypes(Strings), + #[error("_Schema_ type is only for @fulltext and must not have any fields")] + SchemaTypeWithFields, + #[error("The _Schema_ type only allows @fulltext directives")] + InvalidSchemaTypeDirectives, + #[error("Type `{0}`, field `{1}`: type `{2}` is not defined")] + FieldTypeUnknown(String, String, String), // (type_name, field_name, field_type) + #[error("Imported type `{0}` does not exist in the `{1}` schema")] + ImportedTypeUndefined(String, String), // (type_name, schema) + #[error("Fulltext directive name undefined")] + FulltextNameUndefined, + #[error("Fulltext directive name overlaps with type: {0}")] + FulltextNameConflict(String), + #[error("Fulltext directive name overlaps with an existing entity field or a top-level query field: {0}")] + FulltextNameCollision(String), + #[error("Fulltext language is undefined")] + FulltextLanguageUndefined, + #[error("Fulltext language is invalid: {0}")] + FulltextLanguageInvalid(String), + #[error("Fulltext algorithm is undefined")] + FulltextAlgorithmUndefined, + #[error("Fulltext algorithm is invalid: {0}")] + FulltextAlgorithmInvalid(String), + #[error("Fulltext include is invalid")] + FulltextIncludeInvalid, + #[error("Fulltext directive requires an 'include' list")] + FulltextIncludeUndefined, + #[error("Fulltext 'include' list must contain an object")] + FulltextIncludeObjectMissing, + #[error( + "Fulltext 'include' object must contain 'entity' (String) and 'fields' (List) attributes" + )] + FulltextIncludeEntityMissingOrIncorrectAttributes, + #[error("Fulltext directive includes an entity not found on the subgraph schema")] + FulltextIncludedEntityNotFound, + #[error("Fulltext include field must have a 
'name' attribute")] + FulltextIncludedFieldMissingRequiredProperty, + #[error("Fulltext entity field, {0}, not found or not a string")] + FulltextIncludedFieldInvalid(String), +} + +/// A validated and preprocessed GraphQL schema for a subgraph. +#[derive(Clone, Debug, PartialEq)] +pub struct Schema { + pub id: DeploymentHash, + pub document: s::Document, + + // Maps type name to implemented interfaces. + pub interfaces_for_type: BTreeMap>, + + // Maps an interface name to the list of entities that implement it. + pub types_for_interface: BTreeMap>, +} + +impl Schema { + /// Create a new schema. The document must already have been validated + // + // TODO: The way some validation is expected to be done beforehand, and + // some is done here makes it incredibly murky whether a `Schema` is + // fully validated. The code should be changed to make sure that a + // `Schema` is always fully valid + pub fn new(id: DeploymentHash, document: s::Document) -> Result { + let (interfaces_for_type, types_for_interface) = Self::collect_interfaces(&document)?; + + let mut schema = Schema { + id: id.clone(), + document, + interfaces_for_type, + types_for_interface, + }; + + schema.add_subgraph_id_directives(id); + + Ok(schema) + } + + fn collect_interfaces( + document: &s::Document, + ) -> Result< + ( + BTreeMap>, + BTreeMap>, + ), + SchemaValidationError, + > { + // Initialize with an empty vec for each interface, so we don't + // miss interfaces that have no implementors. 
+ let mut types_for_interface = + BTreeMap::from_iter(document.definitions.iter().filter_map(|d| match d { + Definition::TypeDefinition(TypeDefinition::Interface(t)) => { + Some((EntityType::from(t), vec![])) + } + _ => None, + })); + let mut interfaces_for_type = BTreeMap::<_, Vec<_>>::new(); + + for object_type in document.get_object_type_definitions() { + for implemented_interface in object_type.implements_interfaces.clone() { + let interface_type = document + .definitions + .iter() + .find_map(|def| match def { + Definition::TypeDefinition(TypeDefinition::Interface(i)) + if i.name.eq(&implemented_interface) => + { + Some(i.clone()) + } + _ => None, + }) + .ok_or_else(|| { + SchemaValidationError::InterfaceUndefined(implemented_interface.clone()) + })?; + + Self::validate_interface_implementation(object_type, &interface_type)?; + + interfaces_for_type + .entry(EntityType::from(object_type)) + .or_default() + .push(interface_type); + types_for_interface + .get_mut(&EntityType::new(implemented_interface)) + .unwrap() + .push(object_type.clone()); + } + } + + Ok((interfaces_for_type, types_for_interface)) + } + + pub fn parse(raw: &str, id: DeploymentHash) -> Result { + let document = graphql_parser::parse_schema(raw)?.into_static(); + + Schema::new(id, document).map_err(Into::into) + } + + /// Returned map has one an entry for each interface in the schema. + pub fn types_for_interface(&self) -> &BTreeMap> { + &self.types_for_interface + } + + /// Returns `None` if the type implements no interfaces. + pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { + self.interfaces_for_type.get(type_name) + } + + // Adds a @subgraphId(id: ...) directive to object/interface/enum types in the schema. 
+ pub fn add_subgraph_id_directives(&mut self, id: DeploymentHash) { + for definition in self.document.definitions.iter_mut() { + let subgraph_id_argument = (String::from("id"), s::Value::String(id.to_string())); + + let subgraph_id_directive = s::Directive { + name: "subgraphId".to_string(), + position: Pos::default(), + arguments: vec![subgraph_id_argument], + }; + + if let Definition::TypeDefinition(ref mut type_definition) = definition { + let (name, directives) = match type_definition { + TypeDefinition::Object(object_type) => { + (&object_type.name, &mut object_type.directives) + } + TypeDefinition::Interface(interface_type) => { + (&interface_type.name, &mut interface_type.directives) + } + TypeDefinition::Enum(enum_type) => (&enum_type.name, &mut enum_type.directives), + TypeDefinition::Scalar(scalar_type) => { + (&scalar_type.name, &mut scalar_type.directives) + } + TypeDefinition::InputObject(input_object_type) => { + (&input_object_type.name, &mut input_object_type.directives) + } + TypeDefinition::Union(union_type) => { + (&union_type.name, &mut union_type.directives) + } + }; + + if !name.eq(SCHEMA_TYPE_NAME) + && !directives + .iter() + .any(|directive| directive.name.eq("subgraphId")) + { + directives.push(subgraph_id_directive); + } + }; + } + } + + pub fn validate(&self) -> Result<(), Vec> { + let mut errors: Vec = [ + self.validate_schema_types(), + self.validate_derived_from(), + self.validate_schema_type_has_no_fields(), + self.validate_directives_on_schema_type(), + self.validate_reserved_types_usage(), + self.validate_interface_id_type(), + ] + .into_iter() + .filter(Result::is_err) + // Safe unwrap due to the filter above + .map(Result::unwrap_err) + .collect(); + + errors.append(&mut self.validate_fields()); + errors.append(&mut self.validate_fulltext_directives()); + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } + + fn validate_schema_type_has_no_fields(&self) -> Result<(), SchemaValidationError> { + match self + 
.subgraph_schema_object_type() + .and_then(|subgraph_schema_type| { + if !subgraph_schema_type.fields.is_empty() { + Some(SchemaValidationError::SchemaTypeWithFields) + } else { + None + } + }) { + Some(err) => Err(err), + None => Ok(()), + } + } + + fn validate_directives_on_schema_type(&self) -> Result<(), SchemaValidationError> { + match self + .subgraph_schema_object_type() + .and_then(|subgraph_schema_type| { + if subgraph_schema_type + .directives + .iter() + .filter(|directive| !directive.name.eq("fulltext")) + .next() + .is_some() + { + Some(SchemaValidationError::InvalidSchemaTypeDirectives) + } else { + None + } + }) { + Some(err) => Err(err), + None => Ok(()), + } + } + + fn validate_fulltext_directives(&self) -> Vec { + self.subgraph_schema_object_type() + .map_or(vec![], |subgraph_schema_type| { + subgraph_schema_type + .directives + .iter() + .filter(|directives| directives.name.eq("fulltext")) + .fold(vec![], |mut errors, fulltext| { + errors.extend(self.validate_fulltext_directive_name(fulltext).into_iter()); + errors.extend( + self.validate_fulltext_directive_language(fulltext) + .into_iter(), + ); + errors.extend( + self.validate_fulltext_directive_algorithm(fulltext) + .into_iter(), + ); + errors.extend( + self.validate_fulltext_directive_includes(fulltext) + .into_iter(), + ); + errors + }) + }) + } + + fn validate_fulltext_directive_name(&self, fulltext: &Directive) -> Vec { + let name = match fulltext.argument("name") { + Some(Value::String(name)) => name, + _ => return vec![SchemaValidationError::FulltextNameUndefined], + }; + + let local_types: Vec<&ObjectType> = self + .document + .get_object_type_definitions() + .into_iter() + .collect(); + + // Validate that the fulltext field doesn't collide with any top-level Query fields + // generated for entity types. The field name conversions should always align with those used + // to create the field names in `graphql::schema::api::query_fields_for_type()`. 
+ if local_types.iter().any(|typ| { + typ.fields.iter().any(|field| { + name == &field.name.as_str().to_camel_case() + || name == &field.name.to_plural().to_camel_case() + || field.name.eq(name) + }) + }) { + return vec![SchemaValidationError::FulltextNameCollision( + name.to_string(), + )]; + } + + // Validate that each fulltext directive has a distinct name + if self + .subgraph_schema_object_type() + .unwrap() + .directives + .iter() + .filter(|directive| directive.name.eq("fulltext")) + .filter_map(|fulltext| { + // Collect all @fulltext directives with the same name + match fulltext.argument("name") { + Some(Value::String(n)) if name.eq(n) => Some(n.as_str()), + _ => None, + } + }) + .count() + > 1 + { + vec![SchemaValidationError::FulltextNameConflict( + name.to_string(), + )] + } else { + vec![] + } + } + + fn validate_fulltext_directive_language( + &self, + fulltext: &Directive, + ) -> Vec { + let language = match fulltext.argument("language") { + Some(Value::Enum(language)) => language, + _ => return vec![SchemaValidationError::FulltextLanguageUndefined], + }; + match FulltextLanguage::try_from(language.as_str()) { + Ok(_) => vec![], + Err(_) => vec![SchemaValidationError::FulltextLanguageInvalid( + language.to_string(), + )], + } + } + + fn validate_fulltext_directive_algorithm( + &self, + fulltext: &Directive, + ) -> Vec { + let algorithm = match fulltext.argument("algorithm") { + Some(Value::Enum(algorithm)) => algorithm, + _ => return vec![SchemaValidationError::FulltextAlgorithmUndefined], + }; + match FulltextAlgorithm::try_from(algorithm.as_str()) { + Ok(_) => vec![], + Err(_) => vec![SchemaValidationError::FulltextAlgorithmInvalid( + algorithm.to_string(), + )], + } + } + + fn validate_fulltext_directive_includes( + &self, + fulltext: &Directive, + ) -> Vec { + // Only allow fulltext directive on local types + let local_types: Vec<&ObjectType> = self + .document + .get_object_type_definitions() + .into_iter() + .collect(); + + // Validate that each 
entity in fulltext.include exists + let includes = match fulltext.argument("include") { + Some(Value::List(includes)) if !includes.is_empty() => includes, + _ => return vec![SchemaValidationError::FulltextIncludeUndefined], + }; + + for include in includes { + match include.as_object() { + None => return vec![SchemaValidationError::FulltextIncludeObjectMissing], + Some(include_entity) => { + let (entity, fields) = + match (include_entity.get("entity"), include_entity.get("fields")) { + (Some(Value::String(entity)), Some(Value::List(fields))) => { + (entity, fields) + } + _ => return vec![SchemaValidationError::FulltextIncludeEntityMissingOrIncorrectAttributes], + }; + + // Validate the included entity type is one of the local types + let entity_type = match local_types + .iter() + .cloned() + .find(|typ| typ.name[..].eq(entity)) + { + None => return vec![SchemaValidationError::FulltextIncludedEntityNotFound], + Some(t) => t.clone(), + }; + + for field_value in fields { + let field_name = match field_value { + Value::Object(field_map) => match field_map.get("name") { + Some(Value::String(name)) => name, + _ => return vec![SchemaValidationError::FulltextIncludedFieldMissingRequiredProperty], + }, + _ => return vec![SchemaValidationError::FulltextIncludeEntityMissingOrIncorrectAttributes], + }; + + // Validate the included field is a String field on the local entity types specified + if !&entity_type + .fields + .iter() + .any(|field| { + let base_type: &str = field.field_type.get_base_type(); + matches!(ValueType::from_str(base_type), Ok(ValueType::String) if field.name.eq(field_name)) + }) + { + return vec![SchemaValidationError::FulltextIncludedFieldInvalid( + field_name.clone(), + )]; + }; + } + } + } + } + // Fulltext include validations all passed, so we return an empty vector + vec![] + } + + fn validate_fields(&self) -> Vec { + let local_types = self.document.get_object_and_interface_type_fields(); + let local_enums = self + .document + .get_enum_definitions() 
+ .iter() + .map(|enu| enu.name.clone()) + .collect::>(); + local_types + .iter() + .fold(vec![], |errors, (type_name, fields)| { + fields.iter().fold(errors, |mut errors, field| { + let base = field.field_type.get_base_type(); + if ValueType::is_scalar(base) { + return errors; + } + if local_types.contains_key(base) { + return errors; + } + if local_enums.iter().any(|enu| enu.eq(base)) { + return errors; + } + errors.push(SchemaValidationError::FieldTypeUnknown( + type_name.to_string(), + field.name.to_string(), + base.to_string(), + )); + errors + }) + }) + } + + /// Checks if the schema is using types that are reserved + /// by `graph-node` + fn validate_reserved_types_usage(&self) -> Result<(), SchemaValidationError> { + let document = &self.document; + let object_types: Vec<_> = document + .get_object_type_definitions() + .into_iter() + .map(|obj_type| &obj_type.name) + .collect(); + + let interface_types: Vec<_> = document + .get_interface_type_definitions() + .into_iter() + .map(|iface_type| &iface_type.name) + .collect(); + + // TYPE_NAME_filter types for all object and interface types + let mut filter_types: Vec = object_types + .iter() + .chain(interface_types.iter()) + .map(|type_name| format!("{}_filter", type_name)) + .collect(); + + // TYPE_NAME_orderBy types for all object and interface types + let mut order_by_types: Vec<_> = object_types + .iter() + .chain(interface_types.iter()) + .map(|type_name| format!("{}_orderBy", type_name)) + .collect(); + + let mut reserved_types: Vec = vec![ + // The built-in scalar types + "Boolean".into(), + "ID".into(), + "Int".into(), + "BigDecimal".into(), + "String".into(), + "Bytes".into(), + "BigInt".into(), + // Reserved Query and Subscription types + "Query".into(), + "Subscription".into(), + ]; + + reserved_types.append(&mut filter_types); + reserved_types.append(&mut order_by_types); + + // `reserved_types` will now only contain + // the reserved types that the given schema *is* using. 
+ // + // That is, if the schema is compliant and not using any reserved + // types, then it'll become an empty vector + reserved_types.retain(|reserved_type| document.get_named_type(reserved_type).is_some()); + + if reserved_types.is_empty() { + Ok(()) + } else { + Err(SchemaValidationError::UsageOfReservedTypes(Strings( + reserved_types, + ))) + } + } + + fn validate_schema_types(&self) -> Result<(), SchemaValidationError> { + let types_without_entity_directive = self + .document + .get_object_type_definitions() + .iter() + .filter(|t| t.find_directive("entity").is_none() && !t.name.eq(SCHEMA_TYPE_NAME)) + .map(|t| t.name.clone()) + .collect::>(); + if types_without_entity_directive.is_empty() { + Ok(()) + } else { + Err(SchemaValidationError::EntityDirectivesMissing(Strings( + types_without_entity_directive, + ))) + } + } + + fn validate_derived_from(&self) -> Result<(), SchemaValidationError> { + // Helper to construct a DerivedFromInvalid + fn invalid( + object_type: &ObjectType, + field_name: &str, + reason: &str, + ) -> SchemaValidationError { + SchemaValidationError::InvalidDerivedFrom( + object_type.name.clone(), + field_name.to_owned(), + reason.to_owned(), + ) + } + + let type_definitions = self.document.get_object_type_definitions(); + let object_and_interface_type_fields = self.document.get_object_and_interface_type_fields(); + + // Iterate over all derived fields in all entity types; include the + // interface types that the entity with the `@derivedFrom` implements + // and the `field` argument of @derivedFrom directive + for (object_type, interface_types, field, target_field) in type_definitions + .clone() + .iter() + .flat_map(|object_type| { + object_type + .fields + .iter() + .map(move |field| (object_type, field)) + }) + .filter_map(|(object_type, field)| { + field.find_directive("derivedFrom").map(|directive| { + ( + object_type, + object_type + .implements_interfaces + .iter() + .filter(|iface| { + // Any interface that has `field` can be used 
+ // as the type of the field + self.document + .find_interface(iface) + .map(|iface| { + iface + .fields + .iter() + .any(|ifield| ifield.name.eq(&field.name)) + }) + .unwrap_or(false) + }) + .collect::>(), + field, + directive.argument("field"), + ) + }) + }) + { + // Turn `target_field` into the string name of the field + let target_field = target_field.ok_or_else(|| { + invalid( + object_type, + &field.name, + "the @derivedFrom directive must have a `field` argument", + ) + })?; + let target_field = match target_field { + Value::String(s) => s, + _ => { + return Err(invalid( + object_type, + &field.name, + "the @derivedFrom `field` argument must be a string", + )) + } + }; + + // Check that the type we are deriving from exists + let target_type_name = field.field_type.get_base_type(); + let target_fields = object_and_interface_type_fields + .get(target_type_name) + .ok_or_else(|| { + invalid( + object_type, + &field.name, + "type must be an existing entity or interface", + ) + })?; + + // Check that the type we are deriving from has a field with the + // right name and type + let target_field = target_fields + .iter() + .find(|field| field.name.eq(target_field)) + .ok_or_else(|| { + let msg = format!( + "field `{}` does not exist on type `{}`", + target_field, target_type_name + ); + invalid(object_type, &field.name, &msg) + })?; + + // The field we are deriving from has to point back to us; as an + // exception, we allow deriving from the `id` of another type. + // For that, we will wind up comparing the `id`s of the two types + // when we query, and just assume that that's ok. 
+ let target_field_type = target_field.field_type.get_base_type(); + if target_field_type != object_type.name + && target_field_type != "ID" + && !interface_types + .iter() + .any(|iface| target_field_type.eq(iface.as_str())) + { + fn type_signatures(name: &str) -> Vec { + vec![ + format!("{}", name), + format!("{}!", name), + format!("[{}!]", name), + format!("[{}!]!", name), + ] + } + + let mut valid_types = type_signatures(&object_type.name); + valid_types.extend( + interface_types + .iter() + .flat_map(|iface| type_signatures(iface)), + ); + let valid_types = valid_types.join(", "); + + let msg = format!( + "field `{tf}` on type `{tt}` must have one of the following types: {valid_types}", + tf = target_field.name, + tt = target_type_name, + valid_types = valid_types, + ); + return Err(invalid(object_type, &field.name, &msg)); + } + } + Ok(()) + } + + /// Validate that `object` implements `interface`. + fn validate_interface_implementation( + object: &ObjectType, + interface: &InterfaceType, + ) -> Result<(), SchemaValidationError> { + // Check that all fields in the interface exist in the object with same name and type. 
+ let mut missing_fields = vec![]; + for i in &interface.fields { + if !object + .fields + .iter() + .any(|o| o.name.eq(&i.name) && o.field_type.eq(&i.field_type)) + { + missing_fields.push(i.to_string().trim().to_owned()); + } + } + if !missing_fields.is_empty() { + Err(SchemaValidationError::InterfaceFieldsMissing( + object.name.clone(), + interface.name.clone(), + Strings(missing_fields), + )) + } else { + Ok(()) + } + } + + fn validate_interface_id_type(&self) -> Result<(), SchemaValidationError> { + for (intf, obj_types) in &self.types_for_interface { + let id_types: HashSet<&str> = HashSet::from_iter( + obj_types + .iter() + .filter_map(|obj_type| obj_type.field("id")) + .map(|f| f.field_type.get_base_type()) + .map(|name| if name == "ID" { "String" } else { name }), + ); + if id_types.len() > 1 { + return Err(SchemaValidationError::InterfaceImplementorsMixId( + intf.to_string(), + id_types.iter().join(", "), + )); + } + } + Ok(()) + } + + fn subgraph_schema_object_type(&self) -> Option<&ObjectType> { + self.document + .get_object_type_definitions() + .into_iter() + .find(|object_type| object_type.name.eq(SCHEMA_TYPE_NAME)) + } +} + +#[test] +fn non_existing_interface() { + let schema = "type Foo implements Bar @entity { foo: Int }"; + let res = Schema::parse(schema, DeploymentHash::new("dummy").unwrap()); + let error = res + .unwrap_err() + .downcast::() + .unwrap(); + assert_eq!( + error, + SchemaValidationError::InterfaceUndefined("Bar".to_owned()) + ); +} + +#[test] +fn invalid_interface_implementation() { + let schema = " + interface Foo { + x: Int, + y: Int + } + + type Bar implements Foo @entity { + x: Boolean + } + "; + let res = Schema::parse(schema, DeploymentHash::new("dummy").unwrap()); + assert_eq!( + res.unwrap_err().to_string(), + "Entity type `Bar` does not satisfy interface `Foo` because it is missing \ + the following fields: x: Int, y: Int", + ); +} + +#[test] +fn interface_implementations_id_type() { + fn check_schema(bar_id: &str, baz_id: 
&str, ok: bool) { + let schema = format!( + "interface Foo {{ x: Int }} + type Bar implements Foo @entity {{ + id: {bar_id}! + x: Int + }} + + type Baz implements Foo @entity {{ + id: {baz_id}! + x: Int + }}" + ); + let schema = Schema::parse(&schema, DeploymentHash::new("dummy").unwrap()).unwrap(); + let res = schema.validate(); + if ok { + assert!(matches!(res, Ok(_))); + } else { + assert!(matches!(res, Err(_))); + assert!(matches!( + res.unwrap_err()[0], + SchemaValidationError::InterfaceImplementorsMixId(_, _) + )); + } + } + check_schema("ID", "ID", true); + check_schema("ID", "String", true); + check_schema("ID", "Bytes", false); + check_schema("Bytes", "String", false); +} + +#[test] +fn test_derived_from_validation() { + const OTHER_TYPES: &str = " +type B @entity { id: ID! } +type C @entity { id: ID! } +type D @entity { id: ID! } +type E @entity { id: ID! } +type F @entity { id: ID! } +type G @entity { id: ID! a: BigInt } +type H @entity { id: ID! a: A! } +# This sets up a situation where we need to allow `Transaction.from` to +# point to an interface because of `Account.txn` +type Transaction @entity { from: Address! } +interface Address { txn: Transaction! @derivedFrom(field: \"from\") } +type Account implements Address @entity { id: ID!, txn: Transaction! 
@derivedFrom(field: \"from\") }"; + + fn validate(field: &str, errmsg: &str) { + let raw = format!("type A @entity {{ id: ID!\n {} }}\n{}", field, OTHER_TYPES); + + let document = graphql_parser::parse_schema(&raw) + .expect("Failed to parse raw schema") + .into_static(); + let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); + match schema.validate_derived_from() { + Err(ref e) => match e { + SchemaValidationError::InvalidDerivedFrom(_, _, msg) => assert_eq!(errmsg, msg), + _ => panic!("expected variant SchemaValidationError::DerivedFromInvalid"), + }, + Ok(_) => { + if errmsg != "ok" { + panic!("expected validation for `{}` to fail", field) + } + } + } + } + + validate( + "b: B @derivedFrom(field: \"a\")", + "field `a` does not exist on type `B`", + ); + validate( + "c: [C!]! @derivedFrom(field: \"a\")", + "field `a` does not exist on type `C`", + ); + validate( + "d: D @derivedFrom", + "the @derivedFrom directive must have a `field` argument", + ); + validate( + "e: E @derivedFrom(attr: \"a\")", + "the @derivedFrom directive must have a `field` argument", + ); + validate( + "f: F @derivedFrom(field: 123)", + "the @derivedFrom `field` argument must be a string", + ); + validate( + "g: G @derivedFrom(field: \"a\")", + "field `a` on type `G` must have one of the following types: A, A!, [A!], [A!]!", + ); + validate("h: H @derivedFrom(field: \"a\")", "ok"); + validate( + "i: NotAType @derivedFrom(field: \"a\")", + "type must be an existing entity or interface", + ); + validate("j: B @derivedFrom(field: \"id\")", "ok"); +} + +#[test] +fn test_reserved_type_with_fields() { + const ROOT_SCHEMA: &str = " +type _Schema_ { id: ID! 
}"; + + let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); + let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); + assert_eq!( + schema + .validate_schema_type_has_no_fields() + .expect_err("Expected validation to fail due to fields defined on the reserved type"), + SchemaValidationError::SchemaTypeWithFields + ) +} + +#[test] +fn test_reserved_type_directives() { + const ROOT_SCHEMA: &str = " +type _Schema_ @illegal"; + + let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); + let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); + assert_eq!( + schema.validate_directives_on_schema_type().expect_err( + "Expected validation to fail due to extra imports defined on the reserved type" + ), + SchemaValidationError::InvalidSchemaTypeDirectives + ) +} + +#[test] +fn test_enums_pass_field_validation() { + const ROOT_SCHEMA: &str = r#" +enum Color { + RED + GREEN +} + +type A @entity { + id: ID! 
+ color: Color +}"#; + + let document = graphql_parser::parse_schema(ROOT_SCHEMA).expect("Failed to parse root schema"); + let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); + assert_eq!(schema.validate_fields().len(), 0); +} + +#[test] +fn test_reserved_types_validation() { + let reserved_types = [ + // Built-in scalars + "Boolean", + "ID", + "Int", + "BigDecimal", + "String", + "Bytes", + "BigInt", + // Reserved keywords + "Query", + "Subscription", + ]; + + let dummy_hash = DeploymentHash::new("dummy").unwrap(); + + for reserved_type in reserved_types { + let schema = format!("type {} @entity {{ _: Boolean }}\n", reserved_type); + + let schema = Schema::parse(&schema, dummy_hash.clone()).unwrap(); + + let errors = schema.validate().unwrap_err(); + for error in errors { + assert!(matches!( + error, + SchemaValidationError::UsageOfReservedTypes(_) + )) + } + } +} + +#[test] +fn test_reserved_filter_and_group_by_types_validation() { + const SCHEMA: &str = r#" + type Gravatar @entity { + _: Boolean + } + type Gravatar_filter @entity { + _: Boolean + } + type Gravatar_orderBy @entity { + _: Boolean + } + "#; + + let dummy_hash = DeploymentHash::new("dummy").unwrap(); + + let schema = Schema::parse(SCHEMA, dummy_hash).unwrap(); + + let errors = schema.validate().unwrap_err(); + + // The only problem in the schema is the usage of reserved types + assert_eq!(errors.len(), 1); + + assert!(matches!( + &errors[0], + SchemaValidationError::UsageOfReservedTypes(Strings(_)) + )); + + // We know this will match due to the assertion above + match &errors[0] { + SchemaValidationError::UsageOfReservedTypes(Strings(reserved_types)) => { + let expected_types: Vec = + vec!["Gravatar_filter".into(), "Gravatar_orderBy".into()]; + assert_eq!(reserved_types, &expected_types); + } + _ => unreachable!(), + } +} + +#[test] +fn test_fulltext_directive_validation() { + const SCHEMA: &str = r#" +type _Schema_ @fulltext( + name: "metadata" + language: en + 
algorithm: rank + include: [ + { + entity: "Gravatar", + fields: [ + { name: "displayName"}, + { name: "imageUrl"}, + ] + } + ] +) +type Gravatar @entity { + id: ID! + owner: Bytes! + displayName: String! + imageUrl: String! +}"#; + + let document = graphql_parser::parse_schema(SCHEMA).expect("Failed to parse schema"); + let schema = Schema::new(DeploymentHash::new("id1").unwrap(), document).unwrap(); + + assert_eq!(schema.validate_fulltext_directives(), vec![]); +} diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index dcf6cf51e1e..5e9c134db23 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -2,8 +2,9 @@ use super::cache::{QueryBlockCache, QueryCache}; use async_recursion::async_recursion; use crossbeam::atomic::AtomicCell; use graph::{ - data::{query::Trace, schema::META_FIELD_NAME, value::Object}, + data::{query::Trace, value::Object}, prelude::{s, CheapClone}, + schema::META_FIELD_NAME, util::{lfu_cache::EvictStats, timed_rw_lock::TimedMutex}, }; use lazy_static::lazy_static; diff --git a/graphql/src/introspection/resolver.rs b/graphql/src/introspection/resolver.rs index 60ebf2631e8..c1a0e0560dd 100644 --- a/graphql/src/introspection/resolver.rs +++ b/graphql/src/introspection/resolver.rs @@ -8,7 +8,7 @@ use graph::prelude::*; use crate::execution::ast as a; use crate::prelude::*; -use graph::schema::ast as sast; +use graph::schema::{ast as sast, Schema}; type TypeObjectsMap = BTreeMap; diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index a7305547db5..3eb3f4e5b98 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -750,14 +750,14 @@ mod tests { components::store::EntityType, data::value::Object, prelude::{ - r, AttributeNames, DeploymentHash, EntityCollection, EntityFilter, EntityRange, Schema, - Value, ValueType, BLOCK_NUMBER_MAX, + r, AttributeNames, DeploymentHash, EntityCollection, EntityFilter, EntityRange, Value, + ValueType, 
BLOCK_NUMBER_MAX, }, prelude::{ s::{self, Directive, Field, InputValue, ObjectType, Type, Value as SchemaValue}, EntityOrder, }, - schema::ApiSchema, + schema::{ApiSchema, Schema}, }; use graphql_parser::Pos; use std::{collections::BTreeMap, iter::FromIterator, sync::Arc}; diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 61e24431972..85f733d5661 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -2,16 +2,13 @@ use std::collections::BTreeMap; use std::result; use std::sync::Arc; +use graph::components::store::*; +use graph::data::graphql::{object, ObjectOrInterface}; use graph::data::query::Trace; use graph::data::value::Object; -use graph::data::{ - graphql::{object, ObjectOrInterface}, - schema::META_FIELD_TYPE, -}; use graph::prelude::*; -use graph::schema::ErrorPolicy; -use graph::schema::{ast as sast, ApiSchema}; -use graph::{components::store::*, data::schema::BLOCK_FIELD_TYPE}; +use graph::schema::{ast as sast, ApiSchema, META_FIELD_TYPE}; +use graph::schema::{ErrorPolicy, BLOCK_FIELD_TYPE}; use crate::execution::ast as a; use crate::metrics::GraphQLMetrics; diff --git a/server/index-node/src/schema.rs b/server/index-node/src/schema.rs index d2593adff45..85df359467b 100644 --- a/server/index-node/src/schema.rs +++ b/server/index-node/src/schema.rs @@ -1,4 +1,7 @@ -use graph::{prelude::*, schema::ApiSchema}; +use graph::{ + prelude::*, + schema::{ApiSchema, Schema}, +}; lazy_static! 
{ pub static ref SCHEMA: Arc = { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index ccb5aa4b2de..b7169980ce6 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -30,7 +30,7 @@ use graph::data::query::Trace; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; -use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema}; +use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME}; use graph::slog::warn; use inflector::Inflector; use lazy_static::lazy_static; @@ -52,7 +52,6 @@ use crate::{ }; use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; -use graph::data::schema::SCHEMA_TYPE_NAME; use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::{POI_OBJECT, POI_TABLE}; use graph::prelude::{ diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index dba67b8165f..b79c17fe4e2 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -4,9 +4,9 @@ use graph::data::graphql::{object, object_value, ObjectOrInterface}; use graph::data::query::Trace; use graph::prelude::{ async_trait, o, r, s, slog, tokio, DeploymentHash, Logger, Query, QueryExecutionError, - QueryResult, Schema, + QueryResult, }; -use graph::schema::{api_schema, ApiSchema}; +use graph::schema::{api_schema, ApiSchema, Schema}; use graph_graphql::prelude::{ a, execute_query, ExecutionContext, Query as PreparedQuery, QueryExecutionOptions, Resolver, From cae4914713422a687847e6eb7d12842d88edba9d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 8 Apr 2023 12:25:13 -0700 Subject: [PATCH 0141/2104] graph, graphql, store: Build Object much earlier during query This avoids building an intermediate 
BTreeMap --- graph/src/components/store/traits.rs | 4 ++-- graph/src/data/value.rs | 23 ++++++++++++++++++- graphql/src/execution/execution.rs | 2 +- graphql/src/store/prefetch.rs | 28 ++++++++++-------------- store/postgres/src/query_store.rs | 6 ++--- store/postgres/src/relational_queries.rs | 12 +++++----- 6 files changed, 45 insertions(+), 30 deletions(-) diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 6d36c582353..8ca56a09c84 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -7,7 +7,7 @@ use crate::components::transaction_receipt; use crate::components::versions::ApiVersion; use crate::data::query::Trace; use crate::data::subgraph::status; -use crate::data::value::Word; +use crate::data::value::Object; use crate::data::{query::QueryTarget, subgraph::schema::*}; use crate::schema::{ApiSchema, InputSchema}; @@ -487,7 +487,7 @@ pub trait QueryStore: Send + Sync { fn find_query_values( &self, query: EntityQuery, - ) -> Result<(Vec>, Trace), QueryExecutionError>; + ) -> Result<(Vec, Trace), QueryExecutionError>; async fn is_deployment_synced(&self) -> Result; diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 7991ef3ac17..d3c0bf8bd60 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -93,6 +93,10 @@ impl Entry { pub struct Object(Box<[Entry]>); impl Object { + pub fn empty() -> Object { + Object(Box::new([])) + } + pub fn get(&self, key: &str) -> Option<&Value> { self.0 .iter() @@ -118,11 +122,28 @@ impl Object { self.0.len() } - pub fn extend(&mut self, other: Object) { + /// Add the entries from an object to `self`. Note that if `self` and + /// `object` have entries with identical keys, the entry in `self` wins. 
+ pub fn append(&mut self, other: Object) { let mut entries = std::mem::replace(&mut self.0, Box::new([])).into_vec(); entries.extend(other.0.into_vec()); self.0 = entries.into_boxed_slice(); } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Extend<(Word, Value)> for Object { + /// Add the entries from the iterator to an object. Note that if the + /// iterator produces a key that is already set in the object, it will + /// not be overwritten, and the previous value wins. + fn extend>(&mut self, iter: T) { + let mut entries = std::mem::replace(&mut self.0, Box::new([])).into_vec(); + entries.extend(iter.into_iter().map(|(key, value)| Entry::new(key, value))); + self.0 = entries.into_boxed_slice(); + } } impl FromIterator<(String, Value)> for Object { diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 5e9c134db23..4d097b7a9dd 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -285,7 +285,7 @@ pub(crate) async fn execute_root_selection_set_uncached( if !intro_set.is_empty() { let ictx = ctx.as_introspection_context(); - values.extend( + values.append( execute_selection_set_to_map( &ictx, ctx.query.selection_set.as_ref(), diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index fcacaba8852..91f165621eb 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -40,7 +40,7 @@ struct Node { /// the keys and values of the `children` map, but not of the map itself children_weight: usize, - entity: BTreeMap, + entity: Object, /// We are using an `Rc` here for two reasons: it allows us to defer /// copying objects until the end, when converting to `q::Value` forces /// us to copy any child that is referenced by multiple parents. 
It also @@ -84,8 +84,8 @@ struct Node { children: BTreeMap>>, } -impl From> for Node { - fn from(entity: BTreeMap) -> Self { +impl From for Node { + fn from(entity: Object) -> Self { Node { children_weight: entity.weight(), entity, @@ -130,7 +130,7 @@ fn is_root_node<'a>(mut nodes: impl Iterator) -> bool { } fn make_root_node() -> Vec { - let entity = BTreeMap::new(); + let entity = Object::empty(); vec![Node { children_weight: entity.weight(), entity, @@ -145,13 +145,14 @@ fn make_root_node() -> Vec { impl From for r::Value { fn from(node: Node) -> Self { let mut map = node.entity; - for (key, nodes) in node.children.into_iter() { - map.insert( + let entries = node.children.into_iter().map(|(key, nodes)| { + ( format!("prefetch:{}", key).into(), node_list_as_value(nodes), - ); - } - r::Value::object(map) + ) + }); + map.extend(entries); + r::Value::Object(map) } } @@ -178,7 +179,7 @@ impl Node { } fn get(&self, key: &str) -> Option<&r::Value> { - self.entity.get(&key.into()) + self.entity.get(key) } fn typename(&self) -> &str { @@ -699,12 +700,7 @@ fn fetch( resolver .store .find_query_values(query) - .map(|(values, trace)| { - ( - values.into_iter().map(|entity| entity.into()).collect(), - trace, - ) - }) + .map(|(values, trace)| (values.into_iter().map(Node::from).collect(), trace)) } #[derive(Debug, Default, Clone)] diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index 18cb765878d..1932cd61e9e 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -1,9 +1,7 @@ -use std::collections::BTreeMap; - use crate::deployment_store::{DeploymentStore, ReplicaId}; use graph::components::store::QueryStore as QueryStoreTrait; use graph::data::query::Trace; -use graph::data::value::Word; +use graph::data::value::Object; use graph::prelude::*; use graph::schema::ApiSchema; @@ -40,7 +38,7 @@ impl QueryStoreTrait for QueryStore { fn find_query_values( &self, query: EntityQuery, - ) -> Result<(Vec>, Trace), 
graph::prelude::QueryExecutionError> { + ) -> Result<(Vec, Trace), graph::prelude::QueryExecutionError> { assert_eq!(&self.site.deployment, &query.subgraph_id); let conn = self .store diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 13cde73c039..61ec3fd2f8f 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -13,7 +13,7 @@ use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; use graph::components::store::{DerivedEntityQuery, EntityKey}; -use graph::data::value::Word; +use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; use graph::prelude::{ anyhow, r, serde_json, Attribute, BlockNumber, ChildMultiplicity, Entity, EntityCollection, @@ -260,17 +260,17 @@ impl FromEntityData for Entity { } } -impl FromEntityData for BTreeMap { +// TODO: This implementation is not very efficient; we will address that by +// making deserialize_with_layout return an iterator +impl FromEntityData for Object { type Value = r::Value; fn new_entity(typename: String) -> Self { - let mut map = BTreeMap::new(); - map.insert("__typename".into(), Self::Value::from_string(typename)); - map + Object::from_iter([("__typename".into(), Self::Value::from_string(typename))]) } fn insert_entity_data(&mut self, key: String, v: Self::Value) { - self.insert(Word::from(key), v); + self.extend([(Word::from(key), v)]) } } From 83329019b4d15ae6c054fbde1a59d9acb0215229 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 8 Apr 2023 15:46:30 -0700 Subject: [PATCH 0142/2104] all: Rewrite deserialize_with_layout to use an iterator We need this to lower the requirements for implementors of FromEntityData, in particular so that we do not need an impl of Default --- graph/examples/stress.rs | 13 +++-- graph/src/data/graphql/object_macro.rs | 6 +- graph/src/data/query/result.rs | 5 +- graph/src/data/store/mod.rs | 14 ++++- 
graph/src/data/value.rs | 12 ++-- graphql/src/execution/execution.rs | 7 ++- graphql/src/execution/query.rs | 4 +- graphql/src/store/prefetch.rs | 9 ++- graphql/src/store/query.rs | 6 +- graphql/src/store/resolver.rs | 6 +- server/http/src/request.rs | 7 ++- server/http/src/service.rs | 8 +-- server/http/tests/server.rs | 10 +--- server/index-node/src/resolver.rs | 8 +-- server/index-node/src/service.rs | 7 ++- store/postgres/src/relational_queries.rs | 73 ++++++++++++------------ 16 files changed, 110 insertions(+), 85 deletions(-) diff --git a/graph/examples/stress.rs b/graph/examples/stress.rs index 18c557f45c6..2e3d77d57a6 100644 --- a/graph/examples/stress.rs +++ b/graph/examples/stress.rs @@ -324,7 +324,10 @@ fn make_object(size: usize, mut rng: Option<&mut SmallRng>) -> Object { 7 => { let mut obj = Vec::new(); for j in 0..(i % 51) { - obj.push((format!("key{}", j), r::Value::String(format!("value{}", j)))); + obj.push(( + Word::from(format!("key{}", j)), + r::Value::String(format!("value{}", j)), + )); } r::Value::Object(Object::from_iter(obj)) } @@ -332,7 +335,7 @@ fn make_object(size: usize, mut rng: Option<&mut SmallRng>) -> Object { }; let key = rng.as_deref_mut().map(|rng| rng.gen()).unwrap_or(i) % modulus; - obj.push((format!("val{}", key), value)); + obj.push((Word::from(format!("val{}", key)), value)); } Object::from_iter(obj) } @@ -346,7 +349,7 @@ fn make_domains(size: usize, _rng: Option<&mut SmallRng>) -> Object { }; let domains: Vec<_> = (0..size).map(|_| owner.clone()).collect(); - Object::from_iter([("domains".to_string(), r::Value::List(domains))]) + Object::from_iter([("domains".into(), r::Value::List(domains))]) } /// Template for testing caching of `Object` @@ -362,7 +365,7 @@ impl Template for Object { Box::new(Object::from_iter( self.iter() .take(size) - .map(|(k, v)| (k.to_owned(), v.clone())), + .map(|(k, v)| (Word::from(k), v.clone())), )) } else { Box::new(make_object(size, rng)) @@ -385,7 +388,7 @@ impl Template for QueryResult { 
.unwrap() .iter() .take(size) - .map(|(k, v)| (k.to_owned(), v.clone())), + .map(|(k, v)| (Word::from(k), v.clone())), ))) } else { Box::new(QueryResult::new(make_domains(size, rng))) diff --git a/graph/src/data/graphql/object_macro.rs b/graph/src/data/graphql/object_macro.rs index 8af3bbc55ab..bbecab075ec 100644 --- a/graph/src/data/graphql/object_macro.rs +++ b/graph/src/data/graphql/object_macro.rs @@ -1,4 +1,5 @@ use crate::data::value::Object; +use crate::data::value::Word; use crate::prelude::q; use crate::prelude::r; use std::iter::FromIterator; @@ -8,7 +9,7 @@ use std::iter::FromIterator; /// consider using the `object! {}` macro instead. pub fn object_value(data: Vec<(&str, r::Value)>) -> r::Value { r::Value::Object(Object::from_iter( - data.into_iter().map(|(k, v)| (k.to_string(), v)), + data.into_iter().map(|(k, v)| (Word::from(k), v)), )) } @@ -102,10 +103,11 @@ impl_into_values![(String, String), (f64, Float), (bool, Boolean)]; macro_rules! object { ($($name:ident: $value:expr,)*) => { { + use $crate::data::value::Word; let mut result = Vec::new(); $( let value = $crate::data::graphql::object_macro::IntoValue::into_value($value); - result.push((stringify!($name).to_string(), value)); + result.push((Word::from(stringify!($name)), value)); )* $crate::prelude::r::Value::Object($crate::data::value::Object::from_iter(result)) } diff --git a/graph/src/data/query/result.rs b/graph/src/data/query/result.rs index acf5accf57a..d2d06e65679 100644 --- a/graph/src/data/query/result.rs +++ b/graph/src/data/query/result.rs @@ -371,7 +371,10 @@ fn multiple_data_items() { use serde_json::json; fn make_obj(key: &str, value: &str) -> Arc { - let obj = Object::from_iter([(key.to_owned(), r::Value::String(value.to_owned()))]); + let obj = Object::from_iter([( + crate::data::value::Word::from(key), + r::Value::String(value.to_owned()), + )]); Arc::new(obj.into()) } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index dd934bb9f46..3663db45c51 100644 
--- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -19,7 +19,10 @@ use std::{borrow::Cow, collections::HashMap}; use strum::AsStaticRef as _; use strum_macros::AsStaticStr; -use super::graphql::{ext::DirectiveFinder, TypeExt as _}; +use super::{ + graphql::{ext::DirectiveFinder, TypeExt as _}, + value::Word, +}; /// Custom scalars in GraphQL. pub mod scalar; @@ -838,6 +841,15 @@ impl<'a> From> for Entity { } } +impl FromIterator<(Word, Value)> for Entity { + fn from_iter>(iter: T) -> Self { + Entity(HashMap::from_iter( + iter.into_iter() + .map(|(key, value)| (String::from(key), value)), + )) + } +} + impl CacheWeight for Entity { fn indirect_weight(&self) -> usize { self.0.indirect_weight() diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index d3c0bf8bd60..321745e3d75 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -146,11 +146,11 @@ impl Extend<(Word, Value)> for Object { } } -impl FromIterator<(String, Value)> for Object { - fn from_iter>(iter: T) -> Self { +impl FromIterator<(Word, Value)> for Object { + fn from_iter>(iter: T) -> Self { let mut items: Vec<_> = Vec::new(); for (key, value) in iter { - items.push(Entry::new(key.into(), value)) + items.push(Entry::new(key, value)) } Object(items.into_boxed_slice()) } @@ -428,8 +428,10 @@ impl From for Value { Value::List(vals) } serde_json::Value::Object(map) => { - let obj = - Object::from_iter(map.into_iter().map(|(key, val)| (key, Value::from(val)))); + let obj = Object::from_iter( + map.into_iter() + .map(|(key, val)| (Word::from(key), Value::from(val))), + ); Value::Object(obj) } } diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 4d097b7a9dd..a7f49eea513 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -2,7 +2,10 @@ use super::cache::{QueryBlockCache, QueryCache}; use async_recursion::async_recursion; use crossbeam::atomic::AtomicCell; use graph::{ - 
data::{query::Trace, value::Object}, + data::{ + query::Trace, + value::{Object, Word}, + }, prelude::{s, CheapClone}, schema::META_FIELD_NAME, util::{lfu_cache::EvictStats, timed_rw_lock::TimedMutex}, @@ -539,7 +542,7 @@ async fn execute_selection_set_to_map<'a>( } if errors.is_empty() { - let obj = Object::from_iter(results.into_iter().map(|(k, v)| (k.to_owned(), v))); + let obj = Object::from_iter(results.into_iter().map(|(k, v)| (Word::from(k), v))); Ok(obj) } else { Err(errors) diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index a23a870ced7..6fceacaec6d 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -1,5 +1,5 @@ use graph::data::graphql::DocumentExt as _; -use graph::data::value::Object; +use graph::data::value::{Object, Word}; use graph::schema::ApiSchema; use graphql_parser::Pos; use graphql_tools::validation::rules::*; @@ -850,7 +850,7 @@ impl Transform { ) { Ok(Some(value)) => { let value = if argument_def.name == *"text" { - r::Value::Object(Object::from_iter(vec![(field_name.to_string(), value)])) + r::Value::Object(Object::from_iter(vec![(Word::from(field_name), value)])) } else { value }; diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 91f165621eb..1fe3c6cfcea 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -481,9 +481,12 @@ pub fn run( execute_root_selection_set(resolver, ctx, selection_set).map(|(nodes, trace)| { graphql_metrics.observe_query_result_size(nodes.weight()); let obj = Object::from_iter(nodes.into_iter().flat_map(|node| { - node.children - .into_iter() - .map(|(key, nodes)| (format!("prefetch:{}", key), node_list_as_value(nodes))) + node.children.into_iter().map(|(key, nodes)| { + ( + Word::from(format!("prefetch:{}", key)), + node_list_as_value(nodes), + ) + }) })); (r::Value::Object(obj), trace) }) diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 3eb3f4e5b98..35e4d494ea8 100644 
--- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -1152,7 +1152,7 @@ mod tests { let query_field = default_field_with( "where", r::Value::Object(Object::from_iter(vec![( - "name_ends_with".to_string(), + "name_ends_with".into(), r::Value::String("ello".to_string()), )])), ); @@ -1185,9 +1185,9 @@ mod tests { let query_field = default_field_with( "where", r::Value::Object(Object::from_iter(vec![( - "_change_block".to_string(), + "_change_block".into(), r::Value::Object(Object::from_iter(vec![( - "number_gte".to_string(), + "number_gte".into(), r::Value::Int(10), )])), )])), diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 85f733d5661..c5aeffa0d04 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use graph::components::store::*; use graph::data::graphql::{object, ObjectOrInterface}; use graph::data::query::Trace; -use graph::data::value::Object; +use graph::data::value::{Object, Word}; use graph::prelude::*; use graph::schema::{ast as sast, ApiSchema, META_FIELD_TYPE}; use graph::schema::{ErrorPolicy, BLOCK_FIELD_TYPE}; @@ -377,7 +377,9 @@ impl Resolver for StoreResolver { let data = result.take_data(); let meta = data.and_then(|mut d| d.remove("_meta").map(|m| ("_meta".to_string(), m))); - result.set_data(meta.map(|m| Object::from_iter(Some(m)))); + result.set_data( + meta.map(|(key, value)| Object::from_iter(Some((Word::from(key), value)))), + ); } ErrorPolicy::Allow => (), } diff --git a/server/http/src/request.rs b/server/http/src/request.rs index 8c659a7c199..3ad188ecd6a 100644 --- a/server/http/src/request.rs +++ b/server/http/src/request.rs @@ -51,7 +51,10 @@ mod tests { use std::collections::HashMap; use graph::{ - data::{query::QueryTarget, value::Object}, + data::{ + query::QueryTarget, + value::{Object, Word}, + }, prelude::*, }; @@ -165,7 +168,7 @@ mod tests { ( String::from("map"), r::Value::Object(Object::from_iter( - 
vec![(String::from("k"), r::Value::String(String::from("v")))].into_iter(), + vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), )), ), (String::from("int"), r::Value::Int(5)), diff --git a/server/http/src/service.rs b/server/http/src/service.rs index 5d0ef6ded14..64cc36afdcc 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -351,7 +351,7 @@ where #[cfg(test)] mod tests { - use graph::data::value::Object; + use graph::data::value::{Object, Word}; use http::status::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request}; @@ -398,11 +398,7 @@ mod tests { async fn run_query(self: Arc, _query: Query, _target: QueryTarget) -> QueryResults { QueryResults::from(Object::from_iter( - vec![( - String::from("name"), - r::Value::String(String::from("Jordi")), - )] - .into_iter(), + vec![(Word::from("name"), r::Value::String(String::from("Jordi")))].into_iter(), )) } diff --git a/server/http/tests/server.rs b/server/http/tests/server.rs index 76295e79528..e9046e20020 100644 --- a/server/http/tests/server.rs +++ b/server/http/tests/server.rs @@ -5,7 +5,7 @@ use std::time::Duration; use graph::data::{ graphql::effort::LoadManager, query::{QueryResults, QueryTarget}, - value::Object, + value::{Object, Word}, }; use graph::prelude::*; @@ -56,15 +56,11 @@ impl GraphQlRunner for TestGraphQlRunner { == &r::Value::String(String::from("John")) { Object::from_iter( - vec![(String::from("name"), r::Value::String(String::from("John")))].into_iter(), + vec![(Word::from("name"), r::Value::String(String::from("John")))].into_iter(), ) } else { Object::from_iter( - vec![( - String::from("name"), - r::Value::String(String::from("Jordi")), - )] - .into_iter(), + vec![(Word::from("name"), r::Value::String(String::from("Jordi")))].into_iter(), ) } .into() diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 498e2149532..e05e0fa8377 100644 --- a/server/index-node/src/resolver.rs +++ 
b/server/index-node/src/resolver.rs @@ -606,9 +606,9 @@ impl IndexNodeResolver { // We then bulid a GraphqQL `Object` value that contains the feature detection and // validation results and send it back as a response. let response = [ - ("features".to_string(), features), - ("errors".to_string(), errors), - ("network".to_string(), network), + ("features".into(), features), + ("errors".into(), errors), + ("network".into(), network), ]; let response = Object::from_iter(response); @@ -621,7 +621,7 @@ impl IndexNodeResolver { .iter() .map(|version| { r::Value::Object(Object::from_iter(vec![( - "version".to_string(), + "version".into(), r::Value::String(version.to_string()), )])) }) diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index 3f013449fa9..a880bd6b33a 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -374,7 +374,10 @@ impl ValidatedRequest { #[cfg(test)] mod tests { - use graph::{data::value::Object, prelude::*}; + use graph::{ + data::value::{Object, Word}, + prelude::*, + }; use hyper::body::Bytes; use hyper::HeaderMap; @@ -474,7 +477,7 @@ mod tests { ( String::from("map"), r::Value::Object(Object::from_iter( - vec![(String::from("k"), r::Value::String(String::from("v")))].into_iter(), + vec![(Word::from("k"), r::Value::String(String::from("v")))].into_iter(), )), ), (String::from("int"), r::Value::Int(5)), diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 61ec3fd2f8f..5d878cee115 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -238,39 +238,31 @@ impl ForeignKeyClauses for Column { } } -pub trait FromEntityData: std::fmt::Debug + std::default::Default { +pub trait FromEntityData: Sized { type Value: FromColumnValue; - fn new_entity(typename: String) -> Self; - - fn insert_entity_data(&mut self, key: String, v: Self::Value); + fn from_data>>( + iter: I, + ) -> Result; } impl 
FromEntityData for Entity { type Value = graph::prelude::Value; - fn new_entity(typename: String) -> Self { - let mut entity = Entity::new(); - entity.insert("__typename".to_string(), Self::Value::String(typename)); - entity - } - - fn insert_entity_data(&mut self, key: String, v: Self::Value) { - self.insert(key, v); + fn from_data>>( + iter: I, + ) -> Result { + as FromIterator>>::from_iter(iter) } } -// TODO: This implementation is not very efficient; we will address that by -// making deserialize_with_layout return an iterator impl FromEntityData for Object { type Value = r::Value; - fn new_entity(typename: String) -> Self { - Object::from_iter([("__typename".into(), Self::Value::from_string(typename))]) - } - - fn insert_entity_data(&mut self, key: String, v: Self::Value) { - self.extend([(Word::from(key), v)]) + fn from_data>>( + iter: I, + ) -> Result { + as FromIterator>>::from_iter(iter) } } @@ -492,18 +484,20 @@ impl EntityData { parent_type: Option<&ColumnType>, remove_typename: bool, ) -> Result { - let entity_type = EntityType::new(self.entity); + let entity_type = EntityType::new(self.entity.clone()); let table = layout.table_for_entity(&entity_type)?; use serde_json::Value as j; match self.data { j::Object(map) => { - let mut out = if !remove_typename { - T::new_entity(entity_type.into_string()) - } else { - T::default() - }; - for (key, json) in map { + let typname = std::iter::once(self.entity).filter_map(move |e| { + if remove_typename { + None + } else { + Some(Ok((Word::from("__typename"), T::Value::from_string(e)))) + } + }); + let entries = map.into_iter().filter_map(move |(key, json)| { // Simply ignore keys that do not have an underlying table // column; those will be things like the block_range that // is used internally for versioning @@ -513,23 +507,26 @@ impl EntityData { // A query that does not have parents // somehow returned parent ids. 
We have no // idea how to deserialize that - return Err(graph::constraint_violation!( + Some(Err(graph::constraint_violation!( "query unexpectedly produces parent ids" - )); - } - Some(parent_type) => { - let value = T::Value::from_column_value(parent_type, json)?; - out.insert_entity_data("g$parent_id".to_owned(), value); + ))) } + Some(parent_type) => Some( + T::Value::from_column_value(parent_type, json) + .map(|value| (Word::from("g$parent_id"), value)), + ), } } else if let Some(column) = table.column(&SqlName::verbatim(key)) { - let value = T::Value::from_column_value(&column.column_type, json)?; - if !value.is_null() { - out.insert_entity_data(column.field.clone(), value); + match T::Value::from_column_value(&column.column_type, json) { + Ok(value) if value.is_null() => None, + Ok(value) => Some(Ok((Word::from(column.field.to_string()), value))), + Err(e) => Some(Err(e)), } + } else { + None } - } - Ok(out) + }); + T::from_data(typname.chain(entries)) } _ => unreachable!( "we use `to_json` in our queries, and will therefore always get an object back" From 624e909606afb1baffb8d5025a8b764771811d35 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 8 Apr 2023 15:58:37 -0700 Subject: [PATCH 0143/2104] graph, runtime: Remove Default implementation from Entity --- graph/src/data/store/mod.rs | 4 ++-- runtime/wasm/src/host_exports.rs | 8 ++------ runtime/wasm/src/module/mod.rs | 7 ++++++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 3663db45c51..df870381551 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -584,7 +584,7 @@ where } /// An entity is represented as a map of attribute names to values. -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub struct Entity(HashMap); impl stable_hash_legacy::StableHash for Entity { @@ -626,7 +626,7 @@ macro_rules! 
entity { impl Entity { /// Creates a new entity with no attributes set. pub fn new() -> Self { - Default::default() + Entity(HashMap::new()) } pub fn get(&self, key: &str) -> Option<&Value> { diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index ef2ac1c09c5..f6db2b35311 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -752,13 +752,9 @@ impl HostExports { pub(crate) fn data_source_context( &self, gas: &GasCounter, - ) -> Result { + ) -> Result, DeterministicHostError> { gas.consume_host_fn(Gas::new(gas::DEFAULT_BASE_COST))?; - Ok(self - .data_source_context - .as_ref() - .clone() - .unwrap_or_default()) + Ok(self.data_source_context.as_ref().clone()) } pub(crate) fn json_from_bytes( diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index cbcc4eea3e8..8b560d0ec98 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1774,7 +1774,12 @@ impl WasmInstanceContext { ) -> Result, HostExportError> { asc_new( self, - &self.ctx.host_exports.data_source_context(gas)?.sorted(), + &self + .ctx + .host_exports + .data_source_context(gas)? 
+ .map(|e| e.sorted()) + .unwrap_or(vec![]), gas, ) } From f55947dec1e3abc4181375be27b754f43cb5648c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 8 Apr 2023 16:15:50 -0700 Subject: [PATCH 0144/2104] graph, store: Use Entity.remove_null_fields where possible --- graph/src/components/store/entity_cache.rs | 11 +++++------ graph/src/data/store/mod.rs | 9 +++++++++ store/test-store/tests/postgres/relational.rs | 6 ++---- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index e1340d36ec7..0f3257cbad2 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -290,12 +290,11 @@ impl EntityCache { let current = self.current.remove(&key).and_then(|entity| entity); let modification = match (current, update) { // Entity was created - (None, EntityOp::Update(updates)) | (None, EntityOp::Overwrite(updates)) => { - // Merging with an empty entity removes null fields. 
- let mut data = Entity::new(); - data.merge_remove_null_fields(updates); - self.current.insert(key.clone(), Some(data.clone())); - Some(Insert { key, data }) + (None, EntityOp::Update(mut updates)) + | (None, EntityOp::Overwrite(mut updates)) => { + updates.remove_null_fields(); + self.current.insert(key.clone(), Some(updates.clone())); + Some(Insert { key, data: updates }) } // Entity may have been changed (Some(current), EntityOp::Update(updates)) => { diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index df870381551..9b42805d2bf 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -425,6 +425,10 @@ impl Value { _ => false, } } + + fn is_null(&self) -> bool { + matches!(self, Value::Null) + } } impl fmt::Display for Value { @@ -695,6 +699,11 @@ impl Entity { } } + /// Remove all entries with value `Value::Null` from `self` + pub fn remove_null_fields(&mut self) { + self.0.retain(|_, value| !value.is_null()) + } + /// Validate that this entity matches the object type definition in the /// schema. 
An entity that passes these checks can be stored /// successfully in the subgraph's database schema diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 7c3b5fa4f1e..214232a0e47 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -432,10 +432,8 @@ fn create_schema(conn: &PgConnection) -> Layout { } fn scrub(entity: &Entity) -> Entity { - let mut scrubbed = Entity::new(); - // merge_remove_null_fields has the side-effect of removing any attribute - // that is Value::Null - scrubbed.merge_remove_null_fields(entity.clone()); + let mut scrubbed = entity.clone(); + scrubbed.remove_null_fields(); scrubbed } From f5e57d7ecef8c4e99dff5562dce5286982928d83 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 12 Apr 2023 13:31:49 -0700 Subject: [PATCH 0145/2104] all: Make InputSchema a factory for Entity For now, we use a fake `AtomPool`, but eventually, all entities will need to be created in connection to an `AtomPool` that comes from the `InputSchema`. 
This also changes `Entity` from `HashMap` to `HashMap` --- chain/substreams/src/trigger.rs | 14 +- graph/src/components/store/entity_cache.rs | 6 + .../subgraph/proof_of_indexing/event.rs | 3 +- .../subgraph/proof_of_indexing/mod.rs | 6 +- graph/src/data/store/mod.rs | 47 ++--- graph/src/data/value.rs | 24 +++ graph/src/schema/input_schema.rs | 12 +- graph/src/schema/mod.rs | 4 + runtime/test/src/test.rs | 15 +- runtime/wasm/src/host_exports.rs | 8 +- runtime/wasm/src/module/mod.rs | 8 +- runtime/wasm/src/to_from/external.rs | 5 +- runtime/wasm/src/to_from/mod.rs | 31 ++- store/postgres/src/fork.rs | 45 ++--- store/test-store/tests/core/interfaces.rs | 191 ++++-------------- store/test-store/tests/graph/entity_cache.rs | 96 +++------ store/test-store/tests/postgres/store.rs | 39 +--- 17 files changed, 216 insertions(+), 338 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index cbe73125bb7..74519f0a563 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -7,10 +7,10 @@ use graph::{ store::{DeploymentLocator, EntityKey, EntityType, SubgraphFork}, subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, }, - data::store::scalar::Bytes, + data::{store::scalar::Bytes, value::Word}, data_source::{self, CausalityRegion}, prelude::{ - anyhow, async_trait, BigDecimal, BigInt, BlockHash, BlockNumber, BlockState, Entity, + anyhow, async_trait, BigDecimal, BigInt, BlockHash, BlockNumber, BlockState, RuntimeHostBuilder, Value, }, slog::Logger, @@ -189,7 +189,7 @@ where entity_id: entity_id.clone().into(), causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data }; - let mut data: HashMap = HashMap::from_iter(vec![]); + let mut data: HashMap = HashMap::from_iter(vec![]); for field in entity_change.fields.iter() { let new_value: &codec::value::Typed = match &field.new_value { @@ -200,7 +200,9 @@ where }; let value: Value = decode_value(new_value)?; - 
*data.entry(field.name.clone()).or_insert(Value::Null) = value; + *data + .entry(Word::from(field.name.clone())) + .or_insert(Value::Null) = value; } write_poi_event( @@ -208,13 +210,15 @@ where &ProofOfIndexingEvent::SetEntity { entity_type, id: &entity_id, + // TODO: This should be an entity so we do not have to build the intermediate HashMap data: &data, }, causality_region, logger, ); - state.entity_cache.set(key, Entity::from(data))?; + let entity = state.entity_cache.make_entity(data); + state.entity_cache.set(key, entity)?; } Operation::Delete => { let entity_type: &str = &entity_change.entity; diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 0f3257cbad2..a1829b35a9b 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -4,6 +4,7 @@ use std::fmt::{self, Debug}; use std::sync::Arc; use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; +use crate::data::store::IntoEntityIterator; use crate::prelude::ENV_VARS; use crate::schema::InputSchema; use crate::util::lfu_cache::LfuCache; @@ -71,6 +72,11 @@ impl EntityCache { } } + /// Make a new entity. 
The entity is not part of the cache + pub fn make_entity(&self, iter: I) -> Entity { + self.schema.make_entity(iter) + } + pub fn with_current( store: Arc, current: LfuCache>, diff --git a/graph/src/components/subgraph/proof_of_indexing/event.rs b/graph/src/components/subgraph/proof_of_indexing/event.rs index 06d4e9f41e8..f2ca94f1d60 100644 --- a/graph/src/components/subgraph/proof_of_indexing/event.rs +++ b/graph/src/components/subgraph/proof_of_indexing/event.rs @@ -1,3 +1,4 @@ +use crate::data::value::Word; use crate::prelude::{impl_slog_value, Value}; use stable_hash_legacy::StableHasher; use std::collections::{BTreeMap, HashMap}; @@ -13,7 +14,7 @@ pub enum ProofOfIndexingEvent<'a> { SetEntity { entity_type: &'a str, id: &'a str, - data: &'a HashMap, + data: &'a HashMap, }, /// For when a deterministic error has happened. /// diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 90562d37595..457f39c7514 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -131,12 +131,12 @@ mod tests { #[test] fn online_vs_reference() { let data = hashmap! { - "val".to_owned() => Value::Int(1) + "val".into() => Value::Int(1) }; let data_empty = hashmap! {}; let data2 = hashmap! 
{ - "key".to_owned() => Value::String("s".to_owned()), - "null".to_owned() => Value::Null, + "key".into() => Value::String("s".to_owned()), + "null".into() => Value::Null, }; let mut cases = vec![ diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 9b42805d2bf..10af093e841 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -3,7 +3,7 @@ use crate::{ data::graphql::ObjectTypeExt, prelude::{anyhow::Context, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, - schema::InputSchema, + schema::{AtomPool, InputSchema}, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; use anyhow::{anyhow, Error}; @@ -589,7 +589,11 @@ where /// An entity is represented as a map of attribute names to values. #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] -pub struct Entity(HashMap); +pub struct Entity(HashMap); + +pub trait IntoEntityIterator: IntoIterator {} + +impl> IntoEntityIterator for T {} impl stable_hash_legacy::StableHash for Entity { #[inline] @@ -628,29 +632,33 @@ macro_rules! entity { } impl Entity { + pub fn make(_pool: AtomPool, iter: I) -> Entity { + Entity(HashMap::from_iter(iter)) + } + /// Creates a new entity with no attributes set. pub fn new() -> Self { Entity(HashMap::new()) } pub fn get(&self, key: &str) -> Option<&Value> { - self.0.get(key) + self.0.get(&Word::from(key)) } pub fn insert(&mut self, key: String, value: Value) -> Option { - self.0.insert(key, value) + self.0.insert(Word::from(key), value) } pub fn remove(&mut self, key: &str) -> Option { - self.0.remove(key) + self.0.remove(&Word::from(key)) } pub fn contains_key(&self, key: &str) -> bool { - self.0.contains_key(key) + self.0.contains_key(&Word::from(key)) } // This collects the entity into an ordered vector so that it can be iterated deterministically. 
- pub fn sorted(self) -> Vec<(String, Value)> { + pub fn sorted(self) -> Vec<(Word, Value)> { let mut v: Vec<_> = self.0.into_iter().collect(); v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); v @@ -671,7 +679,7 @@ impl Entity { /// Convenience method to save having to `.into()` the arguments. pub fn set(&mut self, name: impl Into, value: impl Into) -> Option { - self.0.insert(name.into(), value.into()) + self.0.insert(Word::from(name.into()), value.into()) } /// Merges an entity update `update` into this entity. @@ -681,7 +689,7 @@ impl Entity { /// If a key is set to `Value::Null` in `update`, the key/value pair is set to `Value::Null`. pub fn merge(&mut self, update: Entity) { for (key, value) in update.0.into_iter() { - self.insert(key, value); + self.insert(key.to_string(), value); } } @@ -694,7 +702,7 @@ impl Entity { for (key, value) in update.0.into_iter() { match value { Value::Null => self.remove(&key), - _ => self.insert(key, value), + _ => self.insert(key.to_string(), value), }; } } @@ -830,32 +838,15 @@ impl Entity { } } -impl From> for Entity { - fn from(m: HashMap) -> Entity { - Entity(m) - } -} - impl<'a> From<&'a Entity> for Cow<'a, Entity> { fn from(entity: &'a Entity) -> Self { Cow::Borrowed(entity) } } -impl<'a> From> for Entity { - fn from(entries: Vec<(&'a str, Value)>) -> Entity { - Entity::from(HashMap::from_iter( - entries.into_iter().map(|(k, v)| (String::from(k), v)), - )) - } -} - impl FromIterator<(Word, Value)> for Entity { fn from_iter>(iter: T) -> Self { - Entity(HashMap::from_iter( - iter.into_iter() - .map(|(key, value)| (String::from(key), value)), - )) + Entity(HashMap::from_iter(iter)) } } diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 321745e3d75..7847992594d 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -1,4 +1,5 @@ use crate::prelude::{q, s, CacheWeight}; +use crate::runtime::gas::{Gas, GasSizeOf, SaturatingInto}; use serde::ser::{SerializeMap, SerializeSeq, Serializer}; use 
serde::Serialize; use std::collections::BTreeMap; @@ -67,6 +68,29 @@ impl<'de> serde::Deserialize<'de> for Word { } } +impl stable_hash_legacy::StableHash for Word { + #[inline] + fn stable_hash( + &self, + sequence_number: H::Seq, + state: &mut H, + ) { + self.as_str().stable_hash(sequence_number, state) + } +} + +impl stable_hash::StableHash for Word { + fn stable_hash(&self, field_address: H::Addr, state: &mut H) { + self.as_str().stable_hash(field_address, state) + } +} + +impl GasSizeOf for Word { + fn gas_size_of(&self) -> Gas { + self.0.len().saturating_into() + } +} + #[derive(Clone, Debug, PartialEq)] struct Entry { key: Option, diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 1fa1147e6d5..e01d1b4a451 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -2,22 +2,24 @@ use std::collections::{BTreeMap, HashSet}; use std::str::FromStr; use anyhow::{anyhow, Error}; +use store::Entity; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::store::{self, scalar}; +use crate::data::store::{self, scalar, IntoEntityIterator}; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, Schema, SchemaValidationError}; +use super::{ApiSchema, AtomPool, Schema, SchemaValidationError}; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { schema: Schema, immutable_types: HashSet, + pool: AtomPool, } impl InputSchema { @@ -30,9 +32,11 @@ impl InputSchema { .filter(|obj_type| obj_type.is_immutable()) .map(Into::into), ); + let pool = AtomPool; Self { schema, immutable_types, + pool, } } pub fn new(id: DeploymentHash, document: s::Document) -> Result { @@ -242,4 +246,8 @@ impl InputSchema { pub(crate) fn 
validate(&self) -> Result<(), Vec> { self.schema.validate() } + + pub fn make_entity(&self, iter: I) -> Entity { + Entity::make(self.pool.clone(), iter) + } } diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 5d4a3a0789a..1d5e393fb0b 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -37,6 +37,10 @@ pub use api::{ApiSchema, ErrorPolicy}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; +/// Placeholder type until we are ready to use a real intern::AtomPool +#[derive(Clone, Debug, PartialEq)] +pub struct AtomPool; + pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; pub const META_FIELD_TYPE: &str = "_Meta_"; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 90c17daabec..3f3c8f5f3e9 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1,5 +1,6 @@ use graph::data::store::scalar; use graph::data::subgraph::*; +use graph::data::value::Word; use graph::prelude::web3::types::U256; use graph::prelude::*; use graph::runtime::{AscIndexId, AscType}; @@ -923,6 +924,8 @@ async fn test_entity_store(api_version: Version) { ) .await; + let schema = store.input_schema(&deployment.hash).unwrap(); + let mut alex = Entity::new(); alex.set("id", "alex"); alex.set("name", "Alex"); @@ -942,11 +945,13 @@ async fn test_entity_store(api_version: Version) { if entity_ptr.is_null() { None } else { - Some(Entity::from( - module - .asc_get::, _>(entity_ptr) - .unwrap(), - )) + Some( + schema.make_entity( + module + .asc_get::, _>(entity_ptr) + .unwrap(), + ), + ) } }; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index f6db2b35311..c578c4b94c6 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -3,6 +3,7 @@ use std::ops::Deref; use std::str::FromStr; use std::time::{Duration, Instant}; +use graph::data::value::Word; use never::Never; use semver::Version; use 
wasmtime::Trap; @@ -155,7 +156,7 @@ impl HostExports { proof_of_indexing: &SharedProofOfIndexing, entity_type: String, entity_id: String, - data: HashMap, + data: HashMap, stopwatch: &StopwatchMetrics, gas: &GasCounter, ) -> Result<(), HostExportError> { @@ -181,7 +182,10 @@ impl HostExports { gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; - let entity = Entity::from(data); + let entity = state + .entity_cache + .make_entity(data.into_iter().map(|(key, value)| (key, value))); + state.entity_cache.set(key, entity)?; Ok(()) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 8b560d0ec98..73096d1f293 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -10,6 +10,7 @@ use std::time::Instant; use anyhow::anyhow; use anyhow::Error; use graph::components::store::GetScope; +use graph::data::value::Word; use graph::slog::SendSyncRefUnwindSafeKV; use never::Never; use semver::Version; @@ -1117,7 +1118,7 @@ impl WasmInstanceContext { gas, )?; - let entities: Vec> = + let entities: Vec> = entities.into_iter().map(|entity| entity.sorted()).collect(); let ret = asc_new(self, &entities, gas)?; Ok(ret) @@ -1736,12 +1737,15 @@ impl WasmInstanceContext { let name: String = asc_get(self, name_ptr, gas)?; let params: Vec = asc_get(self, params_ptr, gas)?; let context: HashMap<_, _> = asc_get(self, context_ptr, gas)?; + + let context = self.ctx.state.entity_cache.make_entity(context); + self.ctx.host_exports.data_source_create( &self.ctx.logger, &mut self.ctx.state, name, params, - Some(context.into()), + Some(context), self.ctx.block_ptr.number, gas, ) diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index fd8a2e2ad16..a2ec718ecf4 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -1,5 +1,6 @@ use ethabi; +use graph::data::value::Word; use graph::prelude::{BigDecimal, BigInt}; use 
graph::runtime::gas::GasCounter; use graph::runtime::{ @@ -321,7 +322,7 @@ impl ToAscObj for serde_json::Map { } // Used for serializing entities. -impl ToAscObj for Vec<(String, store::Value)> { +impl ToAscObj for Vec<(Word, store::Value)> { fn to_asc_obj( &self, heap: &mut H, @@ -333,7 +334,7 @@ impl ToAscObj for Vec<(String, store::Value)> { } } -impl ToAscObj>> for Vec> { +impl ToAscObj>> for Vec> { fn to_asc_obj( &self, heap: &mut H, diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 31713f282ea..21e79ca3242 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -3,9 +3,12 @@ use std::collections::HashMap; use std::hash::Hash; use std::iter::FromIterator; -use graph::runtime::{ - asc_get, asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, AscValue, - DeterministicHostError, FromAscObj, HostExportError, ToAscObj, +use graph::{ + data::value::Word, + runtime::{ + asc_get, asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, AscValue, + DeterministicHostError, FromAscObj, HostExportError, ToAscObj, + }, }; use crate::asc_abi::class::*; @@ -70,6 +73,16 @@ impl ToAscObj for String { } } +impl ToAscObj for Word { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + self.as_str().to_asc_obj(heap, gas) + } +} + impl FromAscObj for String { fn from_asc_obj( asc_string: AscString, @@ -87,6 +100,18 @@ impl FromAscObj for String { } } +impl FromAscObj for Word { + fn from_asc_obj( + asc_string: AscString, + heap: &H, + gas: &GasCounter, + ) -> Result { + let string = String::from_asc_obj(asc_string, heap, gas)?; + + Ok(Word::from(string)) + } +} + impl> ToAscObj>> for [T] { fn to_asc_obj( &self, diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 3cae590bf2f..9f03e5e2792 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -4,7 +4,6 @@ use std::{ sync::{Arc, Mutex}, }; -use graph::schema::InputSchema; use 
graph::{ block_on, components::store::{EntityType, SubgraphFork as SubgraphForkTrait}, @@ -14,11 +13,11 @@ use graph::{ r::Value as RValue, reqwest, s::{Field, ObjectType}, - serde_json, Attribute, DeploymentHash, Entity, Logger, Serialize, StoreError, Value, - ValueType, + serde_json, DeploymentHash, Entity, Logger, Serialize, StoreError, Value, ValueType, }, url::Url, }; +use graph::{data::value::Word, schema::InputSchema}; use inflector::Inflector; #[derive(Serialize, Debug, PartialEq)] @@ -83,7 +82,7 @@ impl SubgraphForkTrait for SubgraphFork { ))); } - let entity = SubgraphFork::extract_entity(&raw_json, &entity_type, fields)?; + let entity = SubgraphFork::extract_entity(&self.schema, &raw_json, &entity_type, fields)?; Ok(entity) } } @@ -172,6 +171,7 @@ query Query ($id: String) {{ } fn extract_entity( + schema: &InputSchema, raw_json: &str, entity_type: &str, fields: &[Field], @@ -183,7 +183,7 @@ query Query ($id: String) {{ return Ok(None); } - let map: HashMap = { + let map: HashMap = { let mut map = HashMap::new(); for f in fields { if f.is_derived() { @@ -215,18 +215,18 @@ query Query ($id: String) {{ e )) })?; - map.insert(f.name.clone(), value); + map.insert(Word::from(f.name.clone()), value); } map }; - Ok(Some(Entity::from(map))) + Ok(Some(schema.make_entity(map))) } } #[cfg(test)] mod tests { - use std::{iter::FromIterator, str::FromStr}; + use std::str::FromStr; use super::*; @@ -348,7 +348,9 @@ mod tests { #[test] fn test_extract_entity() { + let schema = test_schema(); let entity = SubgraphFork::extract_entity( + &schema, r#"{ "data": { "gravatar": { @@ -366,21 +368,18 @@ mod tests { assert_eq!( entity.unwrap(), - Entity::from(HashMap::from_iter( - vec![ - ("id".to_string(), Value::String("0x00".to_string())), - ( - "owner".to_string(), - Value::Bytes(scalar::Bytes::from_str("0x01").unwrap()) - ), - ("displayName".to_string(), Value::String("test".to_string())), - ( - "imageUrl".to_string(), - 
Value::String("http://example.com/image.png".to_string()) - ), - ] - .into_iter() - )) + schema.make_entity(vec![ + ("id".into(), Value::String("0x00".to_string())), + ( + "owner".into(), + Value::Bytes(scalar::Bytes::from_str("0x01").unwrap()) + ), + ("displayName".into(), Value::String("test".to_string())), + ( + "imageUrl".into(), + Value::String("http://example.com/image.png".to_string()) + ), + ]) ); } } diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index 2c5916ea853..33a77faa967 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -66,10 +66,7 @@ async fn one_interface_one_entity() { let schema = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int }"; - let entity = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("legs", Value::from(3))]), - ); + let entity = ("Animal", entity! { id: "1", legs: 3 }); // Collection query. let query = "query { leggeds(first: 100) { legs } }"; @@ -96,10 +93,7 @@ async fn one_interface_one_entity_typename() { let schema = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int }"; - let entity = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("legs", Value::from(3))]), - ); + let entity = ("Animal", entity! { id: "1", legs: 3 }); let query = "query { leggeds(first: 100) { __typename } }"; @@ -119,14 +113,8 @@ async fn one_interface_multiple_entities() { type Furniture implements Legged @entity { id: ID!, legs: Int } "; - let animal = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("legs", Value::from(3))]), - ); - let furniture = ( - "Furniture", - Entity::from(vec![("id", Value::from("2")), ("legs", Value::from(4))]), - ); + let animal = ("Animal", entity! { id: "1", legs: 3 }); + let furniture = ("Furniture", entity! 
{ id: "2", legs: 4 }); let query = "query { leggeds(first: 100, orderBy: legs) { legs } }"; @@ -156,11 +144,8 @@ async fn reference_interface() { let query = "query { leggeds(first: 100) { leg { id } } }"; - let leg = ("Leg", Entity::from(vec![("id", Value::from("1"))])); - let animal = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("leg", Value::from("1"))]), - ); + let leg = ("Leg", entity! { id: "1" }); + let animal = ("Animal", entity! { id: "1", leg: 1 }); let res = insert_and_query(subgraph_id, schema, vec![leg, animal], query) .await @@ -209,20 +194,13 @@ async fn reference_interface_derived() { let query = "query { events { id transaction { id } } }"; - let buy = ("BuyEvent", Entity::from(vec![("id", "buy".into())])); - let sell1 = ("SellEvent", Entity::from(vec![("id", "sell1".into())])); - let sell2 = ("SellEvent", Entity::from(vec![("id", "sell2".into())])); - let gift = ( - "GiftEvent", - Entity::from(vec![("id", "gift".into()), ("transaction", "txn".into())]), - ); + let buy = ("BuyEvent", entity! { id: "buy" }); + let sell1 = ("SellEvent", entity! { id: "sell1" }); + let sell2 = ("SellEvent", entity! { id: "sell2" }); + let gift = ("GiftEvent", entity! { id: "gift", transaction: "txn" }); let txn = ( "Transaction", - Entity::from(vec![ - ("id", "txn".into()), - ("buyEvent", "buy".into()), - ("sellEvents", vec!["sell1", "sell2"].into()), - ]), + entity! {id: "txn", buyEvent: "buy", sellEvents: vec!["sell1", "sell2"] }, ); let entities = vec![buy, sell1, sell2, gift, txn]; @@ -289,20 +267,9 @@ async fn follow_interface_reference() { let parent = ( "Animal", - Entity::from(vec![ - ("id", Value::from("parent")), - ("legs", Value::from(4)), - ("parent", Value::Null), - ]), - ); - let child = ( - "Animal", - Entity::from(vec![ - ("id", Value::from("child")), - ("legs", Value::from(3)), - ("parent", Value::String("parent".into())), - ]), + entity! { id: "parent", legs: 4, parent: Value::Null }, ); + let child = ("Animal", entity! 
{ id: "child", legs: 3, parent: "parent" }); let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) .await @@ -323,14 +290,8 @@ async fn conflicting_implementors_id() { type Furniture implements Legged @entity { id: ID!, legs: Int } "; - let animal = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("legs", Value::from(3))]), - ); - let furniture = ( - "Furniture", - Entity::from(vec![("id", Value::from("1")), ("legs", Value::from(3))]), - ); + let animal = ("Animal", entity! { id: "1", legs: 3 }); + let furniture = ("Furniture", entity! { id: "1", legs: 3 }); let query = "query { leggeds(first: 100) { legs } }"; @@ -357,11 +318,8 @@ async fn derived_interface_relationship() { type Forest @entity { id: ID!, dwellers: [ForestDweller]! @derivedFrom(field: \"forest\") } "; - let forest = ("Forest", Entity::from(vec![("id", Value::from("1"))])); - let animal = ( - "Animal", - Entity::from(vec![("id", Value::from("1")), ("forest", Value::from("1"))]), - ); + let forest = ("Forest", entity! { id: "1" }); + let animal = ("Animal", entity! { id: "1", forest: "1" }); let query = "query { forests(first: 100) { dwellers(first: 100) { id } } }"; @@ -387,22 +345,9 @@ async fn two_interfaces() { type AB implements IFoo & IBar @entity { id: ID!, foo: String!, bar: Int! } "; - let a = ( - "A", - Entity::from(vec![("id", Value::from("1")), ("foo", Value::from("bla"))]), - ); - let b = ( - "B", - Entity::from(vec![("id", Value::from("1")), ("bar", Value::from(100))]), - ); - let ab = ( - "AB", - Entity::from(vec![ - ("id", Value::from("2")), - ("foo", Value::from("ble")), - ("bar", Value::from(200)), - ]), - ); + let a = ("A", entity! { id: "1", foo: "bla" }); + let b = ("B", entity! { id: "1", bar: 100 }); + let ab = ("AB", entity! 
{ id: "2", foo: "ble", bar: 200 }); let query = "query { ibars(first: 100, orderBy: bar) { bar } @@ -425,14 +370,7 @@ async fn interface_non_inline_fragment() { let schema = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, name: String, legs: Int }"; - let entity = ( - "Animal", - Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("cow")), - ("legs", Value::from(3)), - ]), - ); + let entity = ("Animal", entity! { id: "1", name: "cow", legs: 3 }); // Query only the fragment. let query = "query { leggeds { ...frag } } fragment frag on Animal { name }"; @@ -460,22 +398,8 @@ async fn interface_inline_fragment() { type Animal implements Legged @entity { id: ID!, name: String, legs: Int } type Bird implements Legged @entity { id: ID!, airspeed: Int, legs: Int }"; - let animal = ( - "Animal", - Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("cow")), - ("legs", Value::from(4)), - ]), - ); - let bird = ( - "Bird", - Entity::from(vec![ - ("id", Value::from("2")), - ("airspeed", Value::from(24)), - ("legs", Value::from(2)), - ]), - ); + let animal = ("Animal", entity! { id: "1", name: "cow", legs: 4 }); + let bird = ("Bird", entity! { id: "2", airspeed: 24, legs: 2 }); let query = "query { leggeds(orderBy: legs) { ... on Animal { name } ...on Bird { airspeed } } }"; @@ -509,32 +433,16 @@ async fn interface_inline_fragment_with_subquery() { } "; - let mama_cow = ( - "Parent", - Entity::from(vec![("id", Value::from("mama_cow"))]), - ); + let mama_cow = ("Parent", entity! { id: "mama_cow" }); let cow = ( "Animal", - Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("cow")), - ("legs", Value::from(4)), - ("parent", Value::from("mama_cow")), - ]), + entity! { id: "1", name: "cow", legs: 4, parent: "mama_cow" }, ); - let mama_bird = ( - "Parent", - Entity::from(vec![("id", Value::from("mama_bird"))]), - ); + let mama_bird = ("Parent", entity! 
{ id: "mama_bird" }); let bird = ( "Bird", - Entity::from(vec![ - ("id", Value::from("2")), - ("airspeed", Value::from(5)), - ("legs", Value::from(2)), - ("parent", Value::from("mama_bird")), - ]), + entity! { id: "2", airspeed: 5, legs: 2, parent: "mama_bird" }, ); let query = "query { leggeds(orderBy: legs) { legs ... on Bird { airspeed parent { id } } } }"; @@ -610,20 +518,9 @@ async fn alias() { let parent = ( "Animal", - Entity::from(vec![ - ("id", Value::from("parent")), - ("legs", Value::from(4)), - ("parent", Value::Null), - ]), - ); - let child = ( - "Animal", - Entity::from(vec![ - ("id", Value::from("child")), - ("legs", Value::from(3)), - ("parent", Value::String("parent".into())), - ]), + entity! { id: "parent", legs: 4, parent: Value::Null }, ); + let child = ("Animal", entity! { id: "child", legs: 3, parent: "parent" }); let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) .await @@ -1244,19 +1141,11 @@ async fn enums() { let entities = vec![ ( "Trajectory", - Entity::from(vec![ - ("id", Value::from("1")), - ("direction", Value::from("EAST")), - ("meters", Value::from(10)), - ]), + entity! { id: "1", direction: "EAST", meters: 10 }, ), ( "Trajectory", - Entity::from(vec![ - ("id", Value::from("2")), - ("direction", Value::from("NORTH")), - ("meters", Value::from(15)), - ]), + entity! { id: "2", direction: "NORTH", meters: 15 }, ), ]; let query = "query { trajectories { id, direction, meters } }"; @@ -1304,27 +1193,15 @@ async fn enum_list_filters() { let entities = vec![ ( "Trajectory", - Entity::from(vec![ - ("id", Value::from("1")), - ("direction", Value::from("EAST")), - ("meters", Value::from(10)), - ]), + entity! { id: "1", direction: "EAST", meters: 10 }, ), ( "Trajectory", - Entity::from(vec![ - ("id", Value::from("2")), - ("direction", Value::from("NORTH")), - ("meters", Value::from(15)), - ]), + entity! 
{ id: "2", direction: "NORTH", meters: 15 }, ), ( "Trajectory", - Entity::from(vec![ - ("id", Value::from("3")), - ("direction", Value::from("WEST")), - ("meters", Value::from(20)), - ]), + entity! { id: "3", direction: "WEST", meters: 20 }, ), ]; diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 414ab08678c..4ff62561dc2 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -171,15 +171,12 @@ impl WritableStore for MockStore { } } -fn make_band(id: &'static str, data: Vec<(&str, Value)>) -> (EntityKey, Entity) { - ( - EntityKey { - entity_type: EntityType::new("Band".to_string()), - entity_id: id.into(), - causality_region: CausalityRegion::ONCHAIN, - }, - Entity::from(data), - ) +fn make_band_key(id: &'static str) -> EntityKey { + EntityKey { + entity_type: EntityType::new("Band".to_string()), + entity_id: id.into(), + causality_region: CausalityRegion::ONCHAIN, + } } fn sort_by_entity_key(mut mods: Vec) -> Vec { @@ -204,16 +201,12 @@ fn insert_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let (mogwai_key, mogwai_data) = make_band( - "mogwai", - vec![("id", "mogwai".into()), ("name", "Mogwai".into())], - ); + let mogwai_data = entity! { id: "mogwai", name: "Mogwai" }; + let mogwai_key = make_band_key("mogwai"); cache.set(mogwai_key.clone(), mogwai_data.clone()).unwrap(); - let (sigurros_key, sigurros_data) = make_band( - "sigurros", - vec![("id", "sigurros".into()), ("name", "Sigur Ros".into())], - ); + let sigurros_data = entity! { id: "sigurros", name: "Sigur Ros" }; + let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone()) .unwrap(); @@ -253,16 +246,8 @@ fn overwrite_modifications() { // every set operation as an overwrite. 
let store = { let entities = vec![ - make_band( - "mogwai", - vec![("id", "mogwai".into()), ("name", "Mogwai".into())], - ) - .1, - make_band( - "sigurros", - vec![("id", "sigurros".into()), ("name", "Sigur Ros".into())], - ) - .1, + entity! { id: "mogwai", name: "Mogwai" }, + entity! { id: "sigurros", name: "Sigur Ros" }, ]; MockStore::new(entity_version_map("Band", entities)) }; @@ -270,24 +255,12 @@ fn overwrite_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let (mogwai_key, mogwai_data) = make_band( - "mogwai", - vec![ - ("id", "mogwai".into()), - ("name", "Mogwai".into()), - ("founded", 1995.into()), - ], - ); + let mogwai_data = entity! { id: "mogwai", name: "Mogwai", founded: 1995 }; + let mogwai_key = make_band_key("mogwai"); cache.set(mogwai_key.clone(), mogwai_data.clone()).unwrap(); - let (sigurros_key, sigurros_data) = make_band( - "sigurros", - vec![ - ("id", "sigurros".into()), - ("name", "Sigur Ros".into()), - ("founded", 1994.into()), - ], - ); + let sigurros_data = entity! { id: "sigurros", name: "Sigur Ros", founded: 1994 }; + let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone()) .unwrap(); @@ -313,17 +286,8 @@ fn consecutive_modifications() { // Pre-populate the store with data so that we can test setting a field to // `Value::Null`. let store = { - let entities = vec![ - make_band( - "mogwai", - vec![ - ("id", "mogwai".into()), - ("name", "Mogwai".into()), - ("label", "Chemikal Underground".into()), - ], - ) - .1, - ]; + let entities = + vec![entity! { id: "mogwai", name: "Mogwai", label: "Chemikal Underground" }]; MockStore::new(entity_version_map("Band", entities)) }; @@ -332,21 +296,13 @@ fn consecutive_modifications() { let mut cache = EntityCache::new(store); // First, add "founded" and change the "label". 
- let (update_key, update_data) = make_band( - "mogwai", - vec![ - ("id", "mogwai".into()), - ("founded", 1995.into()), - ("label", "Rock Action Records".into()), - ], - ); + let update_data = entity! { id: "mogwai", founded: 1995, label: "Rock Action Records" }; + let update_key = make_band_key("mogwai"); cache.set(update_key, update_data).unwrap(); // Then, just reset the "label". - let (update_key, update_data) = make_band( - "mogwai", - vec![("id", "mogwai".into()), ("label", Value::Null)], - ); + let update_data = entity! { id: "mogwai", label: Value::Null }; + let update_key = make_band_key("mogwai"); cache.set(update_key.clone(), update_data).unwrap(); // We expect a single overwrite modification for the above that leaves "id" @@ -356,12 +312,8 @@ fn consecutive_modifications() { sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![EntityModification::Overwrite { key: update_key, - data: Entity::from(vec![ - ("id", "mogwai".into()), - ("name", "Mogwai".into()), - ("founded", 1995.into()), - ]), - },]) + data: entity! { id: "mogwai", name: "Mogwai", founded: 1995 } + }]) ); } diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 7fbacb9c102..3ea62a54dae 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -475,11 +475,7 @@ fn partially_update_existing() { run_test(|store, writable, deployment| async move { let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); - let partial_entity = Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("Johnny Boy")), - ("email", Value::Null), - ]); + let partial_entity = entity! 
{ id: "1", name: "Johnny Boy", email: Value::Null }; let original_entity = writable .get(&entity_key) @@ -1088,11 +1084,7 @@ fn revert_block_with_partial_update() { run_test(|store, writable, deployment| async move { let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); - let partial_entity = Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("Johnny Boy")), - ("email", Value::Null), - ]); + let partial_entity = entity! { id: "1", name: "Johnny Boy", email: Value::Null }; let original_entity = writable.get(&entity_key).unwrap().expect("missing entity"); @@ -1185,11 +1177,7 @@ fn revert_block_with_dynamic_data_source_operations() { // Create operations to add a user let user_key = EntityKey::data(USER.to_owned(), "1".to_owned()); - let partial_entity = Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("Johnny Boy")), - ("email", Value::Null), - ]); + let partial_entity = entity! { id: "1", name: "Johnny Boy", email: Value::Null }; // Get the original user for comparisons let original_user = writable.get(&user_key).unwrap().expect("missing entity"); @@ -1305,20 +1293,8 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { // Add two entities to the store let added_entities = vec![ - ( - "1".to_owned(), - Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("Johnny Boy")), - ]), - ), - ( - "2".to_owned(), - Entity::from(vec![ - ("id", Value::from("2")), - ("name", Value::from("Tessa")), - ]), - ), + ("1".to_owned(), entity! { id: "1", name: "Johnny Boy" }), + ("2".to_owned(), entity! { id: "2", name: "Tessa" }), ]; transact_entity_operations( &store.subgraph_store(), @@ -1336,10 +1312,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { .unwrap(); // Update an entity in the store - let updated_entity = Entity::from(vec![ - ("id", Value::from("1")), - ("name", Value::from("Johnny")), - ]); + let updated_entity = entity! 
{ id: "1", name: "Johnny" }; let update_op = EntityOperation::Set { key: EntityKey::data(USER.to_owned(), "1".to_owned()), data: updated_entity.clone(), From 1d2f31a9db0afb782e6d1a5ab6e149cdc3cc31a2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 13:06:12 -0700 Subject: [PATCH 0146/2104] graph: Make InputSchema cheap to clone --- graph/src/schema/input_schema.rs | 33 +++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index e01d1b4a451..76f9a5a4228 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -1,9 +1,11 @@ use std::collections::{BTreeMap, HashSet}; use std::str::FromStr; +use std::sync::Arc; use anyhow::{anyhow, Error}; use store::Entity; +use crate::cheap_clone::CheapClone; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; @@ -17,11 +19,32 @@ use super::{ApiSchema, AtomPool, Schema, SchemaValidationError}; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { + inner: Arc, +} + +#[derive(Debug, PartialEq)] +pub struct Inner { schema: Schema, immutable_types: HashSet, pool: AtomPool, } +impl std::ops::Deref for InputSchema { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl CheapClone for InputSchema { + fn cheap_clone(&self) -> Self { + InputSchema { + inner: self.inner.cheap_clone(), + } + } +} + impl InputSchema { fn create(schema: Schema) -> Self { let immutable_types = HashSet::from_iter( @@ -34,9 +57,11 @@ impl InputSchema { ); let pool = AtomPool; Self { - schema, - immutable_types, - pool, + inner: Arc::new(Inner { + schema, + immutable_types, + pool, + }), } } pub fn new(id: DeploymentHash, document: s::Document) -> Result { @@ -49,7 +74,9 @@ impl InputSchema { 
Ok(Self::create(schema)) } +} +impl Inner { pub fn api_schema(&self) -> Result { let mut schema = self.schema.clone(); schema.document = api_schema(&self.schema.document)?; From e1d902ca0c28d1504b03f41af029da08535706ec Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 13:11:14 -0700 Subject: [PATCH 0147/2104] store: Keep the input schema in the Layout --- store/postgres/src/relational.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b7169980ce6..ee87e0e0b00 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -249,6 +249,8 @@ pub struct Layout { pub count_query: String, /// How many blocks of history the subgraph should keep pub history_blocks: BlockNumber, + + pub input_schema: InputSchema, } impl Layout { @@ -380,6 +382,7 @@ impl Layout { enums, count_query, history_blocks: i32::MAX, + input_schema: schema.cheap_clone(), }) } From cc6fbde48f0055bd7dc16ff9f0950cd56ebde845 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 13:12:40 -0700 Subject: [PATCH 0148/2104] graph, store: Use the input schema to create entities from the store --- graph/src/data/store/mod.rs | 15 +++++++++------ graph/src/schema/input_schema.rs | 6 +++++- store/postgres/src/relational_queries.rs | 9 ++++++--- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 10af093e841..eec2eaf6e81 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -595,6 +595,10 @@ pub trait IntoEntityIterator: IntoIterator {} impl> IntoEntityIterator for T {} +pub trait TryIntoEntityIterator: IntoIterator> {} + +impl>> TryIntoEntityIterator for T {} + impl stable_hash_legacy::StableHash for Entity { #[inline] fn stable_hash( @@ -636,6 +640,11 @@ impl Entity { Entity(HashMap::from_iter(iter)) } + pub fn try_make>(_pool: AtomPool, iter: I) -> Result { + let map: 
HashMap<_, _> = iter.into_iter().collect::>()?; + Ok(Entity(map)) + } + /// Creates a new entity with no attributes set. pub fn new() -> Self { Entity(HashMap::new()) @@ -844,12 +853,6 @@ impl<'a> From<&'a Entity> for Cow<'a, Entity> { } } -impl FromIterator<(Word, Value)> for Entity { - fn from_iter>(iter: T) -> Self { - Entity(HashMap::from_iter(iter)) - } -} - impl CacheWeight for Entity { fn indirect_weight(&self) -> usize { self.0.indirect_weight() diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 76f9a5a4228..93b2d323bd6 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -9,7 +9,7 @@ use crate::cheap_clone::CheapClone; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::store::{self, scalar, IntoEntityIterator}; +use crate::data::store::{self, scalar, IntoEntityIterator, TryIntoEntityIterator}; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; @@ -277,4 +277,8 @@ impl Inner { pub fn make_entity(&self, iter: I) -> Entity { Entity::make(self.pool.clone(), iter) } + + pub fn try_make_entity>(&self, iter: I) -> Result { + Entity::try_make(self.pool.clone(), iter) + } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 5d878cee115..2ae198d87bd 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -20,7 +20,7 @@ use graph::prelude::{ EntityFilter, EntityLink, EntityOrder, EntityOrderByChild, EntityOrderByChildInfo, EntityRange, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, ENV_VARS, }; -use graph::schema::FulltextAlgorithm; +use graph::schema::{FulltextAlgorithm, InputSchema}; use graph::{ components::store::{AttributeNames, EntityType}, 
data::store::scalar, @@ -242,6 +242,7 @@ pub trait FromEntityData: Sized { type Value: FromColumnValue; fn from_data>>( + schema: &InputSchema, iter: I, ) -> Result; } @@ -250,9 +251,10 @@ impl FromEntityData for Entity { type Value = graph::prelude::Value; fn from_data>>( + schema: &InputSchema, iter: I, ) -> Result { - as FromIterator>>::from_iter(iter) + schema.try_make_entity(iter) } } @@ -260,6 +262,7 @@ impl FromEntityData for Object { type Value = r::Value; fn from_data>>( + _schema: &InputSchema, iter: I, ) -> Result { as FromIterator>>::from_iter(iter) @@ -526,7 +529,7 @@ impl EntityData { None } }); - T::from_data(typname.chain(entries)) + T::from_data(&layout.input_schema, typname.chain(entries)) } _ => unreachable!( "we use `to_json` in our queries, and will therefore always get an object back" From 5b7090cb018b81f9e62af7895a2db5743706e164 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 14:29:22 -0700 Subject: [PATCH 0149/2104] all: Remove Entity::new --- core/src/subgraph/runner.rs | 1 + graph/src/components/store/entity_cache.rs | 2 +- graph/src/data/store/mod.rs | 106 +++++++++++++++--- graph/src/data_source/tests.rs | 4 +- graph/src/schema/ast.rs | 9 +- graph/src/schema/mod.rs | 4 + runtime/test/src/test.rs | 13 +-- .../tests/chain/ethereum/manifest.rs | 9 +- store/test-store/tests/graph/entity_cache.rs | 20 +--- store/test-store/tests/graphql/query.rs | 63 ++++++----- store/test-store/tests/postgres/graft.rs | 30 ++--- store/test-store/tests/postgres/relational.rs | 31 ++++- .../tests/postgres/relational_bytes.rs | 36 ++---- store/test-store/tests/postgres/store.rs | 96 ++++++---------- 14 files changed, 227 insertions(+), 197 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index e0bc04d1b91..671bc0b8877 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -1043,6 +1043,7 @@ async fn update_proof_of_indexing( // Put this onto an entity with the same digest attribute 
// that was expected before when reading. let new_poi_entity = entity! { + entity_cache.schema => id: entity_key.entity_id.to_string(), digest: updated_proof_of_indexing, }; diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index a1829b35a9b..f2d82292aed 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -43,7 +43,7 @@ pub struct EntityCache { /// The store is only used to read entities. pub store: Arc, - schema: Arc, + pub schema: Arc, } impl Debug for EntityCache { diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index eec2eaf6e81..6945e4de147 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -619,20 +619,107 @@ impl StableHash for Entity { } } +/// The `entity!` macro is a convenient way to create entities. It comes in +/// two forms, one where a schema is provided and one where it is not. The +/// schema-less form can only be used in tests, since it creates an +/// `AtomPool` just for this entity behind the scenes. +/// +/// Production code should always use the form with the schema +/// ``` +/// use graph::entity; +/// use graph::schema::InputSchema; +/// use graph::data::subgraph::DeploymentHash; +/// +/// let id = DeploymentHash::new("Qm123").unwrap(); +/// let schema = InputSchema::parse("type User @entity { id: String!, name: String! }", id).unwrap(); +/// +/// let entity = entity! { schema => id: "1", name: "John Doe" }; +/// ``` +/// +/// Test code which often doesn't have access to an `InputSchema` can use +/// the form without the schema +/// ``` +/// use graph::entity; +/// let entity = entity! { id: "1", name: "John Doe" }; +/// ``` +/// +/// In the test form, it is also possible to provide additional names after +/// a `;` that should be put into the `AtomPool` so that they can be set +/// later in the test +/// ``` +/// use graph::entity; +/// let entity = entity! 
{ id: "1", name: "John Doe"; phone, email }; +/// ``` +#[cfg(debug_assertions)] #[macro_export] macro_rules! entity { + () => { + { + let pairs = Vec::new(); + let pool = $crate::schema::AtomPool; + Entity::make(pool, pairs) + } + }; ($($name:ident: $value:expr,)*) => { { - let mut result = $crate::data::store::Entity::new(); + let mut pairs = Vec::new(); + let mut pool = $crate::schema::AtomPool; $( - result.set(stringify!($name), $crate::data::store::Value::from($value)); + pool.intern(stringify!($name)); + pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); )* - result + $crate::data::store::Entity::make(pool, pairs) } }; ($($name:ident: $value:expr),*) => { entity! {$($name: $value,)*} }; + ($($name:ident: $value:expr,)*; $($extra:ident,)*) => { + { + let mut pairs = Vec::new(); + let mut pool = $crate::schema::AtomPool; + $( + pool.intern(stringify!($name)); + pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); + )* + $( + pool.intern(stringify!($extra)); + )* + $crate::data::store::Entity::make(pool, pairs) + } + }; + ($($name:ident: $value:expr),*; $($extra:ident),*) => { + entity! {$($name: $value,)*; $($extra,)*} + }; + ($schema:expr => $($name:ident: $value:expr,)*) => { + { + let mut result = Vec::new(); + $( + result.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); + )* + $schema.make_entity(result) + } + }; + ($schema:expr => $($name:ident: $value:expr),*) => { + entity! {$schema => $($name: $value,)*} + }; +} + +#[cfg(not(debug_assertions))] +#[macro_export] +macro_rules! entity { + ($schema:expr => $($name:ident: $value:expr,)*) => { + { + let mut pairs = Vec::new(); + $( + pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); + )* + $schema.make_entity(pairs) + } + }; + ($schema:expr => $($name:ident: $value:expr),*) => { + entity! 
{$schema => $($name: $value,)*} + }; } impl Entity { @@ -645,11 +732,6 @@ impl Entity { Ok(Entity(map)) } - /// Creates a new entity with no attributes set. - pub fn new() -> Self { - Entity(HashMap::new()) - } - pub fn get(&self, key: &str) -> Option<&Value> { self.0.get(&Word::from(key)) } @@ -900,13 +982,7 @@ fn value_bigint() { #[test] fn entity_validation() { fn make_thing(name: &str) -> Entity { - let mut thing = Entity::new(); - thing.set("id", name); - thing.set("name", name); - thing.set("stuff", "less"); - thing.set("favorite_color", "red"); - thing.set("things", Value::List(vec![])); - thing + entity! { id: name, name: name, stuff: "less", favorite_color: "red", things: Value::List(vec![]); cruft } } fn check(thing: Entity, errmsg: &str) { diff --git a/graph/src/data_source/tests.rs b/graph/src/data_source/tests.rs index 30421fca84f..6f0ac625dba 100644 --- a/graph/src/data_source/tests.rs +++ b/graph/src/data_source/tests.rs @@ -3,6 +3,7 @@ use cid::Cid; use crate::{ blockchain::mock::{MockBlockchain, MockDataSource}, components::subgraph::Entity, + entity, ipfs_client::CidFile, prelude::Link, }; @@ -39,7 +40,8 @@ fn offchain_duplicate() { assert!(!a.is_duplicate_of(&c)); let mut c = a.clone(); - c.context = Arc::new(Some(Entity::new())); + let entity = entity! 
{}; + c.context = Arc::new(Some(entity)); assert!(!a.is_duplicate_of(&c)); } diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 80f5433ef28..4d5635bece7 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -407,17 +407,12 @@ pub fn is_list(field_type: &s::Type) -> bool { fn entity_validation() { use crate::components::store::EntityKey; use crate::data::store; + use crate::entity; use crate::prelude::{DeploymentHash, Entity}; use crate::schema::InputSchema; fn make_thing(name: &str) -> Entity { - let mut thing = Entity::new(); - thing.set("id", name); - thing.set("name", name); - thing.set("stuff", "less"); - thing.set("favorite_color", "red"); - thing.set("things", store::Value::List(vec![])); - thing + entity! { id: name, name: name, stuff: "less", favorite_color: "red", things: store::Value::List(vec![]); cruft} } fn check(thing: Entity, errmsg: &str) { diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 1d5e393fb0b..4a669a9ac89 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -41,6 +41,10 @@ pub use input_schema::InputSchema; #[derive(Clone, Debug, PartialEq)] pub struct AtomPool; +impl AtomPool { + pub fn intern(&mut self, _s: &str) {} +} + pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; pub const META_FIELD_TYPE: &str = "_Meta_"; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 3f3c8f5f3e9..3f428669d90 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -419,10 +419,7 @@ async fn test_ipfs_block() { const USER_DATA: &str = "user_data"; fn make_thing(id: &str, value: &str) -> (String, EntityModification) { - let mut data = Entity::new(); - data.set("id", id); - data.set("value", value); - data.set("extra", USER_DATA); + let data = entity! 
{ id: id, value: value, extra: USER_DATA }; let key = EntityKey::data("Thing".to_string(), id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), @@ -926,12 +923,8 @@ async fn test_entity_store(api_version: Version) { let schema = store.input_schema(&deployment.hash).unwrap(); - let mut alex = Entity::new(); - alex.set("id", "alex"); - alex.set("name", "Alex"); - let mut steve = Entity::new(); - steve.set("id", "steve"); - steve.set("name", "Steve"); + let alex = entity! { id: "alex", name: "Alex" }; + let steve = entity! { id: "steve", name: "Steve" }; let user_type = EntityType::from("User"); test_store::insert_entities( &deployment, diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index f2278311cf9..cc87a237195 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -5,8 +5,9 @@ use std::time::Duration; use graph::data::subgraph::schema::SubgraphError; use graph::data::subgraph::{SPEC_VERSION_0_0_4, SPEC_VERSION_0_0_7}; use graph::data_source::DataSourceTemplate; +use graph::entity; use graph::prelude::{ - anyhow, async_trait, serde_yaml, tokio, DeploymentHash, Entity, Link, Logger, SubgraphManifest, + anyhow, async_trait, serde_yaml, tokio, DeploymentHash, Link, Logger, SubgraphManifest, SubgraphManifestValidationError, UnvalidatedSubgraphManifest, }; use graph::{ @@ -201,8 +202,7 @@ specVersion: 0.0.2 let deployment = test_store::create_test_subgraph(&subgraph, GQL_SCHEMA).await; // Adds an example entity. - let mut thing = Entity::new(); - thing.set("id", "datthing"); + let thing = entity! { id: "datthing" }; test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) .await .unwrap(); @@ -294,8 +294,7 @@ specVersion: 0.0.2 msg ); - let mut thing = Entity::new(); - thing.set("id", "datthing"); + let thing = entity! 
{ id: "datthing" }; test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) .await .unwrap(); diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 4ff62561dc2..4859c953658 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -246,8 +246,8 @@ fn overwrite_modifications() { // every set operation as an overwrite. let store = { let entities = vec![ - entity! { id: "mogwai", name: "Mogwai" }, - entity! { id: "sigurros", name: "Sigur Ros" }, + entity! { id: "mogwai", name: "Mogwai"; founded }, + entity! { id: "sigurros", name: "Sigur Ros"; founded }, ]; MockStore::new(entity_version_map("Band", entities)) }; @@ -287,7 +287,7 @@ fn consecutive_modifications() { // `Value::Null`. let store = { let entities = - vec![entity! { id: "mogwai", name: "Mogwai", label: "Chemikal Underground" }]; + vec![entity! { id: "mogwai", name: "Mogwai", label: "Chemikal Underground"; founded }]; MockStore::new(entity_version_map("Band", entities)) }; @@ -455,12 +455,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator } fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityOperation { - let mut test_entity = Entity::new(); - - test_entity.insert("id".to_owned(), Value::String(id.to_owned())); - test_entity.insert("name".to_owned(), Value::String(name.to_owned())); - test_entity.insert("email".to_owned(), Value::String(email.to_owned())); - test_entity.insert("age".to_owned(), Value::Int(age)); + let test_entity = entity! 
{ id: id, name: name, email: email, age: age }; EntityOperation::Set { key: EntityKey::data(ACCOUNT.to_owned(), id.to_owned()), @@ -469,12 +464,7 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO } fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { - let mut test_wallet = Entity::new(); - - test_wallet.insert("id".to_owned(), Value::String(id.to_owned())); - test_wallet.insert("account".to_owned(), Value::String(account_id.to_owned())); - test_wallet.insert("balance".to_owned(), Value::Int(balance)); - test_wallet + entity! { id: id, account: account_id, balance: balance } } fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { let test_wallet = create_wallet_entity(id, account_id, balance); diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 8cc8787c20d..6be99be3152 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -313,41 +313,42 @@ async fn insert_test_entities( let s = id_type.songs(); let md = id_type.medias(); + let is = &manifest.schema; let entities0 = vec![ - entity! { __typename: "Musician", id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"] }, - entity! { __typename: "Musician", id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"] }, - entity! { __typename: "Publisher", id: "0xb1" }, - entity! { __typename: "Band", id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, - entity! { __typename: "Band", id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, - entity! { __typename: "Song", id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, - entity! { __typename: "Song", id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, - entity! 
{ __typename: "Song", id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, - entity! { __typename: "Song", id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, - entity! { __typename: "SongStat", id: s[1], played: 10 }, - entity! { __typename: "SongStat", id: s[2], played: 15 }, - entity! { __typename: "BandReview", id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, - entity! { __typename: "BandReview", id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, - entity! { __typename: "BandReview", id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, - entity! { __typename: "SongReview", id: "r3", body: "Bad", song: s[2], author: "u1" }, - entity! { __typename: "SongReview", id: "r4", body: "Good", song: s[3], author: "u2" }, - entity! { __typename: "SongReview", id: "r6", body: "Very Bad", song: s[2], author: "u3" }, - entity! { __typename: "User", id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, - entity! { __typename: "User", id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, - entity! { __typename: "AnonymousUser", id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, - entity! { __typename: "Photo", id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, - entity! { __typename: "Video", id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, - entity! { __typename: "Photo", id: md[3], title: "Rock Tune Single Cover", author: "u1" }, - entity! { __typename: "Video", id: md[4], title: "Rock Tune Music Video", author: "u2" }, - entity! { __typename: "Photo", id: md[5], title: "Pop Tune Single Cover", author: "u1" }, - entity! { __typename: "Video", id: md[6], title: "Folk Tune Music Video", author: "u2" }, - entity! 
{ __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, - entity! { __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, - entity! { __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, + entity! { is => __typename: "Musician", id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"] }, + entity! { is => __typename: "Musician", id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"] }, + entity! { is => __typename: "Publisher", id: "0xb1" }, + entity! { is => __typename: "Band", id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, + entity! { is => __typename: "Band", id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, + entity! { is => __typename: "Song", id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, + entity! { is => __typename: "Song", id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, + entity! { is => __typename: "Song", id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, + entity! { is => __typename: "Song", id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, + entity! { is => __typename: "SongStat", id: s[1], played: 10 }, + entity! { is => __typename: "SongStat", id: s[2], played: 15 }, + entity! { is => __typename: "BandReview", id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, + entity! { is => __typename: "BandReview", id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, + entity! { is => __typename: "BandReview", id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, + entity! { is => __typename: "SongReview", id: "r3", body: "Bad", song: s[2], author: "u1" }, + entity! { is => __typename: "SongReview", id: "r4", body: "Good", song: s[3], author: "u2" }, + entity! 
{ is => __typename: "SongReview", id: "r6", body: "Very Bad", song: s[2], author: "u3" }, + entity! { is => __typename: "User", id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, + entity! { is => __typename: "User", id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, + entity! { is => __typename: "AnonymousUser", id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, + entity! { is => __typename: "Photo", id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, + entity! { is => __typename: "Video", id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, + entity! { is => __typename: "Photo", id: md[3], title: "Rock Tune Single Cover", author: "u1" }, + entity! { is => __typename: "Video", id: md[4], title: "Rock Tune Music Video", author: "u2" }, + entity! { is => __typename: "Photo", id: md[5], title: "Pop Tune Single Cover", author: "u1" }, + entity! { is => __typename: "Video", id: md[6], title: "Folk Tune Music Video", author: "u2" }, + entity! { is => __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, + entity! { is => __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, + entity! { is => __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, ]; let entities1 = vec![ - entity! { __typename: "Musician", id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, - entity! { __typename: "Musician", id: "m4", name: "Valerie", bands: Vec::::new() }, + entity! { is => __typename: "Musician", id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, + entity! 
{ is => __typename: "Musician", id: "m4", name: "Valerie", bands: Vec::::new() }, ]; async fn insert_at(entities: Vec, deployment: &DeploymentLocator, block_ptr: BlockPtr) { diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 190406e63f7..8f6a9f7deff 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -241,26 +241,18 @@ fn create_test_entity( coffee: bool, favorite_color: Option<&str>, ) -> EntityOperation { - let mut test_entity = Entity::new(); - - test_entity.insert("id".to_owned(), Value::String(id.to_owned())); - test_entity.insert("name".to_owned(), Value::String(name.to_owned())); let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); - test_entity.insert("bin_name".to_owned(), Value::Bytes(bin_name)); - test_entity.insert("email".to_owned(), Value::String(email.to_owned())); - test_entity.insert("age".to_owned(), Value::Int(age)); - test_entity.insert( - "seconds_age".to_owned(), - Value::BigInt(BigInt::from(age) * 31557600.into()), - ); - test_entity.insert("weight".to_owned(), Value::BigDecimal(weight.into())); - test_entity.insert("coffee".to_owned(), Value::Bool(coffee)); - test_entity.insert( - "favorite_color".to_owned(), - favorite_color - .map(|s| Value::String(s.to_owned())) - .unwrap_or(Value::Null), - ); + let test_entity = entity! 
{ + id: id, + name: name, + bin_name: bin_name, + email: email, + age: age, + seconds_age: age * 31557600, + weight: Value::BigDecimal(weight.into()), + coffee: coffee, + favorite_color: favorite_color + }; EntityOperation::Set { key: EntityKey::data(entity_type.to_string(), id), diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 214232a0e47..cf551d72c5f 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -298,12 +298,23 @@ fn insert_user_entity( drinks: Option>, block: BlockNumber, ) { - let user = make_user(id, name, email, age, weight, coffee, favorite_color, drinks); + let user = make_user( + &layout.input_schema, + id, + name, + email, + age, + weight, + coffee, + favorite_color, + drinks, + ); insert_entity_at(conn, layout, entity_type, vec![user], block); } fn make_user( + schema: &InputSchema, id: &str, name: &str, email: &str, @@ -317,7 +328,7 @@ fn make_user( .map(|s| Value::String(s.to_owned())) .unwrap_or(Value::Null); let bin_name = Bytes::from_str(&hex::encode(name)).unwrap(); - let mut user = entity! { + let mut user = entity! { schema => id: id, name: name, bin_name: bin_name, @@ -393,7 +404,17 @@ fn update_user_entity( drinks: Option>, block: BlockNumber, ) { - let user = make_user(id, name, email, age, weight, coffee, favorite_color, drinks); + let user = make_user( + &layout.input_schema, + id, + name, + email, + age, + weight, + coffee, + favorite_color, + drinks, + ); update_entity_at(conn, layout, entity_type, vec![user], block); } @@ -845,9 +866,7 @@ fn conflicting_entity() { let dog = EntityType::from(dog); let ferret = EntityType::from(ferret); - let mut fred = Entity::new(); - fred.set("id", id.clone()); - fred.set("name", Value::String(id.to_string())); + let fred = entity! 
{ id: id.clone(), name: id.clone() }; insert_entity(conn, layout, cat.as_str(), vec![fred]); // If we wanted to create Fred the dog, which is forbidden, we'd run this: diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index e495daea0ac..b840345be2c 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -4,6 +4,7 @@ use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data_source::CausalityRegion; +use graph::entity; use graph::prelude::{EntityQuery, MetricsRegistry}; use graph::schema::InputSchema; use hex_literal::hex; @@ -15,7 +16,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::prelude::{ o, slog, web3::types::H256, AttributeNames, ChildMultiplicity, DeploymentHash, Entity, - EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, StopwatchMetrics, Value, + EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, StopwatchMetrics, WindowAttribute, BLOCK_NUMBER_MAX, }; use graph::{ @@ -39,21 +40,6 @@ const THINGS_GQL: &str = " } "; -macro_rules! entity { - ($($name:ident: $value:expr,)*) => { - { - let mut result = ::graph::prelude::Entity::new(); - $( - result.insert(stringify!($name).to_string(), Value::from($value)); - )* - result - } - }; - ($($name:ident: $value:expr),*) => { - entity! {$($name: $value,)*} - }; -} - lazy_static! { static ref THINGS_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("things").unwrap(); static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17); @@ -111,7 +97,7 @@ fn insert_thing(conn: &PgConnection, layout: &Layout, id: &str, name: &str) { conn, layout, "Thing", - entity! { + entity! 
{ layout.input_schema => id: id, name: name }, @@ -134,10 +120,8 @@ fn create_schema(conn: &PgConnection) -> Layout { } fn scrub(entity: &Entity) -> Entity { - let mut scrubbed = Entity::new(); - // merge has the sideffect of removing any attribute - // that is Value::Null - scrubbed.merge(entity.clone()); + let mut scrubbed = entity.clone(); + scrubbed.remove_null_fields(); scrubbed } @@ -383,29 +367,29 @@ const GRANDCHILD2: &str = "0xfafa02"; /// +- grandchild2 /// fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Entity) { - let root = entity! { + let root = entity! { layout.input_schema => id: ROOT, name: "root", children: vec!["babe01", "babe02"] }; - let child1 = entity! { + let child1 = entity! { layout.input_schema => id: CHILD1, name: "child1", parent: "dead00", children: vec![GRANDCHILD1] }; - let child2 = entity! { + let child2 = entity! { layout.input_schema => id: CHILD2, name: "child2", parent: "dead00", children: vec![GRANDCHILD1] }; - let grand_child1 = entity! { + let grand_child1 = entity! { layout.input_schema => id: GRANDCHILD1, name: "grandchild1", parent: CHILD1 }; - let grand_child2 = entity! { + let grand_child2 = entity! 
{ layout.input_schema => id: GRANDCHILD2, name: "grandchild2", parent: CHILD2 diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 3ea62a54dae..9d3aa66be20 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -266,26 +266,18 @@ fn create_test_entity( coffee: bool, favorite_color: Option<&str>, ) -> EntityOperation { - let mut test_entity = Entity::new(); - - test_entity.insert("id".to_owned(), Value::String(id.to_owned())); - test_entity.insert("name".to_owned(), Value::String(name.to_owned())); let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); - test_entity.insert("bin_name".to_owned(), Value::Bytes(bin_name)); - test_entity.insert("email".to_owned(), Value::String(email.to_owned())); - test_entity.insert("age".to_owned(), Value::Int(age)); - test_entity.insert( - "seconds_age".to_owned(), - Value::BigInt(BigInt::from(age) * 31557600.into()), - ); - test_entity.insert("weight".to_owned(), Value::BigDecimal(weight.into())); - test_entity.insert("coffee".to_owned(), Value::Bool(coffee)); - test_entity.insert( - "favorite_color".to_owned(), - favorite_color - .map(|s| Value::String(s.to_owned())) - .unwrap_or(Value::Null), - ); + let test_entity = entity! 
{ + id: id, + name: name, + bin_name: bin_name, + email: email, + age: age, + seconds_age: Value::BigInt(BigInt::from(age) * 31557600.into()), + weight: Value::BigDecimal(weight.into()), + coffee: coffee, + favorite_color: favorite_color, + }; EntityOperation::Set { key: EntityKey::data(entity_type.to_owned(), id.to_owned()), @@ -341,22 +333,17 @@ fn get_entity_1() { let key = EntityKey::data(USER.to_owned(), "1".to_owned()); let result = writable.get(&key).unwrap(); - let mut expected_entity = Entity::new(); - - expected_entity.insert("id".to_owned(), "1".into()); - expected_entity.insert("name".to_owned(), "Johnton".into()); - expected_entity.insert( - "bin_name".to_owned(), - Value::Bytes("Johnton".as_bytes().into()), - ); - expected_entity.insert("email".to_owned(), "tonofjohn@email.com".into()); - expected_entity.insert("age".to_owned(), Value::Int(67_i32)); - expected_entity.insert( - "seconds_age".to_owned(), - Value::BigInt(BigInt::from(2114359200)), - ); - expected_entity.insert("weight".to_owned(), Value::BigDecimal(184.4.into())); - expected_entity.insert("coffee".to_owned(), Value::Bool(false)); + let bin_name = Value::Bytes("Johnton".as_bytes().into()); + let expected_entity = entity! 
{ + id: "1", + name: "Johnton", + bin_name: bin_name, + email: "tonofjohn@email.com", + age: 67_i32, + seconds_age: Value::BigInt(BigInt::from(2114359200)), + weight: Value::BigDecimal(184.4.into()), + coffee: false, + }; // "favorite_color" was set to `Null` earlier and should be absent // Check that the expected entity was returned @@ -371,22 +358,16 @@ fn get_entity_3() { let key = EntityKey::data(USER.to_owned(), "3".to_owned()); let result = writable.get(&key).unwrap(); - let mut expected_entity = Entity::new(); - - expected_entity.insert("id".to_owned(), "3".into()); - expected_entity.insert("name".to_owned(), "Shaqueeena".into()); - expected_entity.insert( - "bin_name".to_owned(), - Value::Bytes("Shaqueeena".as_bytes().into()), - ); - expected_entity.insert("email".to_owned(), "teeko@email.com".into()); - expected_entity.insert("age".to_owned(), Value::Int(28_i32)); - expected_entity.insert( - "seconds_age".to_owned(), - Value::BigInt(BigInt::from(883612800)), - ); - expected_entity.insert("weight".to_owned(), Value::BigDecimal(111.7.into())); - expected_entity.insert("coffee".to_owned(), Value::Bool(false)); + let expected_entity = entity! { + id: "3", + name: "Shaqueeena", + bin_name: Value::Bytes("Shaqueeena".as_bytes().into()), + email: "teeko@email.com", + age: 28_i32, + seconds_age: Value::BigInt(BigInt::from(883612800)), + weight: Value::BigDecimal(111.7.into()), + coffee: false, + }; // "favorite_color" was set to `Null` earlier and should be absent // Check that the expected entity was returned @@ -1503,9 +1484,7 @@ fn handle_large_string_with_index() { const TWO: &str = "large_string_two"; fn make_insert_op(id: &str, name: &str) -> EntityModification { - let mut data = Entity::new(); - data.set("id", id); - data.set(NAME, name); + let data = entity! 
{ id: id, name: name }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); @@ -1594,9 +1573,7 @@ fn handle_large_bytea_with_index() { const TWO: &str = "large_string_two"; fn make_insert_op(id: &str, name: &[u8]) -> EntityModification { - let mut data = Entity::new(); - data.set("id", id); - data.set(NAME, scalar::Bytes::from(name)); + let data = entity! { id: id, bin_name: scalar::Bytes::from(name) }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); @@ -1794,11 +1771,8 @@ impl WindowQuery { #[test] fn window() { fn make_color_end_age(entity_type: &str, id: &str, color: &str, age: i32) -> EntityOperation { - let mut entity = Entity::new(); + let entity = entity! { id: id, age: age, favorite_color: color }; - entity.set("id", id.to_owned()); - entity.set("age", age); - entity.set("favorite_color", color); EntityOperation::Set { key: EntityKey::data(entity_type.to_owned(), id.to_owned()), data: entity, From 268efe3b70b2128e483dcafd86168caf164ecfca Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 12 Apr 2023 14:32:13 -0700 Subject: [PATCH 0150/2104] graph: Remove unused TryFromValue for Entity --- graph/src/data/graphql/values.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/graph/src/data/graphql/values.rs b/graph/src/data/graphql/values.rs index 7db68d7a484..7f15d26dc98 100644 --- a/graph/src/data/graphql/values.rs +++ b/graph/src/data/graphql/values.rs @@ -5,7 +5,7 @@ use std::str::FromStr; use crate::blockchain::BlockHash; use crate::data::value::Object; -use crate::prelude::{r, BigInt, Entity}; +use crate::prelude::{r, BigInt}; use web3::types::H160; pub trait TryFromValue: Sized { @@ -127,19 +127,6 @@ where } } -/// Assumes the entity is stored as a JSON string. 
-impl TryFromValue for Entity { - fn try_from_value(value: &r::Value) -> Result { - match value { - r::Value::String(s) => serde_json::from_str(s).map_err(Into::into), - _ => Err(anyhow!( - "Cannot parse entity, value is not a string: {:?}", - value - )), - } - } -} - pub trait ValueMap { fn get_required(&self, key: &str) -> Result; fn get_optional(&self, key: &str) -> Result, Error>; From 1e90d4268cb94eca6696f1064b67eb390b17cfda Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 12 Apr 2023 14:48:04 -0700 Subject: [PATCH 0151/2104] graph, runtime: Make DataSourceContext distinct from Entity That allows us to remove `Deserialize` from `Entity` --- graph/src/data/store/mod.rs | 2 +- graph/src/data/subgraph/mod.rs | 34 +++++++++++++++++++++++++++----- graph/src/data_source/tests.rs | 5 +---- runtime/wasm/src/host_exports.rs | 2 +- runtime/wasm/src/module/mod.rs | 3 +-- 5 files changed, 33 insertions(+), 13 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 6945e4de147..ee85050b72f 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -588,7 +588,7 @@ where } /// An entity is represented as a map of attribute names to values. 
-#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct Entity(HashMap); pub trait IntoEntityIterator: IntoIterator {} diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index a8b18fe67ef..f6e7512fae2 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -18,7 +18,10 @@ use serde_yaml; use slog::Logger; use stable_hash::{FieldAddress, StableHash}; use stable_hash_legacy::SequenceNumber; -use std::{collections::BTreeSet, marker::PhantomData}; +use std::{ + collections::{BTreeSet, HashMap}, + marker::PhantomData, +}; use thiserror::Error; use wasmparser; use web3::types::Address; @@ -31,7 +34,7 @@ use crate::{ store::{StoreError, SubgraphStore}, }, data::{ - graphql::TryFromValue, query::QueryExecutionError, store::Entity, + graphql::TryFromValue, query::QueryExecutionError, subgraph::features::validate_subgraph_features, }, data_source::{ @@ -39,7 +42,7 @@ use crate::{ UnresolvedDataSourceTemplate, }, ensure, - prelude::{r, CheapClone, ENV_VARS}, + prelude::{r, CheapClone, Value, ENV_VARS}, schema::{InputSchema, SchemaValidationError}, }; @@ -50,6 +53,8 @@ use std::ops::Deref; use std::str::FromStr; use std::sync::Arc; +use super::value::Word; + /// Deserialize an Address (with or without '0x' prefix). fn deserialize_address<'de, D>(deserializer: D) -> Result, D::Error> where @@ -354,8 +359,27 @@ pub enum SubgraphManifestResolveError { ResolveError(#[from] anyhow::Error), } -/// Data source contexts are conveniently represented as entities. -pub type DataSourceContext = Entity; +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct DataSourceContext(HashMap); + +impl DataSourceContext { + pub fn new() -> Self { + Self(HashMap::new()) + } + + // This collects the entries into an ordered vector so that it can be iterated deterministically. 
+ pub fn sorted(self) -> Vec<(Word, Value)> { + let mut v: Vec<_> = self.0.into_iter().collect(); + v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + v + } +} + +impl From> for DataSourceContext { + fn from(map: HashMap) -> Self { + Self(map) + } +} /// IPLD link. #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] diff --git a/graph/src/data_source/tests.rs b/graph/src/data_source/tests.rs index 6f0ac625dba..b3205fbab79 100644 --- a/graph/src/data_source/tests.rs +++ b/graph/src/data_source/tests.rs @@ -2,8 +2,6 @@ use cid::Cid; use crate::{ blockchain::mock::{MockBlockchain, MockDataSource}, - components::subgraph::Entity, - entity, ipfs_client::CidFile, prelude::Link, }; @@ -40,8 +38,7 @@ fn offchain_duplicate() { assert!(!a.is_duplicate_of(&c)); let mut c = a.clone(); - let entity = entity! {}; - c.context = Arc::new(Some(entity)); + c.context = Arc::new(Some(DataSourceContext::new())); assert!(!a.is_duplicate_of(&c)); } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index c578c4b94c6..a2136f02271 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -756,7 +756,7 @@ impl HostExports { pub(crate) fn data_source_context( &self, gas: &GasCounter, - ) -> Result, DeterministicHostError> { + ) -> Result, DeterministicHostError> { gas.consume_host_fn(Gas::new(gas::DEFAULT_BASE_COST))?; Ok(self.data_source_context.as_ref().clone()) } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 73096d1f293..308013b3d8d 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1737,8 +1737,7 @@ impl WasmInstanceContext { let name: String = asc_get(self, name_ptr, gas)?; let params: Vec = asc_get(self, params_ptr, gas)?; let context: HashMap<_, _> = asc_get(self, context_ptr, gas)?; - - let context = self.ctx.state.entity_cache.make_entity(context); + let context = DataSourceContext::from(context); self.ctx.host_exports.data_source_create( 
&self.ctx.logger, From 2b8a1f0eb6427d34cb9efe19cd2bb8d9dae72f46 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 12 Apr 2023 18:15:43 -0700 Subject: [PATCH 0152/2104] graph: Remove unused StableHash impl for Entity --- graph/src/data/store/mod.rs | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index ee85050b72f..8024fb9db09 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -599,26 +599,6 @@ pub trait TryIntoEntityIterator: IntoIterator impl>> TryIntoEntityIterator for T {} -impl stable_hash_legacy::StableHash for Entity { - #[inline] - fn stable_hash( - &self, - mut sequence_number: H::Seq, - state: &mut H, - ) { - use stable_hash_legacy::SequenceNumber; - let Self(inner) = self; - stable_hash_legacy::StableHash::stable_hash(inner, sequence_number.next_child(), state); - } -} - -impl StableHash for Entity { - fn stable_hash(&self, field_address: H::Addr, state: &mut H) { - let Self(inner) = self; - StableHash::stable_hash(inner, field_address.child(0), state); - } -} - /// The `entity!` macro is a convenient way to create entities. It comes in /// two forms, one where a schema is provided and one where it is not. 
The /// schema-less form can only be used in tests, since it creates an From 10f963ee43b041aa04b98d42cf2186a4a6bcc86b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 22 Oct 2021 20:37:47 -0700 Subject: [PATCH 0153/2104] graph: A simple string pool --- graph/src/data/value.rs | 6 + graph/src/util/intern.rs | 650 +++++++++++++++++++++++++++++++++++++++ graph/src/util/mod.rs | 2 + 3 files changed, 658 insertions(+) create mode 100644 graph/src/util/intern.rs diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 7847992594d..768ff909343 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -91,6 +91,12 @@ impl GasSizeOf for Word { } } +impl AsRef for Word { + fn as_ref(&self) -> &str { + self.as_str() + } +} + #[derive(Clone, Debug, PartialEq)] struct Entry { key: Option, diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs new file mode 100644 index 00000000000..2d68ae82f20 --- /dev/null +++ b/graph/src/util/intern.rs @@ -0,0 +1,650 @@ +//! Interning of strings. +//! +//! This module provides an interned string pool `AtomPool` and a map-like +//! data structure `Object` that uses the string pool. It offers two +//! different kinds of atom: a plain `Atom` (an integer) and a `FatAtom` (a +//! reference to the pool and an integer). The former is useful when the +//! pool is known from context whereas the latter carries a reference to the +//! pool and can be used anywhere. + +use std::convert::TryFrom; +use std::{collections::HashMap, sync::Arc}; + +use serde::Serialize; + +use crate::cheap_clone::CheapClone; +use crate::data::value::Word; +use crate::runtime::gas::{Gas, GasSizeOf}; + +use super::cache_weight::CacheWeight; + +// We could probably get away with a `u16` here, but unless we improve the +// layout of `Object`, there's little point in that +type AtomInt = u32; + +/// An atom in a pool. To look up the underlying string, surrounding code +/// needs to know the pool for it. 
+#[derive(Eq, Hash, PartialEq, Clone, Copy, Debug)] +pub struct Atom(AtomInt); + +/// An atom and the underlying pool. A `FatAtom` can be used in place of a +/// `String` or `Word` +pub struct FatAtom { + pool: Arc, + atom: Atom, +} + +impl FatAtom { + pub fn as_str(&self) -> &str { + self.pool.get(self.atom).expect("atom is in the pool") + } +} + +impl AsRef for FatAtom { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +#[derive(Debug)] +pub enum Error { + NotInterned(String), +} + +#[derive(Debug, PartialEq)] +/// A pool of interned strings. Pools can be organized hierarchically with +/// lookups in child pools also considering the parent pool. The chain of +/// pools from a pool through all its ancestors act as one big pool to the +/// outside. +pub struct AtomPool { + base: Option>, + base_sym: AtomInt, + atoms: Vec>, + words: HashMap, Atom>, +} + +impl AtomPool { + /// Create a new root pool. + pub fn new() -> Self { + Self { + base: None, + base_sym: 0, + atoms: Vec::new(), + words: HashMap::new(), + } + } + + /// Create a child pool that extends the set of strings interned in the + /// current pool. + pub fn child(self: &Arc) -> Self { + let base_sym = AtomInt::try_from(self.atoms.len()).unwrap(); + AtomPool { + base: Some(self.clone()), + base_sym, + atoms: Vec::new(), + words: HashMap::new(), + } + } + + /// Get the string for `atom`. Return `None` if the atom is not in this + /// pool or any of its ancestors. + pub fn get(&self, atom: Atom) -> Option<&str> { + if atom.0 < self.base_sym { + self.base.as_ref().map(|base| base.get(atom)).flatten() + } else { + self.atoms + .get((atom.0 - self.base_sym) as usize) + .map(|s| s.as_ref()) + } + } + + /// Get the atom for `word`. Return `None` if the word is not in this + /// pool or any of its ancestors. 
+ pub fn lookup(&self, word: &str) -> Option { + if let Some(base) = &self.base { + if let Some(atom) = base.lookup(word) { + return Some(atom); + } + } + + self.words.get(word).cloned() + } + + /// Add `word` to this pool if it is not already in it. Return the atom + /// for the word. + pub fn intern(&mut self, word: &str) -> Atom { + if let Some(atom) = self.lookup(word) { + return atom; + } + + let atom = + AtomInt::try_from(self.base_sym as usize + self.atoms.len()).expect("too many atoms"); + let atom = Atom(atom); + if atom == TOMBSTONE_KEY { + panic!("too many atoms"); + } + self.words.insert(Box::from(word), atom); + self.atoms.push(Box::from(word)); + atom + } +} + +/// A marker for an empty entry in an `Object` +const TOMBSTONE_KEY: Atom = Atom(AtomInt::MAX); + +/// A value that can be used as a null value in an `Object`. The null value +/// is used when removing an entry as `Object.remove` does not actually +/// remove the entry but replaces it with a tombstone marker. +pub trait NullValue { + fn null() -> Self; +} + +impl NullValue for T { + fn null() -> Self { + T::default() + } +} + +#[derive(Clone, Debug, PartialEq)] +struct Entry { + key: Atom, + value: V, +} + +impl GasSizeOf for Entry { + fn gas_size_of(&self) -> Gas { + Gas::new(std::mem::size_of::() as u64) + self.value.gas_size_of() + } +} + +/// A map-like data structure that uses an `AtomPool` for its keys. The data +/// structure assumes that reads are much more common than writes, and that +/// entries are rarely removed. It also assumes that each instance has +/// relatively few entries. +#[derive(Clone)] +pub struct Object { + pool: Arc, + // This could be further improved by using two `Vec`s, one for keys and + // one for values. That would avoid losing memory to padding. + entries: Vec>, +} + +impl Object { + /// Create a new `Object` whose keys are interned in `pool`. 
+ pub fn new(pool: Arc) -> Self { + Self { + pool, + entries: Vec::new(), + } + } + + /// Return the number of entries in the object. Because of tombstones, + /// this operation has to traverse all entries + pub fn len(&self) -> usize { + // Because of tombstones we can't just return `self.entries.len()`. + self.entries + .iter() + .filter(|entry| entry.key != TOMBSTONE_KEY) + .count() + } + + /// Find the value for `key` in the object. Return `None` if the key is + /// not present. + pub fn get(&self, key: &str) -> Option<&V> { + match self.pool.lookup(key) { + None => None, + Some(key) => self + .entries + .iter() + .find(|entry| entry.key == key) + .map(|entry| &entry.value), + } + } + + /// Find the value for `atom` in the object. Return `None` if the atom + /// is not present. + fn get_by_atom(&self, atom: &Atom) -> Option<&V> { + if *atom == TOMBSTONE_KEY { + return None; + } + + self.entries + .iter() + .find(|entry| &entry.key == atom) + .map(|entry| &entry.value) + } + + pub fn iter(&self) -> impl Iterator { + ObjectIter::new(self) + } + + /// Add or update an entry to the object. Return the value that was + /// previously associated with the `key`. The `key` must already be part + /// of the `AtomPool` that this object uses. Trying to set a key that is + /// not in the pool will result in an error. + pub fn insert>(&mut self, key: K, value: V) -> Result, Error> { + let key = self + .pool + .lookup(key.as_ref()) + .ok_or_else(|| Error::NotInterned(key.as_ref().to_string()))?; + Ok(self.insert_atom(key, value)) + } + + fn insert_atom(&mut self, key: Atom, value: V) -> Option { + if key == TOMBSTONE_KEY { + // Ignore attempts to insert the tombstone key. 
+ return None; + } + + match self.entries.iter_mut().find(|entry| entry.key == key) { + Some(entry) => Some(std::mem::replace(&mut entry.value, value)), + None => { + self.entries.push(Entry { key, value }); + None + } + } + } + + pub(crate) fn contains_key(&self, key: &str) -> bool { + self.entries + .iter() + .any(|entry| self.pool.get(entry.key).map_or(false, |k| key == k)) + } + + pub fn merge(&mut self, other: Object) { + if self.same_pool(&other) { + for Entry { key, value } in other.entries { + self.insert_atom(key, value); + } + } else { + for (key, value) in other { + self.insert(key, value).expect("pools use the same keys"); + } + } + } + + pub fn retain(&mut self, mut f: impl FnMut(&str, &V) -> bool) { + self.entries.retain(|entry| { + if entry.key == TOMBSTONE_KEY { + // Since we are going through the trouble of removing + // entries, remove deleted entries opportunistically. + false + } else { + let key = self.pool.get(entry.key).unwrap(); + f(key, &entry.value) + } + }) + } + + fn same_pool(&self, other: &Object) -> bool { + Arc::ptr_eq(&self.pool, &other.pool) + } +} + +impl Object { + /// Remove `key` from the object and return the value that was + /// associated with the `key`. The entry is actually not removed for + /// efficiency reasons. It is instead replaced with an entry with a + /// dummy key and a null value. 
+ pub fn remove(&mut self, key: &str) -> Option { + match self.pool.lookup(key) { + None => None, + Some(key) => self + .entries + .iter_mut() + .find(|entry| entry.key == key) + .map(|entry| { + entry.key = TOMBSTONE_KEY; + std::mem::replace(&mut entry.value, V::null()) + }), + } + } +} + +pub struct ObjectIter<'a, V> { + pool: &'a AtomPool, + iter: std::slice::Iter<'a, Entry>, +} + +impl<'a, V> ObjectIter<'a, V> { + fn new(object: &'a Object) -> Self { + Self { + pool: object.pool.as_ref(), + iter: object.entries.as_slice().iter(), + } + } +} + +impl<'a, V> Iterator for ObjectIter<'a, V> { + type Item = (&'a str, &'a V); + + fn next(&mut self) -> Option { + while let Some(entry) = self.iter.next() { + if entry.key != TOMBSTONE_KEY { + // unwrap: we only add entries that are backed by the pool + let key = self.pool.get(entry.key).unwrap(); + return Some((key, &entry.value)); + } + } + None + } +} + +impl<'a, V> IntoIterator for &'a Object { + type Item = as Iterator>::Item; + + type IntoIter = ObjectIter<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + ObjectIter::new(self) + } +} + +pub struct ObjectOwningIter { + pool: Arc, + iter: std::vec::IntoIter>, +} + +impl ObjectOwningIter { + fn new(object: Object) -> Self { + Self { + pool: object.pool.cheap_clone(), + iter: object.entries.into_iter(), + } + } +} + +impl Iterator for ObjectOwningIter { + type Item = (Word, V); + + fn next(&mut self) -> Option { + while let Some(entry) = self.iter.next() { + if entry.key != TOMBSTONE_KEY { + // unwrap: we only add entries that are backed by the pool + let key = self.pool.get(entry.key).unwrap(); + return Some((Word::from(key), entry.value)); + } + } + None + } +} + +impl IntoIterator for Object { + type Item = as Iterator>::Item; + + type IntoIter = ObjectOwningIter; + + fn into_iter(self) -> Self::IntoIter { + ObjectOwningIter::new(self) + } +} + +impl CacheWeight for Entry { + fn indirect_weight(&self) -> usize { + self.value.indirect_weight() + } +} + +impl 
CacheWeight for Object { + fn indirect_weight(&self) -> usize { + self.entries.indirect_weight() + } +} + +impl std::fmt::Debug for Object { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.entries.fmt(f) + } +} + +impl PartialEq for Object { + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + + if self.same_pool(other) { + self.entries + .iter() + .filter(|e| e.key != TOMBSTONE_KEY) + .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + } else { + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + } + } +} + +impl Eq for Object { + fn assert_receiver_is_total_eq(&self) {} +} + +impl Serialize for Object { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_map(self.iter()) + } +} + +impl GasSizeOf for Object { + fn gas_size_of(&self) -> Gas { + Gas::new(std::mem::size_of::>() as u64) + self.entries.gas_size_of() + } +} + +#[cfg(test)] +mod tests { + use crate::prelude::r; + + use super::*; + + #[test] + fn simple() { + let mut intern = AtomPool::new(); + let hello = intern.intern("Hello"); + assert_eq!(Some(hello), intern.lookup("Hello")); + assert_eq!(None, intern.lookup("World")); + assert_eq!(Some("Hello"), intern.get(hello)); + + // Print some size information, just for understanding better how + // big these data structures are + use std::mem; + + println!( + "pool: {}, arc: {}", + mem::size_of::(), + mem::size_of::>() + ); + + println!( + "Atom: {}, FatAtom: {}", + mem::size_of::(), + mem::size_of::(), + ); + println!( + "Entry: {}, Object: {}", + mem::size_of::>(), + mem::size_of::>() + ); + println!( + "Entry: {}, Object: {}, r::Value: {}", + mem::size_of::>(), + mem::size_of::>(), + mem::size_of::() + ); + } + + #[test] + fn stacked() { + let mut base = AtomPool::new(); + let bsym = base.intern("base"); + let isym = base.intern("intern"); + let base = Arc::new(base); + + 
let mut intern = base.child(); + assert_eq!(Some(bsym), intern.lookup("base")); + + assert_eq!(bsym, intern.intern("base")); + let hello = intern.intern("hello"); + assert_eq!(None, base.get(hello)); + assert_eq!(Some("hello"), intern.get(hello)); + assert_eq!(None, base.lookup("hello")); + assert_eq!(Some(hello), intern.lookup("hello")); + assert_eq!(Some(isym), base.lookup("intern")); + assert_eq!(Some(isym), intern.lookup("intern")); + } + + fn make_pool(words: Vec<&str>) -> Arc { + let mut pool = AtomPool::new(); + for word in words { + pool.intern(word); + } + Arc::new(pool) + } + + fn make_obj(pool: Arc, entries: Vec<(&str, usize)>) -> Object { + let mut obj: Object = Object::new(pool); + for (k, v) in entries { + obj.insert(k, v).unwrap(); + } + obj + } + + #[test] + fn object_eq() { + // Make an object `{ "one": 1, "two": 2 }` that has a removed key + // `three` in it to make sure equality checking ignores removed keys + fn make_obj1(pool: Arc) -> Object { + let mut obj = make_obj(pool, vec![("one", 1), ("two", 2), ("three", 3)]); + obj.remove("three"); + obj + } + + // Make two pools with the same atoms, but different order + let pool1 = make_pool(vec!["one", "two", "three"]); + let pool2 = make_pool(vec!["three", "two", "one"]); + + // Make two objects with the same keys and values in the same order + // but different pools + let obj1 = make_obj1(pool1.clone()); + let obj2 = make_obj(pool2.clone(), vec![("one", 1), ("two", 2)]); + assert_eq!(obj1, obj2); + + // Make two objects with the same keys and values in different order + // and with different pools + let obj1 = make_obj1(pool1.clone()); + let obj2 = make_obj(pool2.clone(), vec![("two", 2), ("one", 1)]); + assert_eq!(obj1, obj2); + + // Check that two objects using the same pools and the same keys and + // values but in different order are equal + let pool = pool1; + let obj1 = make_obj1(pool.clone()); + let obj2 = make_obj(pool.clone(), vec![("two", 2), ("one", 1)]); + assert_eq!(obj1, obj2); + } + 
+ #[test] + fn object_remove() { + let pool = make_pool(vec!["one", "two", "three"]); + let mut obj = make_obj(pool.clone(), vec![("one", 1), ("two", 2)]); + + assert_eq!(Some(1), obj.remove("one")); + assert_eq!(None, obj.get("one")); + assert_eq!(Some(&2), obj.get("two")); + + let entries = obj.iter().collect::>(); + assert_eq!(vec![("two", &2)], entries); + + assert_eq!(None, obj.remove("one")); + let entries = obj.into_iter().collect::>(); + assert_eq!(vec![(Word::from("two"), 2)], entries); + } + + #[test] + fn object_insert() { + let pool = make_pool(vec!["one", "two", "three"]); + let mut obj = make_obj(pool.clone(), vec![("one", 1), ("two", 2)]); + + assert_eq!(Some(1), obj.insert("one", 17).unwrap()); + assert_eq!(Some(&17), obj.get("one")); + assert_eq!(Some(&2), obj.get("two")); + assert!(obj.insert("not interned", 42).is_err()); + + let entries = obj.iter().collect::>(); + assert_eq!(vec![("one", &17), ("two", &2)], entries); + + assert_eq!(None, obj.insert("three", 3).unwrap()); + let entries = obj.into_iter().collect::>(); + assert_eq!( + vec![ + (Word::from("one"), 17), + (Word::from("two"), 2), + (Word::from("three"), 3) + ], + entries + ); + } + + #[test] + fn object_remove_insert() { + let pool = make_pool(vec!["one", "two", "three"]); + let mut obj = make_obj(pool.clone(), vec![("one", 1), ("two", 2)]); + + // Remove an entry + assert_eq!(Some(1), obj.remove("one")); + assert_eq!(None, obj.get("one")); + + let entries = obj.iter().collect::>(); + assert_eq!(vec![("two", &2)], entries); + + // And insert it again + assert_eq!(None, obj.insert("one", 1).unwrap()); + + let entries = obj.iter().collect::>(); + assert_eq!(vec![("two", &2), ("one", &1)], entries); + + let entries = obj.into_iter().collect::>(); + assert_eq!( + vec![(Word::from("two"), 2), (Word::from("one"), 1)], + entries + ); + } + + #[test] + fn object_merge() { + let pool1 = make_pool(vec!["one", "two", "three"]); + let pool2 = make_pool(vec!["three", "two", "one"]); + + // Merge 
objects with different pools + let mut obj1 = make_obj(pool1.clone(), vec![("one", 1), ("two", 2)]); + let obj2 = make_obj(pool2.clone(), vec![("one", 11), ("three", 3)]); + + obj1.merge(obj2); + let entries = obj1.into_iter().collect::>(); + assert_eq!( + vec![ + (Word::from("one"), 11), + (Word::from("two"), 2), + (Word::from("three"), 3) + ], + entries + ); + + // Merge objects with the same pool + let mut obj1 = make_obj(pool1.clone(), vec![("one", 1), ("two", 2)]); + let obj2 = make_obj(pool1.clone(), vec![("one", 11), ("three", 3)]); + obj1.merge(obj2); + let entries = obj1.into_iter().collect::>(); + assert_eq!( + vec![ + (Word::from("one"), 11), + (Word::from("two"), 2), + (Word::from("three"), 3) + ], + entries + ); + } +} diff --git a/graph/src/util/mod.rs b/graph/src/util/mod.rs index 8af2540f401..04e7d8d76a0 100644 --- a/graph/src/util/mod.rs +++ b/graph/src/util/mod.rs @@ -29,3 +29,5 @@ pub mod mem; /// Data structures instrumented with Prometheus metrics. pub mod monitored; + +pub mod intern; From 468d2df1ec96f7c3dce90aaac026396b3abc3bea Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 22 Oct 2021 20:37:47 -0700 Subject: [PATCH 0154/2104] graph: Keep an AtomPool in InputSchema --- graph/src/data/store/mod.rs | 24 ++++++---- graph/src/schema/input_schema.rs | 79 +++++++++++++++++++++++++++++--- graph/src/schema/mod.rs | 8 ---- 3 files changed, 87 insertions(+), 24 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 8024fb9db09..f0e282cda41 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -3,7 +3,8 @@ use crate::{ data::graphql::ObjectTypeExt, prelude::{anyhow::Context, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, - schema::{AtomPool, InputSchema}, + schema::InputSchema, + util::intern::AtomPool, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; use anyhow::{anyhow, Error}; @@ -11,11 +12,11 @@ use itertools::Itertools; use 
serde::de; use serde::{Deserialize, Serialize}; use stable_hash::{FieldAddress, StableHash, StableHasher}; -use std::convert::TryFrom; use std::fmt; use std::iter::FromIterator; use std::str::FromStr; use std::{borrow::Cow, collections::HashMap}; +use std::{convert::TryFrom, sync::Arc}; use strum::AsStaticRef as _; use strum_macros::AsStaticStr; @@ -636,19 +637,19 @@ macro_rules! entity { () => { { let pairs = Vec::new(); - let pool = $crate::schema::AtomPool; - Entity::make(pool, pairs) + let pool = $crate::util::intern::AtomPool::new(); + Entity::make(std::sync::Arc::new(pool), pairs) } }; ($($name:ident: $value:expr,)*) => { { let mut pairs = Vec::new(); - let mut pool = $crate::schema::AtomPool; + let mut pool = $crate::util::intern::AtomPool::new(); $( pool.intern(stringify!($name)); pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); )* - $crate::data::store::Entity::make(pool, pairs) + $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs) } }; ($($name:ident: $value:expr),*) => { @@ -657,7 +658,7 @@ macro_rules! entity { ($($name:ident: $value:expr,)*; $($extra:ident,)*) => { { let mut pairs = Vec::new(); - let mut pool = $crate::schema::AtomPool; + let mut pool = $crate::util::intern::AtomPool::new(); $( pool.intern(stringify!($name)); pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); @@ -665,7 +666,7 @@ macro_rules! entity { $( pool.intern(stringify!($extra)); )* - $crate::data::store::Entity::make(pool, pairs) + $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs) } }; ($($name:ident: $value:expr),*; $($extra:ident),*) => { @@ -703,11 +704,14 @@ macro_rules! 
entity { } impl Entity { - pub fn make(_pool: AtomPool, iter: I) -> Entity { + pub fn make(_pool: Arc, iter: I) -> Entity { Entity(HashMap::from_iter(iter)) } - pub fn try_make>(_pool: AtomPool, iter: I) -> Result { + pub fn try_make>( + _pool: Arc, + iter: I, + ) -> Result { let map: HashMap<_, _> = iter.into_iter().collect::>()?; Ok(Entity(map)) } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 93b2d323bd6..0faa5e7ae22 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -13,9 +13,10 @@ use crate::data::store::{self, scalar, IntoEntityIterator, TryIntoEntityIterator use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; +use crate::util::intern::AtomPool; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, AtomPool, Schema, SchemaValidationError}; +use super::{ApiSchema, Schema, SchemaValidationError}; #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { @@ -26,7 +27,7 @@ pub struct InputSchema { pub struct Inner { schema: Schema, immutable_types: HashSet, - pool: AtomPool, + pool: Arc, } impl std::ops::Deref for InputSchema { @@ -55,7 +56,9 @@ impl InputSchema { .filter(|obj_type| obj_type.is_immutable()) .map(Into::into), ); - let pool = AtomPool; + + let pool = Arc::new(atom_pool(&schema.document)); + Self { inner: Arc::new(Inner { schema, @@ -64,6 +67,7 @@ impl InputSchema { }), } } + pub fn new(id: DeploymentHash, document: s::Document) -> Result { let schema = Schema::new(id, document)?; Ok(Self::create(schema)) @@ -235,9 +239,14 @@ impl Inner { &self, entity: &str, ) -> Result, anyhow::Error> { - Ok(self - .schema - .document + Self::fulltext_definitions(&self.schema.document, entity) + } + + fn fulltext_definitions( + document: &s::Document, + entity: &str, + ) -> Result, anyhow::Error> { + Ok(document .get_fulltext_directives()? 
.into_iter() .filter(|directive| match directive.argument("include") { @@ -282,3 +291,61 @@ impl Inner { Entity::try_make(self.pool.clone(), iter) } } + +/// Create a new pool that contains the names of all the types defined +/// in the document and the names of all their fields +fn atom_pool(document: &s::Document) -> AtomPool { + let mut pool = AtomPool::new(); + // These two entries are always required + pool.intern("g$parent_id"); // Used by queries + pool.intern("__typename"); // Mandated by GraphQL + pool.intern("digest"); // Attribute of PoI object + for definition in &document.definitions { + match definition { + s::Definition::TypeDefinition(typedef) => match typedef { + s::TypeDefinition::Object(t) => { + pool.intern(&t.name); + for field in &t.fields { + pool.intern(&field.name); + } + } + s::TypeDefinition::Enum(t) => { + pool.intern(&t.name); + } + s::TypeDefinition::Interface(t) => { + pool.intern(&t.name); + for field in &t.fields { + pool.intern(&field.name); + } + } + s::TypeDefinition::InputObject(input_object) => { + pool.intern(&input_object.name); + for field in &input_object.fields { + pool.intern(&field.name); + } + } + s::TypeDefinition::Scalar(scalar_type) => { + pool.intern(&scalar_type.name); + } + s::TypeDefinition::Union(union_type) => { + pool.intern(&union_type.name); + for typ in &union_type.types { + pool.intern(typ); + } + } + }, + s::Definition::SchemaDefinition(_) + | s::Definition::TypeExtension(_) + | s::Definition::DirectiveDefinition(_) => { /* ignore, these only happen for introspection schemas */ + } + } + } + + for object_type in document.get_object_type_definitions() { + for defn in Inner::fulltext_definitions(&document, &object_type.name).unwrap() { + pool.intern(defn.name.as_str()); + } + } + + pool +} diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 4a669a9ac89..5d4a3a0789a 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -37,14 +37,6 @@ pub use api::{ApiSchema, ErrorPolicy}; 
pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; -/// Placeholder type until we are ready to use a real intern::AtomPool -#[derive(Clone, Debug, PartialEq)] -pub struct AtomPool; - -impl AtomPool { - pub fn intern(&mut self, _s: &str) {} -} - pub const SCHEMA_TYPE_NAME: &str = "_Schema_"; pub const META_FIELD_TYPE: &str = "_Meta_"; From abff069591a418599ccfa02670d33a93bb266ca7 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 13:28:14 -0700 Subject: [PATCH 0155/2104] graph: Use an interned Object for Entity --- graph/src/data/store/mod.rs | 45 ++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index f0e282cda41..52e33a8a1ff 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -5,6 +5,7 @@ use crate::{ runtime::gas::{Gas, GasSizeOf}, schema::InputSchema, util::intern::AtomPool, + util::intern::{NullValue, Object}, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; use anyhow::{anyhow, Error}; @@ -12,11 +13,11 @@ use itertools::Itertools; use serde::de; use serde::{Deserialize, Serialize}; use stable_hash::{FieldAddress, StableHash, StableHasher}; +use std::borrow::Cow; +use std::convert::TryFrom; use std::fmt; -use std::iter::FromIterator; use std::str::FromStr; -use std::{borrow::Cow, collections::HashMap}; -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use strum::AsStaticRef as _; use strum_macros::AsStaticStr; @@ -279,6 +280,12 @@ impl StableHash for Value { } } +impl NullValue for Value { + fn null() -> Self { + Value::Null + } +} + impl Value { pub fn from_query_value(value: &r::Value, ty: &s::Type) -> Result { use graphql_parser::schema::Type::{ListType, NamedType, NonNullType}; @@ -590,7 +597,7 @@ where /// An entity is represented as a map of attribute names to values. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct Entity(HashMap); +pub struct Entity(Object); pub trait IntoEntityIterator: IntoIterator {} @@ -704,16 +711,24 @@ macro_rules! entity { } impl Entity { - pub fn make(_pool: Arc, iter: I) -> Entity { - Entity(HashMap::from_iter(iter)) + pub fn make(pool: Arc, iter: I) -> Entity { + let mut obj = Object::new(pool); + for (key, value) in iter { + obj.insert(key, value).expect("key is in AtomPool"); + } + Entity(obj) } pub fn try_make>( - _pool: Arc, + pool: Arc, iter: I, ) -> Result { - let map: HashMap<_, _> = iter.into_iter().collect::>()?; - Ok(Entity(map)) + let mut obj = Object::new(pool); + for pair in iter { + let (key, value) = pair?; + obj.insert(key, value).expect("key is in AtomPool"); + } + Ok(Entity(obj)) } pub fn get(&self, key: &str) -> Option<&Value> { @@ -721,7 +736,7 @@ impl Entity { } pub fn insert(&mut self, key: String, value: Value) -> Option { - self.0.insert(Word::from(key), value) + self.0.insert(&key, value).expect("key is in AtomPool") } pub fn remove(&mut self, key: &str) -> Option { @@ -734,7 +749,7 @@ impl Entity { // This collects the entity into an ordered vector so that it can be iterated deterministically. pub fn sorted(self) -> Vec<(Word, Value)> { - let mut v: Vec<_> = self.0.into_iter().collect(); + let mut v: Vec<_> = self.0.into_iter().map(|(k, v)| (k, v)).collect(); v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); v } @@ -754,7 +769,9 @@ impl Entity { /// Convenience method to save having to `.into()` the arguments. pub fn set(&mut self, name: impl Into, value: impl Into) -> Option { - self.0.insert(Word::from(name.into()), value.into()) + self.0 + .insert(Word::from(name.into()), value.into()) + .expect("key is in AtomPool") } /// Merges an entity update `update` into this entity. @@ -763,9 +780,7 @@ impl Entity { /// If a key only exists on one entity, the value from that entity is chosen. 
/// If a key is set to `Value::Null` in `update`, the key/value pair is set to `Value::Null`. pub fn merge(&mut self, update: Entity) { - for (key, value) in update.0.into_iter() { - self.insert(key.to_string(), value); - } + self.0.merge(update.0); } /// Merges an entity update `update` into this entity, removing `Value::Null` values. From 2582131d95b73ecfc8247049598f48cd0a10c3d4 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 13:34:17 -0700 Subject: [PATCH 0156/2104] graph, store: Take &str, not String, for the key in Entity.insert --- graph/src/data/store/mod.rs | 6 +++--- store/postgres/src/relational_queries.rs | 2 +- store/test-store/tests/postgres/relational.rs | 2 +- store/test-store/tests/postgres/store.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 52e33a8a1ff..5aa9153456f 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -735,8 +735,8 @@ impl Entity { self.0.get(&Word::from(key)) } - pub fn insert(&mut self, key: String, value: Value) -> Option { - self.0.insert(&key, value).expect("key is in AtomPool") + pub fn insert(&mut self, key: &str, value: Value) -> Option { + self.0.insert(key, value).expect("key is in AtomPool") } pub fn remove(&mut self, key: &str) -> Option { @@ -792,7 +792,7 @@ impl Entity { for (key, value) in update.0.into_iter() { match value { Value::Null => self.remove(&key), - _ => self.insert(key.to_string(), value), + _ => self.insert(&key, value), }; } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 2ae198d87bd..d2d3417d5b3 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1762,7 +1762,7 @@ impl<'a> InsertQuery<'a> { if !fulltext_field_values.is_empty() { entity .to_mut() - .insert(column.field.to_string(), Value::List(fulltext_field_values)); + .insert(&column.field, 
Value::List(fulltext_field_values)); } } if !column.is_nullable() && !entity.contains_key(&column.field) { diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index cf551d72c5f..bc53b23647c 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -340,7 +340,7 @@ fn make_user( favorite_color: favorite_color }; if let Some(drinks) = drinks { - user.insert("drinks".to_owned(), drinks.into()); + user.insert("drinks", drinks.into()); } user } diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 9d3aa66be20..05783fcaf4e 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -446,7 +446,7 @@ fn update_existing() { _ => unreachable!(), }; - new_data.insert("bin_name".to_owned(), Value::Bytes(bin_name)); + new_data.insert("bin_name", Value::Bytes(bin_name)); assert_eq!(writable.get(&entity_key).unwrap(), Some(new_data)); }) } From e6238220d4fb0164961b4efeaea200a31e3ef116 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 13:39:42 -0700 Subject: [PATCH 0157/2104] graph: Use &str for the key in Entity.set --- graph/src/data/store/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 5aa9153456f..5d70bea5834 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -768,9 +768,9 @@ impl Entity { } /// Convenience method to save having to `.into()` the arguments. 
- pub fn set(&mut self, name: impl Into, value: impl Into) -> Option { + pub fn set(&mut self, name: &str, value: impl Into) -> Option { self.0 - .insert(Word::from(name.into()), value.into()) + .insert(name, value.into()) .expect("key is in AtomPool") } From 22c82ba8baacb15e5233380419ad1e96cf13adcb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 13:53:45 -0700 Subject: [PATCH 0158/2104] graph: Avoid Word::from in a few places in Entity --- graph/src/data/store/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 5d70bea5834..8789e04a8a1 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -732,7 +732,7 @@ impl Entity { } pub fn get(&self, key: &str) -> Option<&Value> { - self.0.get(&Word::from(key)) + self.0.get(key) } pub fn insert(&mut self, key: &str, value: Value) -> Option { @@ -740,11 +740,11 @@ impl Entity { } pub fn remove(&mut self, key: &str) -> Option { - self.0.remove(&Word::from(key)) + self.0.remove(key) } pub fn contains_key(&self, key: &str) -> bool { - self.0.contains_key(&Word::from(key)) + self.0.contains_key(key) } // This collects the entity into an ordered vector so that it can be iterated deterministically. 
From 9a5c305f9a130bb99dd5d58791c0c5def3b5ff13 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 14:17:11 -0700 Subject: [PATCH 0159/2104] all: Do not panic in Entity::make when given uninterned keys --- chain/substreams/src/trigger.rs | 2 +- core/src/subgraph/runner.rs | 2 +- graph/src/components/store/entity_cache.rs | 2 +- graph/src/data/store/mod.rs | 21 ++++++++++---- graph/src/schema/input_schema.rs | 2 +- graph/src/util/intern.rs | 8 ++++++ runtime/test/src/test.rs | 12 ++++---- runtime/wasm/src/host_exports.rs | 2 +- store/postgres/src/fork.rs | 28 ++++++++++--------- store/test-store/tests/graphql/query.rs | 4 +-- store/test-store/tests/postgres/relational.rs | 3 +- .../tests/postgres/relational_bytes.rs | 18 ++++++++---- 12 files changed, 66 insertions(+), 38 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 74519f0a563..e2d92541a9f 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -217,7 +217,7 @@ where logger, ); - let entity = state.entity_cache.make_entity(data); + let entity = state.entity_cache.make_entity(data)?; state.entity_cache.set(key, entity)?; } Operation::Delete => { diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 671bc0b8877..3d88e5b3036 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -1046,7 +1046,7 @@ async fn update_proof_of_indexing( entity_cache.schema => id: entity_key.entity_id.to_string(), digest: updated_proof_of_indexing, - }; + }?; entity_cache.set(entity_key, new_poi_entity)?; } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index f2d82292aed..e4adfe81811 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -73,7 +73,7 @@ impl EntityCache { } /// Make a new entity. 
The entity is not part of the cache - pub fn make_entity(&self, iter: I) -> Entity { + pub fn make_entity(&self, iter: I) -> Result { self.schema.make_entity(iter) } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 8789e04a8a1..986b41bcf61 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -612,6 +612,10 @@ impl>> TryIntoEntityIterator< /// schema-less form can only be used in tests, since it creates an /// `AtomPool` just for this entity behind the scenes. /// +/// The form with schema returns a `Result` since it can be +/// used in production code. The schemaless form returns an `Entity` because +/// it unwraps the `Result` for you. +/// /// Production code should always use the form with the schema /// ``` /// use graph::entity; @@ -621,7 +625,7 @@ impl>> TryIntoEntityIterator< /// let id = DeploymentHash::new("Qm123").unwrap(); /// let schema = InputSchema::parse("type User @entity { id: String!, name: String! }", id).unwrap(); /// -/// let entity = entity! { schema => id: "1", name: "John Doe" }; +/// let entity = entity! { schema => id: "1", name: "John Doe" }.unwrap(); /// ``` /// /// Test code which often doesn't have access to an `InputSchema` can use @@ -656,7 +660,7 @@ macro_rules! entity { pool.intern(stringify!($name)); pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); )* - $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs) + $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs).unwrap() } }; ($($name:ident: $value:expr),*) => { @@ -673,7 +677,7 @@ macro_rules! entity { $( pool.intern(stringify!($extra)); )* - $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs) + $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs).unwrap() } }; ($($name:ident: $value:expr),*; $($extra:ident),*) => { @@ -711,12 +715,17 @@ macro_rules! 
entity { } impl Entity { - pub fn make(pool: Arc, iter: I) -> Entity { + pub fn make(pool: Arc, iter: I) -> Result { let mut obj = Object::new(pool); for (key, value) in iter { - obj.insert(key, value).expect("key is in AtomPool"); + obj.insert(key, value).map_err(|e| { + anyhow!( + "Unknown key `{}`. It probably is not part of the schema", + e.not_interned() + ) + })?; } - Entity(obj) + Ok(Entity(obj)) } pub fn try_make>( diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 0faa5e7ae22..5411fe02ce0 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -283,7 +283,7 @@ impl Inner { self.schema.validate() } - pub fn make_entity(&self, iter: I) -> Entity { + pub fn make_entity(&self, iter: I) -> Result { Entity::make(self.pool.clone(), iter) } diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 2d68ae82f20..cefab46a91a 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -51,6 +51,14 @@ pub enum Error { NotInterned(String), } +impl Error { + pub fn not_interned(&self) -> &str { + match self { + Error::NotInterned(s) => s, + } + } +} + #[derive(Debug, PartialEq)] /// A pool of interned strings. Pools can be organized hierarchically with /// lookups in child pools also considering the parent pool. 
The chain of diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 3f428669d90..08a83866f4c 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -939,11 +939,13 @@ async fn test_entity_store(api_version: Version) { None } else { Some( - schema.make_entity( - module - .asc_get::, _>(entity_ptr) - .unwrap(), - ), + schema + .make_entity( + module + .asc_get::, _>(entity_ptr) + .unwrap(), + ) + .unwrap(), ) } }; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index a2136f02271..e6f0d411193 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -184,7 +184,7 @@ impl HostExports { let entity = state .entity_cache - .make_entity(data.into_iter().map(|(key, value)| (key, value))); + .make_entity(data.into_iter().map(|(key, value)| (key, value)))?; state.entity_cache.set(key, entity)?; diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 9f03e5e2792..ddad71e2514 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -220,7 +220,7 @@ query Query ($id: String) {{ map }; - Ok(Some(schema.make_entity(map))) + Ok(Some(schema.make_entity(map)?)) } } @@ -368,18 +368,20 @@ mod tests { assert_eq!( entity.unwrap(), - schema.make_entity(vec![ - ("id".into(), Value::String("0x00".to_string())), - ( - "owner".into(), - Value::Bytes(scalar::Bytes::from_str("0x01").unwrap()) - ), - ("displayName".into(), Value::String("test".to_string())), - ( - "imageUrl".into(), - Value::String("http://example.com/image.png".to_string()) - ), - ]) + schema + .make_entity(vec![ + ("id".into(), Value::String("0x00".to_string())), + ( + "owner".into(), + Value::Bytes(scalar::Bytes::from_str("0x01").unwrap()) + ), + ("displayName".into(), Value::String("test".to_string())), + ( + "imageUrl".into(), + Value::String("http://example.com/image.png".to_string()) + ), + ]) + .unwrap() ); } } diff --git a/store/test-store/tests/graphql/query.rs 
b/store/test-store/tests/graphql/query.rs index 6be99be3152..1edf00e6894 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -344,12 +344,12 @@ async fn insert_test_entities( entity! { is => __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, entity! { is => __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, entity! { is => __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, - ]; + ].into_iter().collect::>().unwrap(); let entities1 = vec![ entity! { is => __typename: "Musician", id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, entity! { is => __typename: "Musician", id: "m4", name: "Valerie", bands: Vec::::new() }, - ]; + ].into_iter().collect::>().unwrap(); async fn insert_at(entities: Vec, deployment: &DeploymentLocator, block_ptr: BlockPtr) { let insert_ops = entities.into_iter().map(|data| EntityOperation::Set { diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index bc53b23647c..562612e443d 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -338,7 +338,8 @@ fn make_user( weight: BigDecimal::from(weight), coffee: coffee, favorite_color: favorite_color - }; + } + .unwrap(); if let Some(drinks) = drinks { user.insert("drinks", drinks.into()); } diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index b840345be2c..a0a3f5577f2 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -100,7 +100,8 @@ fn insert_thing(conn: &PgConnection, layout: &Layout, id: &str, name: &str) { entity! 
{ layout.input_schema => id: id, name: name - }, + } + .unwrap(), ); } @@ -371,29 +372,34 @@ fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Ent id: ROOT, name: "root", children: vec!["babe01", "babe02"] - }; + } + .unwrap(); let child1 = entity! { layout.input_schema => id: CHILD1, name: "child1", parent: "dead00", children: vec![GRANDCHILD1] - }; + } + .unwrap(); let child2 = entity! { layout.input_schema => id: CHILD2, name: "child2", parent: "dead00", children: vec![GRANDCHILD1] - }; + } + .unwrap(); let grand_child1 = entity! { layout.input_schema => id: GRANDCHILD1, name: "grandchild1", parent: CHILD1 - }; + } + .unwrap(); let grand_child2 = entity! { layout.input_schema => id: GRANDCHILD2, name: "grandchild2", parent: CHILD2 - }; + } + .unwrap(); insert_entity(conn, layout, "Thing", root.clone()); insert_entity(conn, layout, "Thing", child1.clone()); From 7dc748bb021f657579d85ec7378e02e71f0472e6 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 14:42:13 -0700 Subject: [PATCH 0160/2104] graph, store: Do not panic in Entity.insert when given an uninterned key --- graph/src/components/store/entity_cache.rs | 17 +++++++---------- graph/src/components/store/err.rs | 2 ++ graph/src/components/store/mod.rs | 17 ++++++++++++----- graph/src/data/store/mod.rs | 11 ++++++----- graph/src/util/intern.rs | 2 +- store/postgres/src/relational_queries.rs | 3 ++- store/test-store/tests/postgres/relational.rs | 2 +- store/test-store/tests/postgres/store.rs | 2 +- 8 files changed, 32 insertions(+), 24 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index e4adfe81811..f25c8d20646 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -9,7 +9,7 @@ use crate::prelude::ENV_VARS; use crate::schema::InputSchema; use crate::util::lfu_cache::LfuCache; -use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest}; 
+use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; /// The scope in which the `EntityCache` should perform a `get` operation pub enum GetScope { @@ -113,11 +113,7 @@ impl EntityCache { self.handler_updates.clear(); } - pub fn get( - &mut self, - key: &EntityKey, - scope: GetScope, - ) -> Result, s::QueryExecutionError> { + pub fn get(&mut self, key: &EntityKey, scope: GetScope) -> Result, StoreError> { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. let mut entity = match scope { @@ -133,10 +129,10 @@ impl EntityCache { }); if let Some(op) = self.updates.get(key).cloned() { - entity = op.apply_to(entity) + entity = op.apply_to(entity).map_err(|e| key.unknown_attribute(e))?; } if let Some(op) = self.handler_updates.get(key).cloned() { - entity = op.apply_to(entity) + entity = op.apply_to(entity).map_err(|e| key.unknown_attribute(e))?; } Ok(entity) } @@ -266,7 +262,7 @@ impl EntityCache { /// to the current state is actually needed. /// /// Also returns the updated `LfuCache`. - pub fn as_modifications(mut self) -> Result { + pub fn as_modifications(mut self) -> Result { assert!(!self.in_handler); // The first step is to make sure all entities being set are in `self.current`. 
@@ -305,7 +301,8 @@ impl EntityCache { // Entity may have been changed (Some(current), EntityOp::Update(updates)) => { let mut data = current.clone(); - data.merge_remove_null_fields(updates); + data.merge_remove_null_fields(updates) + .map_err(|e| key.unknown_attribute(e))?; self.current.insert(key.clone(), Some(data.clone())); if current != data { Some(Overwrite { key, data }) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index e20c1f9915b..53869bc4241 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -18,6 +18,8 @@ pub enum StoreError { UnknownField(String), #[error("unknown table '{0}'")] UnknownTable(String), + #[error("entity type '{0}' does not have an attribute '{1}'")] + UnknownAttribute(String, String), #[error("malformed directive '{0}'")] MalformedDirective(String), #[error("query execution failed: {0}")] diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index e84dc59a13b..4146be3d6d3 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -29,6 +29,7 @@ use crate::data/store::*; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::schema::InputSchema; +use crate::util::intern::{self, Error as InternError}; use crate::{constraint_violation, prelude::*}; /// The type name of an entity. This is the string that is used in the
@@ -1008,14 +1015,14 @@ enum EntityOp { } impl EntityOp { - fn apply_to(self, entity: Option) -> Option { + fn apply_to(self, entity: Option) -> Result, InternError> { use EntityOp::*; match (self, entity) { - (Remove, _) => None, - (Overwrite(new), _) | (Update(new), None) => Some(new), + (Remove, _) => Ok(None), + (Overwrite(new), _) | (Update(new), None) => Ok(Some(new)), (Update(updates), Some(mut entity)) => { - entity.merge_remove_null_fields(updates); - Some(entity) + entity.merge_remove_null_fields(updates)?; + Ok(Some(entity)) } } } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 986b41bcf61..63036d8f4c7 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -5,7 +5,7 @@ use crate::{ runtime::gas::{Gas, GasSizeOf}, schema::InputSchema, util::intern::AtomPool, - util::intern::{NullValue, Object}, + util::intern::{Error as InternError, NullValue, Object}, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; use anyhow::{anyhow, Error}; @@ -744,8 +744,8 @@ impl Entity { self.0.get(key) } - pub fn insert(&mut self, key: &str, value: Value) -> Option { - self.0.insert(key, value).expect("key is in AtomPool") + pub fn insert(&mut self, key: &str, value: Value) -> Result, InternError> { + self.0.insert(key, value) } pub fn remove(&mut self, key: &str) -> Option { @@ -797,13 +797,14 @@ impl Entity { /// If a key exists in both entities, the value from `update` is chosen. /// If a key only exists on one entity, the value from that entity is chosen. /// If a key is set to `Value::Null` in `update`, the key/value pair is removed. 
- pub fn merge_remove_null_fields(&mut self, update: Entity) { + pub fn merge_remove_null_fields(&mut self, update: Entity) -> Result<(), InternError> { for (key, value) in update.0.into_iter() { match value { Value::Null => self.remove(&key), - _ => self.insert(&key, value), + _ => self.insert(&key, value)?, }; } + Ok(()) } /// Remove all entries with value `Value::Null` from `self` diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index cefab46a91a..1c039a73fa0 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -52,7 +52,7 @@ pub enum Error { } impl Error { - pub fn not_interned(&self) -> &str { + pub fn not_interned(self) -> String { match self { Error::NotInterned(s) => s, } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index d2d3417d5b3..519701b8a1e 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1762,7 +1762,8 @@ impl<'a> InsertQuery<'a> { if !fulltext_field_values.is_empty() { entity .to_mut() - .insert(&column.field, Value::List(fulltext_field_values)); + .insert(&column.field, Value::List(fulltext_field_values)) + .map_err(|e| entity_key.unknown_attribute(e))?; } } if !column.is_nullable() && !entity.contains_key(&column.field) { diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 562612e443d..056c28357fa 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -341,7 +341,7 @@ fn make_user( } .unwrap(); if let Some(drinks) = drinks { - user.insert("drinks", drinks.into()); + user.insert("drinks", drinks.into()).unwrap(); } user } diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 05783fcaf4e..7326de0c1ca 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -446,7 +446,7 @@ fn update_existing() { _ 
=> unreachable!(), }; - new_data.insert("bin_name", Value::Bytes(bin_name)); + new_data.insert("bin_name", Value::Bytes(bin_name)).unwrap(); assert_eq!(writable.get(&entity_key).unwrap(), Some(new_data)); }) } From 5a065c1f285f81770f17ed86a30623fdd36c06a4 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 14:47:39 -0700 Subject: [PATCH 0161/2104] graph, store: Do not panic in Entity::set when given an uninterned key --- graph/src/components/store/entity_cache.rs | 3 ++- graph/src/data/store/mod.rs | 22 ++++++++++------ graph/src/schema/ast.rs | 18 ++++++++----- store/test-store/tests/graph/entity_cache.rs | 2 +- store/test-store/tests/postgres/graft.rs | 2 +- store/test-store/tests/postgres/relational.rs | 26 +++++++++---------- .../tests/postgres/relational_bytes.rs | 4 +-- 7 files changed, 44 insertions(+), 33 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index f25c8d20646..4280cc0ba65 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -192,7 +192,8 @@ impl EntityCache { } None => { let value = self.schema.id_value(&key)?; - entity.set("id", value); + // unwrap: our AtomPool always has an id in it + entity.set("id", value).unwrap(); } } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 63036d8f4c7..d05f2193212 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -777,10 +777,12 @@ impl Entity { } /// Convenience method to save having to `.into()` the arguments. - pub fn set(&mut self, name: &str, value: impl Into) -> Option { - self.0 - .insert(name, value.into()) - .expect("key is in AtomPool") + pub fn set( + &mut self, + name: &str, + value: impl Into, + ) -> Result, InternError> { + self.0.insert(name, value.into()) } /// Merges an entity update `update` into this entity. 
@@ -1036,7 +1038,9 @@ fn entity_validation() { } let mut thing = make_thing("t1"); - thing.set("things", Value::from(vec!["thing1", "thing2"])); + thing + .set("things", Value::from(vec!["thing1", "thing2"])) + .unwrap(); check(thing, ""); let thing = make_thing("t2"); @@ -1057,7 +1061,7 @@ fn entity_validation() { ); let mut thing = make_thing("t5"); - thing.set("name", Value::Int(32)); + thing.set("name", Value::Int(32)).unwrap(); check( thing, "Entity Thing[t5]: the value `32` for field `name` must \ @@ -1065,7 +1069,9 @@ fn entity_validation() { ); let mut thing = make_thing("t6"); - thing.set("things", Value::List(vec!["thing1".into(), 17.into()])); + thing + .set("things", Value::List(vec!["thing1".into(), 17.into()])) + .unwrap(); check( thing, "Entity Thing[t6]: field `things` is of type [Thing!]!, \ @@ -1078,7 +1084,7 @@ fn entity_validation() { check(thing, ""); let mut thing = make_thing("t8"); - thing.set("cruft", "wat"); + thing.set("cruft", "wat").unwrap(); check( thing, "Entity Thing[t8]: field `cruft` is derived and can not be set", diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 4d5635bece7..550e43df7f4 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -457,7 +457,9 @@ fn entity_validation() { } let mut thing = make_thing("t1"); - thing.set("things", store::Value::from(vec!["thing1", "thing2"])); + thing + .set("things", store::Value::from(vec!["thing1", "thing2"])) + .unwrap(); check(thing, ""); let thing = make_thing("t2"); @@ -478,7 +480,7 @@ fn entity_validation() { ); let mut thing = make_thing("t5"); - thing.set("name", store::Value::Int(32)); + thing.set("name", store::Value::Int(32)).unwrap(); check( thing, "Entity Thing[t5]: the value `32` for field `name` must \ @@ -486,10 +488,12 @@ fn entity_validation() { ); let mut thing = make_thing("t6"); - thing.set( - "things", - store::Value::List(vec!["thing1".into(), 17.into()]), - ); + thing + .set( + "things", + store::Value::List(vec!["thing1".into(), 
17.into()]), + ) + .unwrap(); check( thing, "Entity Thing[t6]: field `things` is of type [Thing!]!, \ @@ -502,7 +506,7 @@ fn entity_validation() { check(thing, ""); let mut thing = make_thing("t8"); - thing.set("cruft", "wat"); + thing.set("cruft", "wat").unwrap(); check( thing, "Entity Thing[t8]: field `cruft` is derived and can not be set", diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 4859c953658..f9784464669 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -727,7 +727,7 @@ fn scoped_get() { assert_eq!(None, act1); // But if it gets updated, it becomes visible with either scope let mut wallet1 = wallet1; - wallet1.set("balance", 70); + wallet1.set("balance", 70).unwrap(); cache.set(key1.clone(), wallet1.clone()).unwrap(); let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); assert_eq!(Some(&wallet1), act1.as_ref()); diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 8f6a9f7deff..7194cb7ab15 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -316,7 +316,7 @@ async fn check_graft( assert_eq!(Some(&Value::from("queensha@email.com")), shaq.get("email")); // Make our own entries for block 2 - shaq.set("email", "shaq@gmail.com"); + shaq.set("email", "shaq@gmail.com").unwrap(); let op = EntityOperation::Set { key: EntityKey::data(USER.to_owned(), "3"), data: shaq, diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 056c28357fa..2f5dbb599d0 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -580,9 +580,9 @@ fn update() { // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); - entity.set("string", "updated"); + entity.set("string", "updated").unwrap(); entity.remove("strings"); - entity.set("bool", 
Value::Null); + entity.set("bool", Value::Null).unwrap(); let key = EntityKey::data("Scalar".to_owned(), entity.id().unwrap()); let entity_type = EntityType::from("Scalar"); @@ -608,9 +608,9 @@ fn update_many() { run_test(|conn, layout| { let mut one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); - two.set("id", "two"); + two.set("id", "two").unwrap(); let mut three = SCALAR_ENTITY.clone(); - three.set("id", "three"); + three.set("id", "three").unwrap(); insert_entity( conn, layout, @@ -622,15 +622,15 @@ fn update_many() { assert_eq!(3, count_scalar_entities(conn, layout)); // update with overwrite - one.set("string", "updated"); + one.set("string", "updated").unwrap(); one.remove("strings"); - two.set("string", "updated too"); - two.set("bool", false); + two.set("string", "updated too").unwrap(); + two.set("bool", false).unwrap(); - three.set("string", "updated in a different way"); + three.set("string", "updated in a different way").unwrap(); three.remove("strings"); - three.set("color", "red"); + three.set("color", "red").unwrap(); // generate keys let entity_type = EntityType::from("Scalar"); @@ -708,7 +708,7 @@ fn serialize_bigdecimal() { for d in &["50", "50.00", "5000", "0.5000", "0.050", "0.5", "0.05"] { let d = BigDecimal::from_str(d).unwrap(); - entity.set("bigDecimal", d); + entity.set("bigDecimal", d).unwrap(); let key = EntityKey::data("Scalar".to_owned(), entity.id().unwrap().clone()); let entity_type = EntityType::from("Scalar"); @@ -757,7 +757,7 @@ fn delete() { run_test(|conn, layout| { insert_entity(conn, layout, "Scalar", vec![SCALAR_ENTITY.clone()]); let mut two = SCALAR_ENTITY.clone(); - two.set("id", "two"); + two.set("id", "two").unwrap(); insert_entity(conn, layout, "Scalar", vec![two]); // Delete where nothing is getting deleted @@ -789,9 +789,9 @@ fn insert_many_and_delete_many() { run_test(|conn, layout| { let one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); - two.set("id", "two"); + two.set("id", 
"two").unwrap(); let mut three = SCALAR_ENTITY.clone(); - three.set("id", "three"); + three.set("id", "three").unwrap(); insert_entity(conn, layout, "Scalar", vec![one, two, three]); // confidence test: there should be 3 scalar entities in store right now diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index a0a3f5577f2..7de87497f5a 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -295,7 +295,7 @@ fn update() { // Update the entity let mut entity = BEEF_ENTITY.clone(); - entity.set("name", "Moo"); + entity.set("name", "Moo").unwrap(); let key = EntityKey::data("Thing".to_owned(), entity.id().unwrap()); let entity_id = entity.id().unwrap(); @@ -325,7 +325,7 @@ fn delete() { insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()); let mut two = BEEF_ENTITY.clone(); - two.set("id", TWO_ID); + two.set("id", TWO_ID).unwrap(); insert_entity(conn, layout, "Thing", two); // Delete where nothing is getting deleted From 58811ae102325164b4da4b9b2d4731fbf0bcf8af Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 13 Apr 2023 15:27:01 -0700 Subject: [PATCH 0162/2104] graph, store: Do not panic in Entity::try_make on uninterned keys --- graph/src/data/store/mod.rs | 7 ++++--- graph/src/schema/input_schema.rs | 8 +++++++- store/postgres/src/relational_queries.rs | 2 +- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index d05f2193212..62aa3298eda 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -728,14 +728,15 @@ impl Entity { Ok(Entity(obj)) } - pub fn try_make>( + pub fn try_make>( pool: Arc, iter: I, - ) -> Result { + ) -> Result { let mut obj = Object::new(pool); for pair in iter { let (key, value) = pair?; - obj.insert(key, value).expect("key is in AtomPool"); + obj.insert(key, value) + .map_err(|e| anyhow!("unknown 
attribute {}", e.not_interned()))?; } Ok(Entity(obj)) } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 5411fe02ce0..4eeea3c4cbd 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -287,7 +287,13 @@ impl Inner { Entity::make(self.pool.clone(), iter) } - pub fn try_make_entity>(&self, iter: I) -> Result { + pub fn try_make_entity< + E: std::error::Error + Send + Sync + 'static, + I: TryIntoEntityIterator, + >( + &self, + iter: I, + ) -> Result { Entity::try_make(self.pool.clone(), iter) } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 519701b8a1e..dfe170f5aac 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -254,7 +254,7 @@ impl FromEntityData for Entity { schema: &InputSchema, iter: I, ) -> Result { - schema.try_make_entity(iter) + schema.try_make_entity(iter).map_err(StoreError::from) } } From 8a620be0e53f837687ce82402b2063c26a2c43ba Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 20 Apr 2023 11:01:05 -0700 Subject: [PATCH 0163/2104] graph: Reduce size of Atom to a u16 With the current implementation, it doesn't save much memory compared to u32, but it makes sure we can fit all atoms into a u16, and enables a few more memory optimizations. --- graph/src/util/intern.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 1c039a73fa0..f7d0ee4b728 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -18,9 +18,9 @@ use crate::runtime::gas::{Gas, GasSizeOf}; use super::cache_weight::CacheWeight; -// We could probably get away with a `u16` here, but unless we improve the -// layout of `Object`, there's little point in that -type AtomInt = u32; +// An `Atom` is really just an integer value of this type. 
The size of the +// type determines how many atoms a pool (and all its parents) can hold. +type AtomInt = u16; /// An atom in a pool. To look up the underlying string, surrounding code /// needs to know the pool for it. From d14c1558fd6005a45dd081a00b2b335d4e64e225 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 21 Apr 2023 09:35:09 -0700 Subject: [PATCH 0164/2104] graph: Address review comments --- graph/src/schema/api.rs | 4 ++ graph/src/schema/input_schema.rs | 63 ++++++++++++++++++-------------- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index e3022553bdf..7b38c92e564 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -73,6 +73,10 @@ impl TryFrom<&r::Value> for ErrorPolicy { } } +/// The schema used for responding to queries. It is generated from an +/// `InputSchema` by calling `api_schema` on it. Code that handles GraphQL +/// queries against a subgraph should use an `ApiSchema` to access the +/// underlying GraphQL schema #[derive(Debug)] pub struct ApiSchema { schema: Schema, diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 4eeea3c4cbd..1dcd5c2dd2a 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -18,6 +18,11 @@ use crate::util::intern::AtomPool; use super::fulltext::FulltextDefinition; use super::{ApiSchema, Schema, SchemaValidationError}; +/// The internal representation of a subgraph schema, i.e., the +/// `schema.graphql` file that is part of a subgraph. Any code that deals +/// with writing a subgraph should use this struct. 
Code that deals with +/// querying subgraphs will instead want to use an `ApiSchema` which can be +/// generated with the `api_schema` method on `InputSchema` #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { inner: Arc, @@ -30,14 +35,6 @@ pub struct Inner { pool: Arc, } -impl std::ops::Deref for InputSchema { - type Target = Inner; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - impl CheapClone for InputSchema { fn cheap_clone(&self) -> Self { InputSchema { @@ -68,22 +65,29 @@ impl InputSchema { } } + /// Create a new `InputSchema` from the GraphQL document that resulted + /// from parsing a subgraph's `schema.graphql`. The document must have + /// already been validated. pub fn new(id: DeploymentHash, document: s::Document) -> Result { let schema = Schema::new(id, document)?; Ok(Self::create(schema)) } + /// A convenience function for creating an `InputSchema` from the string + /// representation of the subgraph's GraphQL schema `raw` and its + /// deployment hash `id`. An `InputSchema` that is constructed with this + /// function still has to be validated after construction. 
pub fn parse(raw: &str, id: DeploymentHash) -> Result { let schema = Schema::parse(raw, id)?; Ok(Self::create(schema)) } -} -impl Inner { + /// Generate the `ApiSchema` for use with GraphQL queries for this + /// `InputSchema` pub fn api_schema(&self) -> Result { - let mut schema = self.schema.clone(); - schema.document = api_schema(&self.schema.document)?; + let mut schema = self.inner.schema.clone(); + schema.document = api_schema(&self.inner.schema.document)?; schema.add_subgraph_id_directives(schema.id.clone()); ApiSchema::from_api_schema(schema) } @@ -105,6 +109,7 @@ impl Inner { /// This function will return the type "Wallet" with the field "account" pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &s::Field), Error> { let field = self + .inner .schema .document .get_object_type_definition(key.entity_type.as_str()) @@ -131,6 +136,7 @@ impl Inner { let field_name = derived_from.argument("field").unwrap(); let field = self + .inner .schema .document .get_object_type_definition(base_type) @@ -166,6 +172,7 @@ impl Inner { /// Construct a value for the entity type's id attribute pub fn id_value(&self, key: &EntityKey) -> Result { let base_type = self + .inner .schema .document .get_object_type_definition(key.entity_type.as_str()) @@ -198,21 +205,23 @@ impl Inner { } pub fn is_immutable(&self, entity_type: &EntityType) -> bool { - self.immutable_types.contains(entity_type) + self.inner.immutable_types.contains(entity_type) } pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { - self.schema.document.get_named_type(name) + self.inner.schema.document.get_named_type(name) } pub fn types_for_interface(&self, intf: &s::InterfaceType) -> Option<&Vec> { - self.schema + self.inner + .schema .types_for_interface .get(&EntityType::new(intf.name.clone())) } pub fn find_object_type(&self, entity_type: &EntityType) -> Option<&s::ObjectType> { - self.schema + self.inner + .schema .document .definitions .iter() @@ -224,22 +233,22 @@ 
impl Inner { } pub fn get_enum_definitions(&self) -> Vec<&s::EnumType> { - self.schema.document.get_enum_definitions() + self.inner.schema.document.get_enum_definitions() } pub fn get_object_type_definitions(&self) -> Vec<&s::ObjectType> { - self.schema.document.get_object_type_definitions() + self.inner.schema.document.get_object_type_definitions() } pub fn interface_types(&self) -> &BTreeMap> { - &self.schema.types_for_interface + &self.inner.schema.types_for_interface } pub fn entity_fulltext_definitions( &self, entity: &str, ) -> Result, anyhow::Error> { - Self::fulltext_definitions(&self.schema.document, entity) + Self::fulltext_definitions(&self.inner.schema.document, entity) } fn fulltext_definitions( @@ -268,23 +277,23 @@ impl Inner { } pub fn id(&self) -> &DeploymentHash { - &self.schema.id + &self.inner.schema.id } pub fn document_string(&self) -> String { - self.schema.document.to_string() + self.inner.schema.document.to_string() } pub fn get_fulltext_directives(&self) -> Result, Error> { - self.schema.document.get_fulltext_directives() + self.inner.schema.document.get_fulltext_directives() } pub(crate) fn validate(&self) -> Result<(), Vec> { - self.schema.validate() + self.inner.schema.validate() } pub fn make_entity(&self, iter: I) -> Result { - Entity::make(self.pool.clone(), iter) + Entity::make(self.inner.pool.clone(), iter) } pub fn try_make_entity< @@ -294,7 +303,7 @@ impl Inner { &self, iter: I, ) -> Result { - Entity::try_make(self.pool.clone(), iter) + Entity::try_make(self.inner.pool.clone(), iter) } } @@ -348,7 +357,7 @@ fn atom_pool(document: &s::Document) -> AtomPool { } for object_type in document.get_object_type_definitions() { - for defn in Inner::fulltext_definitions(&document, &object_type.name).unwrap() { + for defn in InputSchema::fulltext_definitions(&document, &object_type.name).unwrap() { pool.intern(defn.name.as_str()); } } From b94ba9d48046aed93d81546e897b9fe7e1b65062 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 
Apr 2023 09:26:50 -0700 Subject: [PATCH 0165/2104] core, graph, store: Restart writable when restarting subgraph When the subgraph runner encounters an error, it needs to restart the store to clear any errors that might have happened. --- core/src/subgraph/inputs.rs | 39 +++++++++++++ core/src/subgraph/runner.rs | 14 ++++- graph/src/components/store/traits.rs | 11 ++++ store/postgres/src/writable.rs | 15 ++++- store/test-store/tests/graph/entity_cache.rs | 4 ++ store/test-store/tests/postgres/writable.rs | 58 +++++++++++++++++++- 6 files changed, 136 insertions(+), 5 deletions(-) diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index 11b35352f85..060c698fc19 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -33,3 +33,42 @@ pub struct IndexingInputs { /// possibly expensive and noisy, information pub instrument: bool, } + +impl IndexingInputs { + pub fn with_store(&self, store: Arc) -> Self { + let IndexingInputs { + deployment, + features, + start_blocks, + stop_block, + store: _, + debug_fork, + triggers_adapter, + chain, + templates, + unified_api_version, + static_filters, + poi_version, + network, + manifest_idx_and_name, + instrument, + } = self; + IndexingInputs { + deployment: deployment.clone(), + features: features.clone(), + start_blocks: start_blocks.clone(), + stop_block: stop_block.clone(), + store, + debug_fork: debug_fork.clone(), + triggers_adapter: triggers_adapter.clone(), + chain: chain.clone(), + templates: templates.clone(), + unified_api_version: unified_api_version.clone(), + static_filters: *static_filters, + poi_version: *poi_version, + network: network.clone(), + manifest_idx_and_name: manifest_idx_and_name.clone(), + instrument: *instrument, + } + } +} diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 3d88e5b3036..db2d37bd58b 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -179,7 +179,19 @@ where self.inputs.store.flush().await?; 
return Ok(self); } - Action::Restart => break, + Action::Restart => { + // Restart the store to clear any errors that it + // might have encountered and use that from now on + let store = self.inputs.store.cheap_clone(); + let store = store.restart().await?; + self.inputs = Arc::new(self.inputs.with_store(store)); + // Also clear the entity cache since we might have + // entries in there that never made it to the + // database + self.state.entity_lfu_cache = LfuCache::new(); + self.state.synced = self.inputs.store.is_deployment_synced().await?; + break; + } }; } } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 8ca56a09c84..141729253ce 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -324,6 +324,17 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { /// Wait for the background writer to finish processing its queue async fn flush(&self) -> Result<(), StoreError>; + + /// Restart the `WritableStore`. This will clear any errors that have + /// been encountered. Code that calls this must not make any assumptions + /// about what has been written already, as the write queue might + /// contain unprocessed write requests that will be discarded by this + /// call. + /// + /// After this call, `self` should not be used anymore, as it will + /// continue to produce errors for any write requests, and instead, the + /// returned `WritableStore` should be used. 
+ async fn restart(self: Arc) -> Result, StoreError>; } #[async_trait] diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 1b03dd60fd6..8f4a6170f91 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -13,7 +13,7 @@ use graph::prelude::{ BLOCK_NUMBER_MAX, }; use graph::schema::InputSchema; -use graph::slog::info; +use graph::slog::{info, warn}; use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ @@ -1313,4 +1313,17 @@ impl WritableStoreTrait for WritableStore { async fn flush(&self) -> Result<(), StoreError> { self.writer.flush().await } + + async fn restart(self: Arc) -> Result, StoreError> { + if self.poisoned() { + let logger = self.store.logger.clone(); + if let Err(e) = self.stop().await { + warn!(logger, "Writable had error when stopping, it is safe to ignore this error"; "error" => e.to_string()); + } + let store = Arc::new(self.store.store.0.clone()); + store.writable(logger, self.store.site.id.into()).await + } else { + Ok(self) + } + } } diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index f9784464669..e66764b326c 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -169,6 +169,10 @@ impl WritableStore for MockStore { async fn causality_region_curr_val(&self) -> Result, StoreError> { unimplemented!() } + + async fn restart(self: Arc) -> Result, StoreError> { + unimplemented!() + } } fn make_band_key(id: &'static str) -> EntityKey { diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 6f5364d2935..c07c3f2f888 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -16,7 +16,7 @@ use web3::types::H256; const SCHEMA_GQL: &str = " type Counter @entity { id: ID!, - count: Int, + count: Int!, } "; @@ -93,8 +93,7 @@ where // Run test 
and wait for the background writer to finish its work so // it won't conflict with the next test - test(store, writable.clone(), deployment).await; - writable.flush().await.unwrap(); + test(store, writable, deployment).await; }); } @@ -161,5 +160,58 @@ fn tracker() { resume_writer(&deployment, 1).await; assert_eq!(2, read_count()); + + // There shouldn't be anything left to do, but make sure of that + writable.flush().await.unwrap(); + }) +} + +#[test] +fn restart() { + run_test(|store, writable, deployment| async move { + let subgraph_store = store.subgraph_store(); + + // Cause an error by leaving out the non-nullable `count` attribute + let entity_ops = vec![EntityOperation::Set { + key: count_key("1"), + data: entity! { id: "1" }, + }]; + transact_entity_operations( + &subgraph_store, + &deployment, + block_pointer(1), + entity_ops.clone(), + ) + .await + .unwrap(); + // flush checks for errors and therefore fails + writable + .flush() + .await + .expect_err("writing with missing non-nullable field should fail"); + + // We now have a poisoned store. Restarting it gives us a new store + // that works again + let writable = writable.restart().await.unwrap(); + writable.flush().await.unwrap(); + + // Retry our write with correct data + let entity_ops = vec![EntityOperation::Set { + key: count_key("1"), + data: entity! 
{ id: "1", count: 1 }, + }]; + // `SubgraphStore` caches the correct writable so that this call + // uses the restarted writable, and is equivalent to using + // `writable` directly + transact_entity_operations( + &subgraph_store, + &deployment, + block_pointer(1), + entity_ops.clone(), + ) + .await + .unwrap(); + // Look, no errors + writable.flush().await.unwrap(); }) } From 9ac5a435f067ff05547e1d1c9659cb737b6838df Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 11:10:24 -0700 Subject: [PATCH 0166/2104] core: Simplify the return type of SubgraphRunner.run --- core/src/subgraph/runner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index db2d37bd58b..50bd7ea0272 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -105,8 +105,8 @@ where self.run_inner(break_on_restart).await } - pub async fn run(self) -> Result { - self.run_inner(false).await + pub async fn run(self) -> Result<(), Error> { + self.run_inner(false).await.map(|_| ()) } async fn run_inner(mut self, break_on_restart: bool) -> Result { From 2574b37d8ecf7ef0e2fa5a0fe55e4a03f77e68e6 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 11 Apr 2023 11:13:05 -0700 Subject: [PATCH 0167/2104] store: Improve errors from Writer.check_queue_running --- store/postgres/src/writable.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 8f4a6170f91..8b0b3ffae9d 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -961,6 +961,8 @@ impl Writer { match self { Writer::Sync(_) => Ok(()), Writer::Async { join_handle, queue } => { + // If there was an error, report that instead of a naked 'writer not running' + queue.check_err()?; if join_handle.is_finished() { Err(constraint_violation!( "Subgraph writer for {} is not running", From 400c24de72568276e42bb940657fce62ebe9c422 Mon Sep 17 
00:00:00 2001 From: David Lutterkort Date: Fri, 14 Apr 2023 10:57:11 -0700 Subject: [PATCH 0168/2104] core, graph, store: Reset runner state only when needed We only need to reset the state of the runner when the `WritableStore` actually had to be restarted because of an error; if it had an error, we have to reset the state. Use `SubgraphRunner.revert_state` to properly reset the runner state. --- core/src/subgraph/runner.rs | 14 +++++++------- graph/src/components/store/traits.rs | 9 +++++---- store/postgres/src/writable.rs | 9 ++++++--- store/test-store/tests/graph/entity_cache.rs | 2 +- store/test-store/tests/postgres/writable.rs | 2 +- 5 files changed, 20 insertions(+), 16 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 50bd7ea0272..d50543d27f6 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -183,13 +183,13 @@ where // Restart the store to clear any errors that it // might have encountered and use that from now on let store = self.inputs.store.cheap_clone(); - let store = store.restart().await?; - self.inputs = Arc::new(self.inputs.with_store(store)); - // Also clear the entity cache since we might have - // entries in there that never made it to the - // database - self.state.entity_lfu_cache = LfuCache::new(); - self.state.synced = self.inputs.store.is_deployment_synced().await?; + if let Some(store) = store.restart().await? 
{ + let last_good_block = + store.block_ptr().map(|ptr| ptr.number).unwrap_or(0); + self.revert_state(last_good_block)?; + self.inputs = Arc::new(self.inputs.with_store(store)); + self.state.synced = self.inputs.store.is_deployment_synced().await?; + } break; } }; diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 141729253ce..a48cc7b9f5f 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -331,10 +331,11 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { /// contain unprocessed write requests that will be discarded by this /// call. /// - /// After this call, `self` should not be used anymore, as it will - /// continue to produce errors for any write requests, and instead, the - /// returned `WritableStore` should be used. - async fn restart(self: Arc) -> Result, StoreError>; + /// This call returns `None` if a restart was not needed because `self` + /// had no errors. If it returns `Some`, `self` should not be used + /// anymore, as it will continue to produce errors for any write + /// requests, and instead, the returned `WritableStore` should be used. 
+ async fn restart(self: Arc) -> Result>, StoreError>; } #[async_trait] diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 8b0b3ffae9d..69c352f7f2e 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1316,16 +1316,19 @@ impl WritableStoreTrait for WritableStore { self.writer.flush().await } - async fn restart(self: Arc) -> Result, StoreError> { + async fn restart(self: Arc) -> Result>, StoreError> { if self.poisoned() { let logger = self.store.logger.clone(); if let Err(e) = self.stop().await { warn!(logger, "Writable had error when stopping, it is safe to ignore this error"; "error" => e.to_string()); } let store = Arc::new(self.store.store.0.clone()); - store.writable(logger, self.store.site.id.into()).await + store + .writable(logger, self.store.site.id.into()) + .await + .map(|store| Some(store)) } else { - Ok(self) + Ok(None) } } } diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index e66764b326c..5cb9fca0f36 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -170,7 +170,7 @@ impl WritableStore for MockStore { unimplemented!() } - async fn restart(self: Arc) -> Result, StoreError> { + async fn restart(self: Arc) -> Result>, StoreError> { unimplemented!() } } diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index c07c3f2f888..fca57ea42bc 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -192,7 +192,7 @@ fn restart() { // We now have a poisoned store. 
Restarting it gives us a new store // that works again - let writable = writable.restart().await.unwrap(); + let writable = writable.restart().await.unwrap().unwrap(); writable.flush().await.unwrap(); // Retry our write with correct data From 2ce75665aa7dc219af2a02222e0b240b0f781fd1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 25 Apr 2023 11:39:11 -0700 Subject: [PATCH 0169/2104] store: Warn when background writer hasn't stopped yet --- store/postgres/src/writable.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 69c352f7f2e..767d74a19a1 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1318,9 +1318,22 @@ impl WritableStoreTrait for WritableStore { async fn restart(self: Arc) -> Result>, StoreError> { if self.poisoned() { + // When the writer is poisoned, the background thread has + // finished since `start_writer` returns whenever it encounters + // an error. Just to make extra-sure, we log a warning if the + // join handle indicates that the writer hasn't stopped yet. let logger = self.store.logger.clone(); - if let Err(e) = self.stop().await { - warn!(logger, "Writable had error when stopping, it is safe to ignore this error"; "error" => e.to_string()); + match &self.writer { + Writer::Sync(_) => { /* can't happen, a sync writer never gets poisoned */ } + Writer::Async { join_handle, queue } => { + let err = match queue.check_err() { + Ok(()) => "error missing".to_string(), + Err(e) => e.to_string(), + }; + if !join_handle.is_finished() { + warn!(logger, "Writer was poisoned, but background thread didn't finish. 
Creating new writer regardless"; "error" => err); + } + } } let store = Arc::new(self.store.store.0.clone()); store From 9fea677d7d0a9f92f7fa0ef0fecfb98af6ceadd4 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 25 Apr 2023 12:08:07 -0700 Subject: [PATCH 0170/2104] core, graph: Log more details about the entity cache --- core/src/subgraph/runner.rs | 11 +++++++++-- graph/src/components/store/entity_cache.rs | 8 ++++++-- graph/src/util/lfu_cache.rs | 12 ++++++++++++ 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index d50543d27f6..9dc5e139ac6 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -215,8 +215,7 @@ where )); debug!(logger, "Start processing block"; - "triggers" => triggers.len(), - "cached_entities" => self.state.entity_lfu_cache.len()); + "triggers" => triggers.len()); let proof_of_indexing = if self.inputs.store.supports_proof_of_indexing().await? { Some(Arc::new(AtomicRefCell::new(ProofOfIndexing::new( @@ -400,12 +399,20 @@ where let ModificationsAndCache { modifications: mut mods, entity_lfu_cache: cache, + evict_stats, } = block_state .entity_cache .as_modifications() .map_err(|e| BlockProcessingError::Unknown(e.into()))?; section.end(); + debug!(self.logger, "Entity cache statistics"; + "weight" => evict_stats.new_weight, + "evicted_weight" => evict_stats.evicted_weight, + "count" => evict_stats.new_count, + "evicted_count" => evict_stats.evicted_count, + "stale_update" => evict_stats.stale_update); + // Check for offchain events and process them, including their entity modifications in the // set to be transacted. 
let offchain_events = self.ctx.offchain_monitor.ready_offchain_events()?; diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 4280cc0ba65..aee54d4914b 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -7,7 +7,7 @@ use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOpe use crate::data::store::IntoEntityIterator; use crate::prelude::ENV_VARS; use crate::schema::InputSchema; -use crate::util::lfu_cache::LfuCache; +use crate::util::lfu_cache::{EvictStats, LfuCache}; use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; @@ -58,6 +58,7 @@ impl Debug for EntityCache { pub struct ModificationsAndCache { pub modifications: Vec, pub entity_lfu_cache: LfuCache>, + pub evict_stats: EvictStats, } impl EntityCache { @@ -332,11 +333,14 @@ impl EntityCache { mods.push(modification) } } - self.current.evict(ENV_VARS.mappings.entity_cache_size); + let evict_stats = self + .current + .evict_and_stats(ENV_VARS.mappings.entity_cache_size); Ok(ModificationsAndCache { modifications: mods, entity_lfu_cache: self.current, + evict_stats, }) } } diff --git a/graph/src/util/lfu_cache.rs b/graph/src/util/lfu_cache.rs index 55f252c8669..eac04f93134 100644 --- a/graph/src/util/lfu_cache.rs +++ b/graph/src/util/lfu_cache.rs @@ -187,6 +187,18 @@ impl self.queue.len() } + pub fn evict_and_stats(&mut self, max_weight: usize) -> EvictStats { + self.evict_with_period(max_weight, STALE_PERIOD) + .unwrap_or_else(|| EvictStats { + new_weight: self.total_weight, + evicted_weight: 0, + new_count: self.len(), + evicted_count: 0, + stale_update: false, + evict_time: Duration::from_millis(0), + }) + } + /// Same as `evict_with_period(max_weight, STALE_PERIOD)` pub fn evict(&mut self, max_weight: usize) -> Option { self.evict_with_period(max_weight, STALE_PERIOD) From 357517f4e38cd4abee05527316cc027bb5bae47f Mon Sep 17 00:00:00 2001 From: Leonardo 
Yvens Date: Wed, 26 Apr 2023 15:29:22 +0100 Subject: [PATCH 0171/2104] ipfs: Remove concurrency limit, rely on rate limit (#4570) * ipfs: Remove concurrency limit, rely on rate limit The concurrency limit, because it uses a semaphore, had unfortunate interactions with the use of `call_all` in the polling monitor. `CallAll` can internally have requests which are holding the semaphore, so it must be polled regularly to check on those requests. If the task holding the `CallAll` hangs on some other future, that can lead to a deadlock. In our case, it can hang in the `response_sender.send((id, response)).await`, if the channel is full and the subgraph runner takes a long time to check it. This could be fixed, but the footgun would remain so it seemed best to rely only on the rate limit which can't deadlock. * polling monitor: Log when not found --- core/src/polling_monitor/ipfs_service.rs | 5 ++--- core/src/polling_monitor/mod.rs | 2 ++ docs/environment-variables.md | 4 ++-- graph/src/env/mappings.rs | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/core/src/polling_monitor/ipfs_service.rs b/core/src/polling_monitor/ipfs_service.rs index 284d25063db..f9436b851ca 100644 --- a/core/src/polling_monitor/ipfs_service.rs +++ b/core/src/polling_monitor/ipfs_service.rs @@ -17,7 +17,7 @@ pub fn ipfs_service( client: IpfsClient, max_file_size: u64, timeout: Duration, - concurrency_and_rate_limit: u16, + rate_limit: u16, ) -> IpfsService { let ipfs = IpfsServiceInner { client, @@ -26,8 +26,7 @@ pub fn ipfs_service( }; let svc = ServiceBuilder::new() - .rate_limit(concurrency_and_rate_limit.into(), Duration::from_secs(1)) - .concurrency_limit(concurrency_and_rate_limit as usize) + .rate_limit(rate_limit.into(), Duration::from_secs(1)) .service_fn(move |req| ipfs.cheap_clone().call_inner(req)) .boxed(); diff --git a/core/src/polling_monitor/mod.rs b/core/src/polling_monitor/mod.rs index e50979d39f2..19f30f28cda 100644 --- a/core/src/polling_monitor/mod.rs +++ 
b/core/src/polling_monitor/mod.rs @@ -161,6 +161,8 @@ where // Object not found, push the id to the back of the queue. Ok((id, None)) => { + debug!(logger, "not found on polling"; "object_id" => id.to_string()); + metrics.not_found.inc(); queue.push_back(id); } diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 635abc040c5..83ee938f059 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -78,8 +78,8 @@ those. may use (in bytes, defaults to 256MB). - `GRAPH_MAX_IPFS_CACHE_SIZE`: maximum number of files cached (defaults to 50). - `GRAPH_MAX_IPFS_CACHE_FILE_SIZE`: maximum size of each cached file (in bytes, defaults to 1MiB). -- `GRAPH_IPFS_REQUEST_LIMIT`: Limits both concurrent and per second requests to IPFS for file data - sources. Defaults to 100. +- `GRAPH_IPFS_REQUEST_LIMIT`: Limits the number of requests per second to IPFS for file data sources. + Defaults to 100. ## GraphQL diff --git a/graph/src/env/mappings.rs b/graph/src/env/mappings.rs index bb3ee2c1d30..639199d94c6 100644 --- a/graph/src/env/mappings.rs +++ b/graph/src/env/mappings.rs @@ -49,7 +49,7 @@ pub struct EnvVarsMapping { /// bytes). Defaults to 256 MiB. pub max_ipfs_file_bytes: usize, - /// Limits both concurrent and per second requests to IPFS for file data sources. + /// Limits per second requests to IPFS for file data sources. /// /// Set by the environment variable `GRAPH_IPFS_REQUEST_LIMIT`. Defaults to 100. 
pub ipfs_request_limit: u16, From ce693d8893e9fa7cd5b17bdeffcae5a100b6cdb9 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 26 Apr 2023 16:14:25 +0100 Subject: [PATCH 0172/2104] json-rpc: Add history_blocks as parameter to create_subgraph_version (#4564) --- core/src/subgraph/registrar.rs | 12 +++++++++++- graph/src/components/subgraph/registrar.rs | 1 + graph/src/data/subgraph/schema.rs | 7 +++++++ node/src/main.rs | 1 + node/src/manager/commands/run.rs | 1 + server/json-rpc/src/lib.rs | 2 ++ store/postgres/src/deployment.rs | 3 ++- store/postgres/src/subgraph_store.rs | 3 +++ tests/src/fixture/mod.rs | 1 + 9 files changed, 29 insertions(+), 2 deletions(-) diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index b8d0f408e23..7f706fcd622 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -269,6 +269,7 @@ where debug_fork: Option, start_block_override: Option, graft_block_override: Option, + history_blocks: Option, ) -> Result { // We don't have a location for the subgraph yet; that will be // assigned when we deploy for real. For logging purposes, make up a @@ -311,6 +312,7 @@ where debug_fork, self.version_switching_mode, &self.resolver, + history_blocks, ) .await? } @@ -328,6 +330,7 @@ where debug_fork, self.version_switching_mode, &self.resolver, + history_blocks, ) .await? } @@ -345,6 +348,7 @@ where debug_fork, self.version_switching_mode, &self.resolver, + history_blocks, ) .await? } @@ -362,6 +366,7 @@ where debug_fork, self.version_switching_mode, &self.resolver, + history_blocks, ) .await? } @@ -379,6 +384,7 @@ where debug_fork, self.version_switching_mode, &self.resolver, + history_blocks, ) .await? 
} @@ -541,6 +547,7 @@ async fn create_subgraph_version( debug_fork: Option, version_switching_mode: SubgraphVersionSwitchingMode, resolver: &Arc, + history_blocks: Option, ) -> Result { let raw_string = serde_yaml::to_string(&raw).unwrap(); let unvalidated = UnvalidatedSubgraphManifest::::resolve( @@ -626,10 +633,13 @@ async fn create_subgraph_version( // Apply the subgraph versioning and deployment operations, // creating a new subgraph deployment if one doesn't exist. - let deployment = DeploymentCreate::new(raw_string, &manifest, start_block) + let mut deployment = DeploymentCreate::new(raw_string, &manifest, start_block) .graft(base_block) .debug(debug_fork) .entities_with_causality_region(needs_causality_region); + if let Some(history_blocks) = history_blocks { + deployment = deployment.with_history_blocks(history_blocks); + } deployment_store .create_subgraph_deployment( diff --git a/graph/src/components/subgraph/registrar.rs b/graph/src/components/subgraph/registrar.rs index cfb2c2ffa2c..8da173cd70d 100644 --- a/graph/src/components/subgraph/registrar.rs +++ b/graph/src/components/subgraph/registrar.rs @@ -44,6 +44,7 @@ pub trait SubgraphRegistrar: Send + Sync + 'static { debug_fork: Option, start_block_block: Option, graft_block_override: Option, + history_blocks: Option, ) -> Result; async fn remove_subgraph(&self, name: SubgraphName) -> Result<(), SubgraphRegistrarError>; diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index 185f8227a4f..9f617a6b761 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -108,6 +108,7 @@ pub struct DeploymentCreate { pub graft_base: Option, pub graft_block: Option, pub debug_fork: Option, + pub history_blocks: Option, } impl DeploymentCreate { @@ -122,9 +123,15 @@ impl DeploymentCreate { graft_base: None, graft_block: None, debug_fork: None, + history_blocks: None, } } + pub fn with_history_blocks(mut self, blocks: i32) -> Self { + self.history_blocks = 
Some(blocks); + self + } + pub fn graft(mut self, base: Option<(DeploymentHash, BlockPtr)>) -> Self { if let Some((subgraph, ptr)) = base { self.graft_base = Some(subgraph); diff --git a/node/src/main.rs b/node/src/main.rs index 9c38192f5ba..aad0c9f26e6 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -546,6 +546,7 @@ async fn main() { debug_fork, start_block, None, + None, ) .await } diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index fec254b1123..856484d082f 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -232,6 +232,7 @@ pub async fn run( None, None, None, + None, ) .await?; diff --git a/server/json-rpc/src/lib.rs b/server/json-rpc/src/lib.rs index c720905345e..0779a30b73d 100644 --- a/server/json-rpc/src/lib.rs +++ b/server/json-rpc/src/lib.rs @@ -123,6 +123,7 @@ impl ServerState { // startBlock, we'll use the one from the manifest. None, None, + params.history_blocks, ) .await { @@ -236,6 +237,7 @@ struct SubgraphDeployParams { ipfs_hash: DeploymentHash, node_id: Option, debug_fork: Option, + history_blocks: Option, } #[derive(Debug, Deserialize)] diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 03f8b16eb0c..8f38c24410f 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -1050,6 +1050,7 @@ pub fn create_deployment( graft_base, graft_block, debug_fork, + history_blocks: history_blocks_override, } = deployment; let earliest_block_number = start_block.as_ref().map(|ptr| ptr.number).unwrap_or(0); let entities_with_causality_region = Vec::from_iter(entities_with_causality_region.into_iter()); @@ -1089,7 +1090,7 @@ pub fn create_deployment( m::start_block_number.eq(start_block.as_ref().map(|ptr| ptr.number)), m::raw_yaml.eq(raw_yaml), m::entities_with_causality_region.eq(entities_with_causality_region), - m::history_blocks.eq(history_blocks), + 
m::history_blocks.eq(history_blocks_override.unwrap_or(history_blocks)), ); if exists && replace { diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 7ae6c7ef71c..0529b1c9954 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -622,6 +622,8 @@ impl SubgraphStoreInner { ))); } + let history_blocks = deployment.manifest.history_blocks; + // Transmogrify the deployment into a new one let deployment = DeploymentCreate { manifest: deployment.manifest, @@ -629,6 +631,7 @@ impl SubgraphStoreInner { graft_base: Some(src.deployment.clone()), graft_block: Some(block), debug_fork: deployment.debug_fork, + history_blocks: Some(history_blocks), }; let graft_base = self.layout(&src.deployment)?; diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 7a803c3d341..e31d02dbf9d 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -422,6 +422,7 @@ pub async fn setup( None, None, graft_block, + None, ) .await .expect("failed to create subgraph version"); From aa6677a386755ebbd4012c88f2a63dfff889617f Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Wed, 26 Apr 2023 21:53:23 +0100 Subject: [PATCH 0173/2104] Limit stack depth of asc_get (#4576) --- chain/ethereum/src/runtime/abi.rs | 20 ++++---- chain/ethereum/src/runtime/runtime_adapter.rs | 4 +- graph/src/runtime/asc_heap.rs | 21 ++++++++- runtime/test/src/test.rs | 18 +++++++ .../api_version_0_0_5/recursion_limit.ts | 19 ++++++++ .../api_version_0_0_5/recursion_limit.wasm | Bin 0 -> 8673 bytes runtime/wasm/src/module/mod.rs | 15 +++++- runtime/wasm/src/to_from/external.rs | 44 ++++++++++-------- runtime/wasm/src/to_from/mod.rs | 17 +++++-- 9 files changed, 121 insertions(+), 37 deletions(-) create mode 100644 runtime/test/wasm_test/api_version_0_0_5/recursion_limit.ts create mode 100644 runtime/test/wasm_test/api_version_0_0_5/recursion_limit.wasm diff --git a/chain/ethereum/src/runtime/abi.rs 
b/chain/ethereum/src/runtime/abi.rs index 66862871c19..92a18f499da 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -138,13 +138,14 @@ impl FromAscObj for UnresolvedContractCall { asc_call: AscUnresolvedContractCall_0_0_4, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { Ok(UnresolvedContractCall { - contract_name: asc_get(heap, asc_call.contract_name, gas)?, - contract_address: asc_get(heap, asc_call.contract_address, gas)?, - function_name: asc_get(heap, asc_call.function_name, gas)?, - function_signature: Some(asc_get(heap, asc_call.function_signature, gas)?), - function_args: asc_get(heap, asc_call.function_args, gas)?, + contract_name: asc_get(heap, asc_call.contract_name, gas, depth)?, + contract_address: asc_get(heap, asc_call.contract_address, gas, depth)?, + function_name: asc_get(heap, asc_call.function_name, gas, depth)?, + function_signature: Some(asc_get(heap, asc_call.function_signature, gas, depth)?), + function_args: asc_get(heap, asc_call.function_args, gas, depth)?, }) } } @@ -163,13 +164,14 @@ impl FromAscObj for UnresolvedContractCall { asc_call: AscUnresolvedContractCall, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { Ok(UnresolvedContractCall { - contract_name: asc_get(heap, asc_call.contract_name, gas)?, - contract_address: asc_get(heap, asc_call.contract_address, gas)?, - function_name: asc_get(heap, asc_call.function_name, gas)?, + contract_name: asc_get(heap, asc_call.contract_name, gas, depth)?, + contract_address: asc_get(heap, asc_call.contract_address, gas, depth)?, + function_name: asc_get(heap, asc_call.function_name, gas, depth)?, function_signature: None, - function_args: asc_get(heap, asc_call.function_args, gas)?, + function_args: asc_get(heap, asc_call.function_args, gas, depth)?, }) } } diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index 3a0c7f7e62a..e7f083c82a4 100644 --- 
a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -77,9 +77,9 @@ fn ethereum_call( // function signature; subgraphs using an apiVersion < 0.0.4 don't pass // the signature along with the call. let call: UnresolvedContractCall = if ctx.heap.api_version() >= Version::new(0, 0, 4) { - asc_get::<_, AscUnresolvedContractCall_0_0_4, _>(ctx.heap, wasm_ptr.into(), &ctx.gas)? + asc_get::<_, AscUnresolvedContractCall_0_0_4, _>(ctx.heap, wasm_ptr.into(), &ctx.gas, 0)? } else { - asc_get::<_, AscUnresolvedContractCall, _>(ctx.heap, wasm_ptr.into(), &ctx.gas)? + asc_get::<_, AscUnresolvedContractCall, _>(ctx.heap, wasm_ptr.into(), &ctx.gas, 0)? }; let result = eth_call( diff --git a/graph/src/runtime/asc_heap.rs b/graph/src/runtime/asc_heap.rs index 065af4f5200..bf31f7dc3f2 100644 --- a/graph/src/runtime/asc_heap.rs +++ b/graph/src/runtime/asc_heap.rs @@ -6,6 +6,11 @@ use super::{ gas::GasCounter, AscIndexId, AscPtr, AscType, DeterministicHostError, HostExportError, IndexForAscTypeId, }; + +// A 128 limit is plenty for any subgraph, while the `fn recursion_limit` test ensures it is not +// large enough to cause stack overflows. +const MAX_RECURSION_DEPTH: usize = 128; + /// A type that can read and write to the Asc heap. Call `asc_new` and `asc_get` /// for reading and writing Rust structs from and to Asc. /// @@ -95,12 +100,21 @@ pub fn asc_get( heap: &H, asc_ptr: AscPtr, gas: &GasCounter, + mut depth: usize, ) -> Result where C: AscType + AscIndexId, T: FromAscObj, { - T::from_asc_obj(asc_ptr.read_ptr(heap, gas)?, heap, gas) + depth += 1; + + if depth > MAX_RECURSION_DEPTH { + return Err(DeterministicHostError::Other(anyhow::anyhow!( + "recursion limit reached" + ))); + } + + T::from_asc_obj(asc_ptr.read_ptr(heap, gas)?, heap, gas, depth) } /// Type that can be converted to an Asc object of class `C`. @@ -133,10 +147,15 @@ impl ToAscObj for bool { } /// Type that can be converted from an Asc object of class `C`. 
+/// +/// ### Overflow protection +/// The `depth` parameter is used to prevent stack overflows, it measures how many `asc_get` calls +/// have been made. `from_asc_obj` does not need to increment the depth, only pass it through. pub trait FromAscObj: Sized { fn from_asc_obj( obj: C, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result; } diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 08a83866f4c..ed6f02c7a60 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1157,3 +1157,21 @@ async fn test_boolean() { .is_err()); } } + +#[tokio::test] +async fn recursion_limit() { + let module = test_module_latest("RecursionLimit", "recursion_limit.wasm").await; + + // An error about 'unknown key' means the entity was fully read with no stack overflow. + module + .invoke_export1_val_void("recursionLimit", 128) + .unwrap_err() + .to_string() + .contains("Unknown key `foobar`"); + + assert!(module + .invoke_export1_val_void("recursionLimit", 129) + .unwrap_err() + .to_string() + .contains("recursion limit reached")); +} diff --git a/runtime/test/wasm_test/api_version_0_0_5/recursion_limit.ts b/runtime/test/wasm_test/api_version_0_0_5/recursion_limit.ts new file mode 100644 index 00000000000..0781475e234 --- /dev/null +++ b/runtime/test/wasm_test/api_version_0_0_5/recursion_limit.ts @@ -0,0 +1,19 @@ +export * from './common/global'; + +import { Entity, Value } from './common/types' + +declare namespace store { + function get(entity: string, id: string): Entity | null + function set(entity: string, id: string, data: Entity): void + function remove(entity: string, id: string): void +} + +export function recursionLimit(depth: i32): void { + let user = new Entity(); + var val = Value.fromI32(7); + for (let i = 0; i < depth; i++) { + val = Value.fromArray([val]); + } + user.set("foobar", val); + store.set("User", "user_id", user); +} \ No newline at end of file diff --git a/runtime/test/wasm_test/api_version_0_0_5/recursion_limit.wasm 
b/runtime/test/wasm_test/api_version_0_0_5/recursion_limit.wasm new file mode 100644 index 0000000000000000000000000000000000000000..c31b4bc8304ded43c3941c3b4267c7192281b491 GIT binary patch literal 8673 zcmeHNJ8T@s8J@@P-W~UrT#A&$m&n;Wk|HHO#D{5FmTeAYu0$y|9otH}P$%jn^Y}RN zxMU*0Fcl+66F2Ensz?(cE`n46QU(D6xJZ>QfPu8GT%-!1eE;n3JxI~C0mp!W95Met zkAMF8Uo-p9NUXHC%^72S$-O5I57|9_$PcLoC=aQFQ6IZ!p#emFhTx;53k{ow&@0A; zEbkoHrTf)d#GH;=d40dOw^7|$+uGRPh;n;TwN{?mD@RPq`8k_$%fiY`NNEt`oMkze zGLw^>3*IIKV;LbFA#KjNZQ;isGj2=HxbqwN!$Yo_8v{l)ra1vYKoYPB*aRE`83JFI zE_*uhH)53|er*I?0(k-+fp!8N1bhOW1kMoXBG66XEP-IJhT+gPw_n$m2uRNG~FRE?qJd(X7&dA3iMCWS^gOuIxlt&4BFh zY;?^m#C-!zeRH#M--V{Wh55K|Fzvg$QRg?5u1kF>L)F}watxX~Q%)QT90m&wzlMSYOE$Hu8bt)KdzM?Lxfb0qaw_t zx+QeRth47^CpRRX>rO^mO*)@S&?N9nsk<7p7lxDBsWeSiX`11nVXf1tvw6#Qn#>Kxhj`hNLIk+Mr#k)yxH0VHp?wNA!x$Z9?;xu3HGKv zd8=*X!CLjjO)n;>RYwOjmt@3)r z=G?WlS`=-p-7oEx<=f77dAnMBBABzb7a_>me9qzYU1DudC+F^Q`|u4GaK*JB>5hu- z(1&}wfly3|JJuqhP5u12tPr3f?H>eE@q(pf!B&>CZ}$h5vKDa`-vYf%D$I3V#fG_? zW8lFT1asf#)}dnhKhMDlti_^M`g5iOeZgnUWp0rNyvVgx34{P4R*`7bWzyEBi) zVin(lbp)g_0x~}XF`1uLFp8f8WPTfl{fhesDKdxrBLO1@w&Emv08-6w_n}WNTZ?(& zDqG1)-~d_I7*mesn{iEQ@m5NKAs$l*P>6)W(XvkKG}B5DS@of8JfxYjH?`aiGT@Y{ zKq9F!u?`>ZLasyoobMZ&HnDvq??rp1N2V z%Ez>RjJjjg1Nwpb%$|C!yI%9-n(k4ywstGWvtH5|&|Q0?oC z?*%TI6J&rMKEupgo#I}-m`n8B?cVVg< z*qRmZ-0XwM48fm=cs4Boh$Ya2B@lw0ioHOE5hfA|Ep0a>>@tFa`x(Cr^Ln5Z}`sx-)YcMBvUaGRyV0cckCFY;UlwQsc!MG>D(NdKDIqX>WdyH`RS*I}=TqSTI zaKt1uf_lovp@0ooz$U0*S;s|-$1RT5BxAHi1hz;ZjK^S8xvN=7hY7LM{%XvIEHNe+ zg(Fh#3Kp>{^>&up*+x50?R?tCIaqe44XYIf*K0O-{&+bBr5{*s(?I*dnHd10k`9+d*roRkA|-XY4|j&^+`IAXAb%U3J51*#Kc9Nsf##tA}K653n#?^qC6Yb$YosHW&~#`InSa& zoIB}YE}aHDADijbXTq&VXP)3$n1p@1MuEzrq4JrA$|98=g8QVhpK9gQ1ve^V5ESWP zTRJUPxpiiHeWp>lb>>Mbi*Kef6rx?1_^_*?kW3|mxLH!DCzUx<7t1JwL3lYGbkk|E zLaj49>NAZ(tus$j2>A>$4(!*jwP(JKxc)y6I+Ysd(D%n46eo0+e}5$YZX`{=S>MfS zr>{ct&^PjQ%F`)`*Vp`>y1$dS72zw{l(!>ow2)^Y?Vt?)Rv+DbHkI7e^zq`+CK6pD zQPS@N4OLf=Na2f4q&l;UuWx2IO;o6koZrD5a}97@VuExOYS@JgZ7byH^s%?%mW5WF 
z{+PVS9K$$oR}&^!ZCo-+b|0fFoyslz+$=9us`KXCtumuRCX0+HN~n}ex2b;nt&$h0ZyaNEl4)|M&ZCgunU z8)jDyz`Mz9lVZlETO3_WExPB@1;kqQ=t@sh(y*j~jEnnNhGHK%EUt|Xp`6O9+)Z=> zZ6Y~E{u#3ba*Thm{Bq(r>}m@QY?Q&BI5>uCY7KX7uneiCY#kS4G$u zN|O7L@?tv_c^FtQGlwH^={@oW;wX-*A#|}&lIvuAmo)Dw^EyZ)OF8sZf;R(S!lQVu zp|~iR2Y@^Mc*g*DTF>vtse@D-Qn)xY$V!+w%;TB#j^n;&!uZ(J$iM1OBT&=d#NOf+ ze>`~~fi04kkUzx%IwrCR8NA#mbNNkWk6U6GrHFz;=6IlIF3iNuCT1aV8<+01VF7E5 zP+U?pm{ZCh#V&+3gg-`iAc{-NpXhVl4MYuuI2&+0CiDgZ@hwe!W9eJ887Z_8_BfId zhRnY}euNOP3vm?wAg+e~5S&T=hQwyeL8_;S1%Fb1F7;Em!RPKjEw~lD{)c~x1zhoB zFWp0Vv5)Sdyx5O>D9}LY;nA#kA@m?laWM4Y>*7%8A;60&^fEv}=w*Qlp_c2#!nPq(4quCMbb9!fh(2a1o$59UZ8lK}z! e=tut( + heap: &H, + ptr: AscPtr, + gas: &GasCounter, +) -> Result +where + C: AscType + AscIndexId, + T: FromAscObj, +{ + graph::runtime::asc_get(heap, ptr, gas, 0) +} + pub trait IntoTrap { fn determinism_level(&self) -> DeterminismLevel; fn into_trap(self) -> Trap; diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index a2ec718ecf4..4f4ec4d01be 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -27,8 +27,9 @@ impl FromAscObj for web3::H160 { typed_array: Uint8Array, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let data = <[u8; 20]>::from_asc_obj(typed_array, heap, gas)?; + let data = <[u8; 20]>::from_asc_obj(typed_array, heap, gas, depth)?; Ok(Self(data)) } } @@ -38,8 +39,9 @@ impl FromAscObj for web3::H256 { typed_array: Uint8Array, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let data = <[u8; 32]>::from_asc_obj(typed_array, heap, gas)?; + let data = <[u8; 32]>::from_asc_obj(typed_array, heap, gas, depth)?; Ok(Self(data)) } } @@ -82,8 +84,9 @@ impl FromAscObj for BigInt { array_buffer: AscBigInt, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let bytes = >::from_asc_obj(array_buffer, heap, gas)?; + let bytes = >::from_asc_obj(array_buffer, heap, gas, depth)?; Ok(BigInt::from_signed_bytes_le(&bytes)) } } @@ -109,9 
+112,10 @@ impl FromAscObj for BigDecimal { big_decimal: AscBigDecimal, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let digits: BigInt = asc_get(heap, big_decimal.digits, gas)?; - let exp: BigInt = asc_get(heap, big_decimal.exp, gas)?; + let digits: BigInt = asc_get(heap, big_decimal.digits, gas, depth)?; + let exp: BigInt = asc_get(heap, big_decimal.exp, gas, depth)?; let bytes = exp.to_signed_bytes_le(); let mut byte_array = if exp >= 0.into() { [0; 8] } else { [255; 8] }; @@ -188,6 +192,7 @@ impl FromAscObj> for ethabi::Token { asc_enum: AscEnum, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { use ethabi::Token; @@ -196,41 +201,41 @@ impl FromAscObj> for ethabi::Token { EthereumValueKind::Bool => Token::Bool(bool::from(payload)), EthereumValueKind::Address => { let ptr: AscPtr = AscPtr::from(payload); - Token::Address(asc_get(heap, ptr, gas)?) + Token::Address(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::FixedBytes => { let ptr: AscPtr = AscPtr::from(payload); - Token::FixedBytes(asc_get(heap, ptr, gas)?) + Token::FixedBytes(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::Bytes => { let ptr: AscPtr = AscPtr::from(payload); - Token::Bytes(asc_get(heap, ptr, gas)?) + Token::Bytes(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::Int => { let ptr: AscPtr = AscPtr::from(payload); - let n: BigInt = asc_get(heap, ptr, gas)?; + let n: BigInt = asc_get(heap, ptr, gas, depth)?; Token::Int(n.to_signed_u256()) } EthereumValueKind::Uint => { let ptr: AscPtr = AscPtr::from(payload); - let n: BigInt = asc_get(heap, ptr, gas)?; + let n: BigInt = asc_get(heap, ptr, gas, depth)?; Token::Uint(n.to_unsigned_u256()) } EthereumValueKind::String => { let ptr: AscPtr = AscPtr::from(payload); - Token::String(asc_get(heap, ptr, gas)?) + Token::String(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::FixedArray => { let ptr: AscEnumArray = AscPtr::from(payload); - Token::FixedArray(asc_get(heap, ptr, gas)?) 
+ Token::FixedArray(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::Array => { let ptr: AscEnumArray = AscPtr::from(payload); - Token::Array(asc_get(heap, ptr, gas)?) + Token::Array(asc_get(heap, ptr, gas, depth)?) } EthereumValueKind::Tuple => { let ptr: AscEnumArray = AscPtr::from(payload); - Token::Tuple(asc_get(heap, ptr, gas)?) + Token::Tuple(asc_get(heap, ptr, gas, depth)?) } }) } @@ -241,6 +246,7 @@ impl FromAscObj> for store::Value { asc_enum: AscEnum, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { use self::store::Value; @@ -248,27 +254,27 @@ impl FromAscObj> for store::Value { Ok(match asc_enum.kind { StoreValueKind::String => { let ptr: AscPtr = AscPtr::from(payload); - Value::String(asc_get(heap, ptr, gas)?) + Value::String(asc_get(heap, ptr, gas, depth)?) } StoreValueKind::Int => Value::Int(i32::from(payload)), StoreValueKind::BigDecimal => { let ptr: AscPtr = AscPtr::from(payload); - Value::BigDecimal(asc_get(heap, ptr, gas)?) + Value::BigDecimal(asc_get(heap, ptr, gas, depth)?) } StoreValueKind::Bool => Value::Bool(bool::from(payload)), StoreValueKind::Array => { let ptr: AscEnumArray = AscPtr::from(payload); - Value::List(asc_get(heap, ptr, gas)?) + Value::List(asc_get(heap, ptr, gas, depth)?) 
} StoreValueKind::Null => Value::Null, StoreValueKind::Bytes => { let ptr: AscPtr = AscPtr::from(payload); - let array: Vec = asc_get(heap, ptr, gas)?; + let array: Vec = asc_get(heap, ptr, gas, depth)?; Value::Bytes(array.as_slice().into()) } StoreValueKind::BigInt => { let ptr: AscPtr = AscPtr::from(payload); - let array: Vec = asc_get(heap, ptr, gas)?; + let array: Vec = asc_get(heap, ptr, gas, depth)?; Value::BigInt(store::scalar::BigInt::from_signed_bytes_le(&array)) } }) diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 21e79ca3242..fb919a3979d 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -32,6 +32,7 @@ impl FromAscObj> for Vec { typed_array: TypedArray, heap: &H, gas: &GasCounter, + _depth: usize, ) -> Result { typed_array.to_vec(heap, gas) } @@ -42,6 +43,7 @@ impl FromAscObj> for typed_array: TypedArray, heap: &H, gas: &GasCounter, + _depth: usize, ) -> Result { let v = typed_array.to_vec(heap, gas)?; let array = <[T; LEN]>::try_from(v) @@ -88,6 +90,7 @@ impl FromAscObj for String { asc_string: AscString, _: &H, _gas: &GasCounter, + _depth: usize, ) -> Result { let mut string = String::from_utf16(asc_string.content()) .map_err(|e| DeterministicHostError::from(anyhow::Error::from(e)))?; @@ -105,8 +108,9 @@ impl FromAscObj for Word { asc_string: AscString, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let string = String::from_asc_obj(asc_string, heap, gas)?; + let string = String::from_asc_obj(asc_string, heap, gas, depth)?; Ok(Word::from(string)) } @@ -129,11 +133,12 @@ impl> FromAscObj>> for array: Array>, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { array .to_vec(heap, gas)? 
.into_iter() - .map(|x| asc_get(heap, x, gas)) + .map(|x| asc_get(heap, x, gas, depth)) .collect() } } @@ -145,10 +150,11 @@ impl, U: From asc_entry: AscTypedMapEntry, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { Ok(( - asc_get(heap, asc_entry.key, gas)?, - asc_get(heap, asc_entry.value, gas)?, + asc_get(heap, asc_entry.key, gas, depth)?, + asc_get(heap, asc_entry.value, gas, depth)?, )) } } @@ -182,8 +188,9 @@ where asc_map: AscTypedMap, heap: &H, gas: &GasCounter, + depth: usize, ) -> Result { - let entries: Vec<(T, U)> = asc_get(heap, asc_map.entries, gas)?; + let entries: Vec<(T, U)> = asc_get(heap, asc_map.entries, gas, depth)?; Ok(HashMap::from_iter(entries.into_iter())) } } From a4f6d6207d1785e7949d7e12bec3ad8797c6ef25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Fri, 28 Apr 2023 10:52:15 -0400 Subject: [PATCH 0174/2104] bump substreams client to sf.substreams.rpc.v2.Stream/Blocks (#4556) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * bump substreams client to sf.substreams.rpc.v2.Stream/Blocks This breaks compatibility with previous substreams servers that only support 'sf.substreams.v1.Stream/Blocks' * adjust substreams mapper code branches for clarity * add comments to clarify intent when ignoring useless substreams messages --------- Co-authored-by: Stéphane Duchesneau Co-authored-by: Eduard Voiculescu --- chain/substreams/src/data_source.rs | 3 + chain/substreams/src/mapper.rs | 142 ++-- graph/build.rs | 7 + graph/proto/substreams-rpc.proto | 181 +++++ graph/proto/substreams.proto | 236 +------ graph/src/blockchain/block_stream.rs | 8 +- .../src/blockchain/substreams_block_stream.rs | 39 +- graph/src/firehose/endpoints.rs | 16 +- graph/src/lib.rs | 2 + graph/src/substreams/sf.substreams.v1.rs | 622 ++---------------- graph/src/substreams_rpc/codec.rs | 5 + graph/src/substreams_rpc/mod.rs | 3 + .../substreams_rpc/sf.substreams.rpc.v2.rs | 542 +++++++++++++++ 13 files 
changed, 919 insertions(+), 887 deletions(-) create mode 100644 graph/proto/substreams-rpc.proto create mode 100644 graph/src/substreams_rpc/codec.rs create mode 100644 graph/src/substreams_rpc/mod.rs create mode 100644 graph/src/substreams_rpc/sf.substreams.rpc.v2.rs diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index b3006c7b463..09e81f0669d 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -401,6 +401,9 @@ mod test { }), module_meta: vec![], package_meta: vec![], + sink_config: None, + network: "".into(), + sink_module: "".into(), } } diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index e9d5ba06862..91707d3e138 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -1,13 +1,10 @@ use crate::{Block, Chain, EntityChanges, TriggerData}; -use graph::blockchain::block_stream::SubstreamsError::{ - MultipleModuleOutputError, UnexpectedStoreDeltaOutput, -}; use graph::blockchain::block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; use graph::prelude::{async_trait, BlockHash, BlockNumber, BlockPtr, Logger}; -use graph::substreams::module_output::Data; -use graph::substreams::{BlockScopedData, Clock, ForkStep}; +use graph::substreams::Clock; +use graph::substreams_rpc::response::Message as SubstreamsMessage; use prost::Message; pub struct Mapper {} @@ -17,89 +14,78 @@ impl SubstreamsMapper for Mapper { async fn to_block_stream_event( &self, logger: &Logger, - block_scoped_data: &BlockScopedData, + message: Option, ) -> Result>, SubstreamsError> { - let BlockScopedData { - outputs, - clock, - step, - cursor: _, - } = block_scoped_data; - - let step = ForkStep::from_i32(*step).unwrap_or_else(|| { - panic!( - "unknown step i32 value {}, maybe you forgot update & re-regenerate the protobuf definitions?", - step - ) - }); - - if outputs.is_empty() { - return Ok(None); - } - - if 
outputs.len() > 1 { - return Err(MultipleModuleOutputError); - } + match message { + Some(SubstreamsMessage::BlockUndoSignal(undo)) => { + let valid_block = match undo.last_valid_block { + Some(clock) => clock, + None => return Err(SubstreamsError::InvalidUndoError), + }; + let valid_ptr = BlockPtr { + hash: valid_block.id.trim_start_matches("0x").try_into()?, + number: valid_block.number as i32, + }; + return Ok(Some(BlockStreamEvent::Revert( + valid_ptr, + FirehoseCursor::from(undo.last_valid_cursor.clone()), + ))); + } - //todo: handle step - let module_output = &block_scoped_data.outputs[0]; - let cursor = &block_scoped_data.cursor; + Some(SubstreamsMessage::BlockScopedData(block_scoped_data)) => { + let module_output = match &block_scoped_data.output { + Some(out) => out, + None => return Ok(None), + }; - let clock = match clock { - Some(clock) => clock, - None => return Err(SubstreamsError::MissingClockError), - }; + let clock = match block_scoped_data.clock { + Some(clock) => clock, + None => return Err(SubstreamsError::MissingClockError), + }; - let Clock { - id: hash, - number, - timestamp: _, - } = clock; + let cursor = &block_scoped_data.cursor; - let hash: BlockHash = hash.as_str().try_into()?; - let number: BlockNumber = *number as BlockNumber; + let Clock { + id: hash, + number, + timestamp: _, + } = clock; - match module_output.data.as_ref() { - Some(Data::MapOutput(msg)) => { - let changes: EntityChanges = Message::decode(msg.value.as_slice()) - .map_err(SubstreamsError::DecodingError)?; + let hash: BlockHash = hash.as_str().try_into()?; + let number: BlockNumber = number as BlockNumber; - use ForkStep::*; - match step { - StepIrreversible | StepNew => Ok(Some(BlockStreamEvent::ProcessBlock( - // Even though the trigger processor for substreams doesn't care about TriggerData - // there are a bunch of places in the runner that check if trigger data - // empty and skip processing if so. 
This will prolly breakdown - // close to head so we will need to improve things. + let changes: EntityChanges = match module_output.map_output.as_ref() { + Some(msg) => Message::decode(msg.value.as_slice()) + .map_err(SubstreamsError::DecodingError)?, + None => EntityChanges { + entity_changes: [].to_vec(), + }, + }; - // TODO(filipe): Fix once either trigger data can be empty - // or we move the changes into trigger data. - BlockWithTriggers::new( - Block { - hash, - number, - changes, - }, - vec![TriggerData {}], - logger, - ), - FirehoseCursor::from(cursor.clone()), - ))), - StepUndo => { - let parent_ptr = BlockPtr { hash, number }; + // Even though the trigger processor for substreams doesn't care about TriggerData + // there are a bunch of places in the runner that check if trigger data + // empty and skip processing if so. This will prolly breakdown + // close to head so we will need to improve things. - Ok(Some(BlockStreamEvent::Revert( - parent_ptr, - FirehoseCursor::from(cursor.clone()), - ))) - } - StepUnknown => { - panic!("unknown step should not happen in the Firehose response") - } - } + // TODO(filipe): Fix once either trigger data can be empty + // or we move the changes into trigger data. 
+ Ok(Some(BlockStreamEvent::ProcessBlock( + BlockWithTriggers::new( + Block { + hash, + number, + changes, + }, + vec![TriggerData {}], + logger, + ), + FirehoseCursor::from(cursor.clone()), + ))) } - Some(Data::DebugStoreDeltas(_)) => Err(UnexpectedStoreDeltaOutput), - _ => Err(SubstreamsError::ModuleOutputNotPresentOrUnexpected), + + // ignoring Progress messages and SessionInit + // We are only interested in Data and Undo signals + _ => Ok(None), } } } diff --git a/graph/build.rs b/graph/build.rs index 14399c784c1..3cc00c0dc07 100644 --- a/graph/build.rs +++ b/graph/build.rs @@ -18,4 +18,11 @@ fn main() { .out_dir("src/substreams") .compile(&["proto/substreams.proto"], &["proto"]) .expect("Failed to compile Substreams proto(s)"); + + tonic_build::configure() + .protoc_arg("--experimental_allow_proto3_optional") + .extern_path(".sf.substreams.v1", "crate::substreams") + .out_dir("src/substreams_rpc") + .compile(&["proto/substreams-rpc.proto"], &["proto"]) + .expect("Failed to compile Substreams RPC proto(s)"); } diff --git a/graph/proto/substreams-rpc.proto b/graph/proto/substreams-rpc.proto new file mode 100644 index 00000000000..cc1040f46df --- /dev/null +++ b/graph/proto/substreams-rpc.proto @@ -0,0 +1,181 @@ +syntax = "proto3"; + +package sf.substreams.rpc.v2; + +import "google/protobuf/any.proto"; +import "substreams.proto"; + +service Stream { + rpc Blocks(Request) returns (stream Response); +} + +message Request { + int64 start_block_num = 1; + string start_cursor = 2; + uint64 stop_block_num = 3; + + // With final_block_only, you only receive blocks that are irreversible: + // 'final_block_height' will be equal to current block and no 'undo_signal' will ever be sent + bool final_blocks_only = 4; + + // Substreams has two mode when executing your module(s) either development mode or production + // mode. 
Development and production modes impact the execution of Substreams, important aspects + // of execution include: + // * The time required to reach the first byte. + // * The speed that large ranges get executed. + // * The module logs and outputs sent back to the client. + // + // By default, the engine runs in developer mode, with richer and deeper output. Differences + // between production and development modes include: + // * Forward parallel execution is enabled in production mode and disabled in development mode + // * The time required to reach the first byte in development mode is faster than in production mode. + // + // Specific attributes of development mode include: + // * The client will receive all of the executed module's logs. + // * It's possible to request specific store snapshots in the execution tree (via `debug_initial_store_snapshot_for_modules`). + // * Multiple module's output is possible. + // + // With production mode`, however, you trade off functionality for high speed enabling forward + // parallel execution of module ahead of time. + bool production_mode = 5; + + string output_module = 6; + + sf.substreams.v1.Modules modules = 7; + + // Available only in developer mode + repeated string debug_initial_store_snapshot_for_modules = 10; +} + + +message Response { + oneof message { + SessionInit session = 1; // Always sent first + ModulesProgress progress = 2; // Progress of data preparation, before sending in the stream of `data` events. + BlockScopedData block_scoped_data = 3; + BlockUndoSignal block_undo_signal = 4; + + // Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set. + InitialSnapshotData debug_snapshot_data = 10; + // Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set. 
+ InitialSnapshotComplete debug_snapshot_complete = 11; + + } +} + + +// BlockUndoSignal informs you that every bit of data +// with a block number above 'last_valid_block' has been reverted +// on-chain. Delete that data and restart from 'last_valid_cursor' +message BlockUndoSignal { + sf.substreams.v1.BlockRef last_valid_block = 1; + string last_valid_cursor = 2; +} + +message BlockScopedData { + MapModuleOutput output = 1; + sf.substreams.v1.Clock clock = 2; + string cursor = 3; + + // Non-deterministic, allows substreams-sink to let go of their undo data. + uint64 final_block_height = 4; + + repeated MapModuleOutput debug_map_outputs = 10; + repeated StoreModuleOutput debug_store_outputs = 11; +} + +message SessionInit { + string trace_id = 1; +} + +message InitialSnapshotComplete { + string cursor = 1; +} + +message InitialSnapshotData { + string module_name = 1; + repeated StoreDelta deltas = 2; + uint64 sent_keys = 4; + uint64 total_keys = 3; +} + +message MapModuleOutput { + string name = 1; + google.protobuf.Any map_output = 2; + // DebugOutputInfo is available in non-production mode only + OutputDebugInfo debug_info = 10; +} + +// StoreModuleOutput are produced for store modules in development mode. +// It is not possible to retrieve store models in production, with parallelization +// enabled. If you need the deltas directly, write a pass through mapper module +// that will get them down to you. +message StoreModuleOutput { + string name = 1; + repeated StoreDelta debug_store_deltas = 2; + OutputDebugInfo debug_info = 10; +} + +message OutputDebugInfo { + repeated string logs = 1; + // LogsTruncated is a flag that tells you if you received all the logs or if they + // were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
+ bool logs_truncated = 2; + bool cached = 3; +} + +message ModulesProgress { + repeated ModuleProgress modules = 1; +} + +message ModuleProgress { + string name = 1; + + oneof type { + ProcessedRanges processed_ranges = 2; + InitialState initial_state = 3; + ProcessedBytes processed_bytes = 4; + Failed failed = 5; + } + + message ProcessedRanges { + repeated BlockRange processed_ranges = 1; + } + message InitialState { + uint64 available_up_to_block = 2; + } + message ProcessedBytes { + uint64 total_bytes_read = 1; + uint64 total_bytes_written = 2; + uint64 bytes_read_delta = 3; + uint64 bytes_written_delta = 4; + uint64 nano_seconds_delta = 5; + } + message Failed { + string reason = 1; + repeated string logs = 2; + // FailureLogsTruncated is a flag that tells you if you received all the logs or if they + // were truncated because you logged too much (fixed limit currently is set to 128 KiB). + bool logs_truncated = 3; + } +} + +message BlockRange { + uint64 start_block = 2; + uint64 end_block = 3; +} + +message StoreDelta { + enum Operation { + UNSET = 0; + CREATE = 1; + UPDATE = 2; + DELETE = 3; + } + Operation operation = 1; + uint64 ordinal = 2; + string key = 3; + bytes old_value = 4; + bytes new_value = 5; +} + diff --git a/graph/proto/substreams.proto b/graph/proto/substreams.proto index e860e3bfdb1..a03d77c9f46 100644 --- a/graph/proto/substreams.proto +++ b/graph/proto/substreams.proto @@ -1,196 +1,42 @@ -// File generated using this command at the root of `graph-node` project -// and assuming `substreams` repository is a sibling of `graph-node` (note that you -// might need to adjust the `head -nN` and `skip N` values in the commands below to skip -// more/less lines): -// -// ``` -// cat graph/proto/substreams.proto | head -n16 > /tmp/substreams.proto && mv /tmp/substreams.proto graph/proto/substreams.proto -// cat ../substreams/proto/sf/substreams/v1/substreams.proto | grep -Ev 'import *"sf/substreams' >> graph/proto/substreams.proto -// cat 
../substreams/proto/sf/substreams/v1/modules.proto | skip 6 >> graph/proto/substreams.proto -// cat ../substreams/proto/sf/substreams/v1/package.proto | skip 9 >> graph/proto/substreams.proto -// cat ../substreams/proto/sf/substreams/v1/clock.proto | skip 7 >> graph/proto/substreams.proto -// # Manually add line `import "google/protobuf/descriptor.proto";` below `import "google/protobuf/timestamp.proto";` -// ``` -// -// FIXME: We copy over and inline most of the substreams files, this is bad and we need a better way to -// generate that, outside of doing this copying over. syntax = "proto3"; package sf.substreams.v1; -option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/v1;pbsubstreams"; -import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/descriptor.proto"; +import "google/protobuf/any.proto"; -service Stream { - rpc Blocks(Request) returns (stream Response); -} - -message Request { - int64 start_block_num = 1; - string start_cursor = 2; - uint64 stop_block_num = 3; - repeated ForkStep fork_steps = 4; - string irreversibility_condition = 5; - - // By default, the engine runs in developer mode, with richer and deeper output, - // * support for multiple `output_modules`, of `store` and `map` kinds - // * support for `initial_store_snapshot_for_modules` - // * log outputs for output modules - // - // With `production_mode`, however, you trade off functionality for high speed, where it: - // * restricts the possible requested `output_modules` to a single mapper module, - // * turns off support for `initial_store_snapshot_for_modules`, - // * still streams output linearly, with a cursor, but at higher speeds - // * and purges log outputs from responses. 
- bool production_mode = 9; - - Modules modules = 6; - repeated string output_modules = 7; - repeated string initial_store_snapshot_for_modules = 8; -} - -message Response { - oneof message { - SessionInit session = 5; // Always sent first - ModulesProgress progress = 1; // Progress of data preparation, before sending in the stream of `data` events. - InitialSnapshotData snapshot_data = 2; - InitialSnapshotComplete snapshot_complete = 3; - BlockScopedData data = 4; - } -} - -enum ForkStep { - STEP_UNKNOWN = 0; - // Block is new head block of the chain, that is linear with the previous block - STEP_NEW = 1; - // Block is now forked and should be undone, it's not the head block of the chain anymore - STEP_UNDO = 2; - // Removed, was STEP_REDO - reserved 3; - // Block is now irreversible and can be committed to (finality is chain specific, see chain documentation for more details) - STEP_IRREVERSIBLE = 4; - // Removed, was STEP_STALLED - reserved 5; -} - -message SessionInit { - string trace_id = 1; -} - -message InitialSnapshotComplete { - string cursor = 1; -} - -message InitialSnapshotData { - string module_name = 1; - StoreDeltas deltas = 2; - uint64 sent_keys = 4; - uint64 total_keys = 3; -} - -message BlockScopedData { - repeated ModuleOutput outputs = 1; - Clock clock = 3; - ForkStep step = 6; - string cursor = 10; -} - -message ModuleOutput { - string name = 1; - - oneof data { - google.protobuf.Any map_output = 2; - - // StoreDeltas are produced for store modules in development mode. - // It is not possible to retrieve store models in production, with parallelization - // enabled. If you need the deltas directly, write a pass through mapper module - // that will get them down to you. - StoreDeltas debug_store_deltas = 3; - } - repeated string debug_logs = 4; - // LogsTruncated is a flag that tells you if you received all the logs or if they - // were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
- bool debug_logs_truncated = 5; - - bool cached = 6; -} - -// think about: -// message ModuleOutput { ... -// ModuleOutputDebug debug_info = 6; -// ...} -//message ModuleOutputDebug { -// StoreDeltas store_deltas = 3; -// repeated string logs = 4; -// // LogsTruncated is a flag that tells you if you received all the logs or if they -// // were truncated because you logged too much (fixed limit currently is set to 128 KiB). -// bool logs_truncated = 5; -//} - -message ModulesProgress { - repeated ModuleProgress modules = 1; -} - -message ModuleProgress { - string name = 1; +message Package { + // Needs to be one so this file can be used _directly_ as a + // buf `Image` andor a ProtoSet for grpcurl and other tools + repeated google.protobuf.FileDescriptorProto proto_files = 1; + reserved 2 to 4; // Reserved for future: in case protosets adds fields - oneof type { - ProcessedRange processed_ranges = 2; - InitialState initial_state = 3; - ProcessedBytes processed_bytes = 4; - Failed failed = 5; - } + uint64 version = 5; + sf.substreams.v1.Modules modules = 6; + repeated ModuleMetadata module_meta = 7; + repeated PackageMetadata package_meta = 8; - message ProcessedRange { - repeated BlockRange processed_ranges = 1; - } - message InitialState { - uint64 available_up_to_block = 2; - } - message ProcessedBytes { - uint64 total_bytes_read = 1; - uint64 total_bytes_written = 2; - } - message Failed { - string reason = 1; - repeated string logs = 2; - // FailureLogsTruncated is a flag that tells you if you received all the logs or if they - // were truncated because you logged too much (fixed limit currently is set to 128 KiB). - bool logs_truncated = 3; - } -} + // Source network for Substreams to fetch its data from. 
+ string network = 9; -message BlockRange { - uint64 start_block = 2; - uint64 end_block = 3; + google.protobuf.Any sink_config = 10; + string sink_module = 11; } -message StoreDeltas { - repeated StoreDelta deltas = 1; +message PackageMetadata { + string version = 1; + string url = 2; + string name = 3; + string doc = 4; } -message StoreDelta { - enum Operation { - UNSET = 0; - CREATE = 1; - UPDATE = 2; - DELETE = 3; - } - Operation operation = 1; - uint64 ordinal = 2; - string key = 3; - bytes old_value = 4; - bytes new_value = 5; +message ModuleMetadata { + // Corresponds to the index in `Package.metadata.package_meta` + uint64 package_index = 1; + string doc = 2; } -message Output { - uint64 block_num = 1; - string block_id = 2; - google.protobuf.Timestamp timestamp = 4; - google.protobuf.Any value = 10; -} message Modules { repeated Module modules = 1; repeated Binary binaries = 2; @@ -255,6 +101,7 @@ message Module { Source source = 1; Map map = 2; Store store = 3; + Params params = 4; } message Source { @@ -273,40 +120,25 @@ message Module { DELTAS = 2; } } + message Params { + string value = 1; + } } message Output { string type = 1; } } -message Package { - // Needs to be one so this file can be used _directly_ as a - // buf `Image` andor a ProtoSet for grpcurl and other tools - repeated google.protobuf.FileDescriptorProto proto_files = 1; - reserved 2; // In case protosets add a field some day. - reserved 3; // In case protosets add a field some day. - reserved 4; // In case protosets add a field some day. 
- - uint64 version = 5; - sf.substreams.v1.Modules modules = 6; - repeated ModuleMetadata module_meta = 7; - repeated PackageMetadata package_meta = 8; -} -message PackageMetadata { - string version = 1; - string url = 2; - string name = 3; - string doc = 4; -} - -message ModuleMetadata { - // Corresponds to the index in `Package.metadata.package_meta` - uint64 package_index = 1; - string doc = 2; -} +// Clock is a pointer to a block with added timestamp message Clock { string id = 1; uint64 number = 2; google.protobuf.Timestamp timestamp = 3; } + +// BlockRef is a pointer to a block to which we don't know the timestamp +message BlockRef { + string id = 1; + uint64 number = 2; +} diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 9522734c8a1..301b85f610a 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -11,7 +11,7 @@ use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; -use crate::substreams::BlockScopedData; +use crate::substreams_rpc::response::Message; use crate::{prelude::*, prometheus::labels}; pub struct BufferedBlockStream { @@ -317,7 +317,7 @@ pub trait SubstreamsMapper: Send + Sync { async fn to_block_stream_event( &self, logger: &Logger, - response: &BlockScopedData, + response: Option, // adapter: &Arc>, // filter: &C::TriggerFilter, ) -> Result>, SubstreamsError>; @@ -338,6 +338,10 @@ pub enum FirehoseError { pub enum SubstreamsError { #[error("response is missing the clock information")] MissingClockError, + + #[error("invalid undo message")] + InvalidUndoError, + /// We were unable to decode the received block payload into the chain specific Block struct (e.g. 
chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index f1872c7f4aa..2470cb7a2b5 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -3,9 +3,8 @@ use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; use crate::blockchain::Blockchain; use crate::firehose::FirehoseEndpoint; use crate::prelude::*; -use crate::substreams::response::Message; -use crate::substreams::ForkStep::{StepNew, StepUndo}; -use crate::substreams::{Modules, Request, Response}; +use crate::substreams::Modules; +use crate::substreams_rpc::{Request, Response}; use crate::util::backoff::ExponentialBackoff; use async_stream::try_stream; use futures03::{Stream, StreamExt}; @@ -203,10 +202,8 @@ fn stream_blocks>( start_block_num, start_cursor: latest_cursor.clone(), stop_block_num, - fork_steps: vec![StepNew as i32, StepUndo as i32], - irreversibility_condition: "".to_string(), modules: modules.clone(), - output_modules: vec![module_name.clone()], + output_module: module_name.clone(), production_mode: true, ..Default::default() }; @@ -298,25 +295,21 @@ async fn process_substreams_response>( Err(e) => return Err(anyhow!("An error occurred while streaming blocks: {:#}", e)), }; - match response.message { - Some(Message::Data(block_scoped_data)) => { - match mapper - .to_block_stream_event(logger, &block_scoped_data) - .await - .context("Mapping block to BlockStreamEvent failed")? - { - Some(event) => Ok(Some(BlockResponse::Proceed( - event, - block_scoped_data.cursor.to_string(), - ))), - None => Ok(None), + match mapper + .to_block_stream_event(logger, response.message) + .await + .context("Mapping message to BlockStreamEvent failed")? 
+ { + Some(event) => { + let cursor = match &event { + BlockStreamEvent::Revert(_, cursor) => cursor, + BlockStreamEvent::ProcessBlock(_, cursor) => cursor, } + .to_string(); + + return Ok(Some(BlockResponse::Proceed(event, cursor))); } - None => { - warn!(&logger, "Got None on substream message"); - Ok(None) - } - _ => Ok(None), + None => Ok(None), // some progress responses are ignored within to_block_stream_event } } diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index d0b02755e3c..fd80571af6e 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -7,7 +7,7 @@ use crate::{ endpoint::{ConnectionType, EndpointMetrics, Provider, RequestLabels}, firehose::decode_firehose_block, prelude::{anyhow, debug, info}, - substreams, + substreams_rpc, }; use crate::firehose::fetch_client::FetchClient; @@ -239,7 +239,7 @@ impl FirehoseEndpoint { fn new_substreams_client( &self, - ) -> substreams::stream_client::StreamClient< + ) -> substreams_rpc::stream_client::StreamClient< InterceptedService, impl tonic::service::Interceptor>, > { let metrics = MetricsInterceptor { @@ -252,9 +252,11 @@ impl FirehoseEndpoint { }, }; - let mut client = - substreams::stream_client::StreamClient::with_interceptor(metrics, self.auth.clone()) - .accept_compressed(CompressionEncoding::Gzip); + let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( + metrics, + self.auth.clone(), + ) + .accept_compressed(CompressionEncoding::Gzip); if self.compression_enabled { client = client.send_compressed(CompressionEncoding::Gzip); @@ -399,8 +401,8 @@ impl FirehoseEndpoint { pub async fn substreams( self: Arc, - request: substreams::Request, - ) -> Result, anyhow::Error> { + request: substreams_rpc::Request, + ) -> Result, anyhow::Error> { let mut client = self.new_substreams_client(); let response_stream = client.blocks(request).await?; let block_stream = response_stream.into_inner(); diff --git a/graph/src/lib.rs 
b/graph/src/lib.rs index 05e7e300a37..9f1b7d76ec0 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -28,6 +28,8 @@ pub mod firehose; pub mod substreams; +pub mod substreams_rpc; + pub mod endpoint; pub mod schema; diff --git a/graph/src/substreams/sf.substreams.v1.rs b/graph/src/substreams/sf.substreams.v1.rs index 99ab0c0cf58..f8af149f6e4 100644 --- a/graph/src/substreams/sf.substreams.v1.rs +++ b/graph/src/substreams/sf.substreams.v1.rs @@ -1,274 +1,46 @@ #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct Request { - #[prost(int64, tag = "1")] - pub start_block_num: i64, - #[prost(string, tag = "2")] - pub start_cursor: ::prost::alloc::string::String, - #[prost(uint64, tag = "3")] - pub stop_block_num: u64, - #[prost(enumeration = "ForkStep", repeated, tag = "4")] - pub fork_steps: ::prost::alloc::vec::Vec, - #[prost(string, tag = "5")] - pub irreversibility_condition: ::prost::alloc::string::String, - /// By default, the engine runs in developer mode, with richer and deeper output, - /// * support for multiple `output_modules`, of `store` and `map` kinds - /// * support for `initial_store_snapshot_for_modules` - /// * log outputs for output modules - /// - /// With `production_mode`, however, you trade off functionality for high speed, where it: - /// * restricts the possible requested `output_modules` to a single mapper module, - /// * turns off support for `initial_store_snapshot_for_modules`, - /// * still streams output linearly, with a cursor, but at higher speeds - /// * and purges log outputs from responses. 
- #[prost(bool, tag = "9")] - pub production_mode: bool, +pub struct Package { + /// Needs to be one so this file can be used _directly_ as a + /// buf `Image` andor a ProtoSet for grpcurl and other tools + #[prost(message, repeated, tag = "1")] + pub proto_files: ::prost::alloc::vec::Vec<::prost_types::FileDescriptorProto>, + #[prost(uint64, tag = "5")] + pub version: u64, #[prost(message, optional, tag = "6")] pub modules: ::core::option::Option, - #[prost(string, repeated, tag = "7")] - pub output_modules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(string, repeated, tag = "8")] - pub initial_store_snapshot_for_modules: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Response { - #[prost(oneof = "response::Message", tags = "5, 1, 2, 3, 4")] - pub message: ::core::option::Option, -} -/// Nested message and enum types in `Response`. -pub mod response { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Message { - /// Always sent first - #[prost(message, tag = "5")] - Session(super::SessionInit), - /// Progress of data preparation, before sending in the stream of `data` events. 
- #[prost(message, tag = "1")] - Progress(super::ModulesProgress), - #[prost(message, tag = "2")] - SnapshotData(super::InitialSnapshotData), - #[prost(message, tag = "3")] - SnapshotComplete(super::InitialSnapshotComplete), - #[prost(message, tag = "4")] - Data(super::BlockScopedData), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SessionInit { - #[prost(string, tag = "1")] - pub trace_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitialSnapshotComplete { - #[prost(string, tag = "1")] - pub cursor: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitialSnapshotData { - #[prost(string, tag = "1")] - pub module_name: ::prost::alloc::string::String, - #[prost(message, optional, tag = "2")] - pub deltas: ::core::option::Option, - #[prost(uint64, tag = "4")] - pub sent_keys: u64, - #[prost(uint64, tag = "3")] - pub total_keys: u64, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockScopedData { - #[prost(message, repeated, tag = "1")] - pub outputs: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "3")] - pub clock: ::core::option::Option, - #[prost(enumeration = "ForkStep", tag = "6")] - pub step: i32, - #[prost(string, tag = "10")] - pub cursor: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModuleOutput { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(string, repeated, tag = "4")] - pub debug_logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// LogsTruncated is a flag that tells you if you received all the logs or if they - /// were truncated because you logged too much (fixed limit 
currently is set to 128 KiB). - #[prost(bool, tag = "5")] - pub debug_logs_truncated: bool, - #[prost(bool, tag = "6")] - pub cached: bool, - #[prost(oneof = "module_output::Data", tags = "2, 3")] - pub data: ::core::option::Option, -} -/// Nested message and enum types in `ModuleOutput`. -pub mod module_output { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Data { - #[prost(message, tag = "2")] - MapOutput(::prost_types::Any), - /// StoreDeltas are produced for store modules in development mode. - /// It is not possible to retrieve store models in production, with parallelization - /// enabled. If you need the deltas directly, write a pass through mapper module - /// that will get them down to you. - #[prost(message, tag = "3")] - DebugStoreDeltas(super::StoreDeltas), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModulesProgress { - #[prost(message, repeated, tag = "1")] - pub modules: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "7")] + pub module_meta: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "8")] + pub package_meta: ::prost::alloc::vec::Vec, + /// Source network for Substreams to fetch its data from. + #[prost(string, tag = "9")] + pub network: ::prost::alloc::string::String, + #[prost(message, optional, tag = "10")] + pub sink_config: ::core::option::Option<::prost_types::Any>, + #[prost(string, tag = "11")] + pub sink_module: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModuleProgress { +pub struct PackageMetadata { #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(oneof = "module_progress::Type", tags = "2, 3, 4, 5")] - pub r#type: ::core::option::Option, -} -/// Nested message and enum types in `ModuleProgress`. 
-pub mod module_progress { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ProcessedRange { - #[prost(message, repeated, tag = "1")] - pub processed_ranges: ::prost::alloc::vec::Vec, - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct InitialState { - #[prost(uint64, tag = "2")] - pub available_up_to_block: u64, - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ProcessedBytes { - #[prost(uint64, tag = "1")] - pub total_bytes_read: u64, - #[prost(uint64, tag = "2")] - pub total_bytes_written: u64, - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Failed { - #[prost(string, tag = "1")] - pub reason: ::prost::alloc::string::String, - #[prost(string, repeated, tag = "2")] - pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// FailureLogsTruncated is a flag that tells you if you received all the logs or if they - /// were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
- #[prost(bool, tag = "3")] - pub logs_truncated: bool, - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Type { - #[prost(message, tag = "2")] - ProcessedRanges(ProcessedRange), - #[prost(message, tag = "3")] - InitialState(InitialState), - #[prost(message, tag = "4")] - ProcessedBytes(ProcessedBytes), - #[prost(message, tag = "5")] - Failed(Failed), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockRange { - #[prost(uint64, tag = "2")] - pub start_block: u64, - #[prost(uint64, tag = "3")] - pub end_block: u64, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StoreDeltas { - #[prost(message, repeated, tag = "1")] - pub deltas: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StoreDelta { - #[prost(enumeration = "store_delta::Operation", tag = "1")] - pub operation: i32, - #[prost(uint64, tag = "2")] - pub ordinal: u64, + pub version: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub url: ::prost::alloc::string::String, #[prost(string, tag = "3")] - pub key: ::prost::alloc::string::String, - #[prost(bytes = "vec", tag = "4")] - pub old_value: ::prost::alloc::vec::Vec, - #[prost(bytes = "vec", tag = "5")] - pub new_value: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `StoreDelta`. -pub mod store_delta { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Operation { - Unset = 0, - Create = 1, - Update = 2, - Delete = 3, - } - impl Operation { - /// String value of the enum field names used in the ProtoBuf definition. 
- /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Operation::Unset => "UNSET", - Operation::Create => "CREATE", - Operation::Update => "UPDATE", - Operation::Delete => "DELETE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNSET" => Some(Self::Unset), - "CREATE" => Some(Self::Create), - "UPDATE" => Some(Self::Update), - "DELETE" => Some(Self::Delete), - _ => None, - } - } - } + pub name: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub doc: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct Output { +pub struct ModuleMetadata { + /// Corresponds to the index in `Package.metadata.package_meta` #[prost(uint64, tag = "1")] - pub block_num: u64, + pub package_index: u64, #[prost(string, tag = "2")] - pub block_id: ::prost::alloc::string::String, - #[prost(message, optional, tag = "4")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - #[prost(message, optional, tag = "10")] - pub value: ::core::option::Option<::prost_types::Any>, + pub doc: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -392,7 +164,7 @@ pub mod module { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Input { - #[prost(oneof = "input::Input", tags = "1, 2, 3")] + #[prost(oneof = "input::Input", tags = "1, 2, 3, 4")] pub input: ::core::option::Option, } /// Nested message and enum types in `Input`. 
@@ -462,6 +234,12 @@ pub mod module { } } #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Params { + #[prost(string, tag = "1")] + pub value: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Input { #[prost(message, tag = "1")] @@ -470,6 +248,8 @@ pub mod module { Map(Map), #[prost(message, tag = "3")] Store(Store), + #[prost(message, tag = "4")] + Params(Params), } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -487,43 +267,7 @@ pub mod module { KindStore(KindStore), } } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Package { - /// Needs to be one so this file can be used _directly_ as a - /// buf `Image` andor a ProtoSet for grpcurl and other tools - #[prost(message, repeated, tag = "1")] - pub proto_files: ::prost::alloc::vec::Vec<::prost_types::FileDescriptorProto>, - #[prost(uint64, tag = "5")] - pub version: u64, - #[prost(message, optional, tag = "6")] - pub modules: ::core::option::Option, - #[prost(message, repeated, tag = "7")] - pub module_meta: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "8")] - pub package_meta: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PackageMetadata { - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub url: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub doc: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModuleMetadata { - /// Corresponds to the index in `Package.metadata.package_meta` - #[prost(uint64, tag = "1")] - pub package_index: u64, - #[prost(string, tag = 
"2")] - pub doc: ::prost::alloc::string::String, -} +/// Clock is a pointer to a block with added timestamp #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Clock { @@ -534,284 +278,12 @@ pub struct Clock { #[prost(message, optional, tag = "3")] pub timestamp: ::core::option::Option<::prost_types::Timestamp>, } -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ForkStep { - StepUnknown = 0, - /// Block is new head block of the chain, that is linear with the previous block - StepNew = 1, - /// Block is now forked and should be undone, it's not the head block of the chain anymore - StepUndo = 2, - /// Block is now irreversible and can be committed to (finality is chain specific, see chain documentation for more details) - StepIrreversible = 4, -} -impl ForkStep { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ForkStep::StepUnknown => "STEP_UNKNOWN", - ForkStep::StepNew => "STEP_NEW", - ForkStep::StepUndo => "STEP_UNDO", - ForkStep::StepIrreversible => "STEP_IRREVERSIBLE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "STEP_UNKNOWN" => Some(Self::StepUnknown), - "STEP_NEW" => Some(Self::StepNew), - "STEP_UNDO" => Some(Self::StepUndo), - "STEP_IRREVERSIBLE" => Some(Self::StepIrreversible), - _ => None, - } - } -} -/// Generated client implementations. 
-pub mod stream_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct StreamClient { - inner: tonic::client::Grpc, - } - impl StreamClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: std::convert::TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl StreamClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> StreamClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - StreamClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - pub async fn blocks( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.substreams.v1.Stream/Blocks", - ); - self.inner.server_streaming(request.into_request(), path, codec).await - } - } -} -/// Generated server implementations. -pub mod stream_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. - #[async_trait] - pub trait Stream: Send + Sync + 'static { - /// Server streaming response type for the Blocks method. - type BlocksStream: futures_core::Stream< - Item = Result, - > - + Send - + 'static; - async fn blocks( - &self, - request: tonic::Request, - ) -> Result, tonic::Status>; - } - #[derive(Debug)] - pub struct StreamServer { - inner: _Inner, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - } - struct _Inner(Arc); - impl StreamServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - } - impl tonic::codegen::Service> for StreamServer - where - T: Stream, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); - match req.uri().path() { - "/sf.substreams.v1.Stream/Blocks" => { - #[allow(non_camel_case_types)] - struct BlocksSvc(pub Arc); - impl tonic::server::ServerStreamingService - for BlocksSvc { - type Response = super::Response; - type ResponseStream = T::BlocksStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).blocks(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = BlocksSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ); - let res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - 
.header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } - } - } - } - impl Clone for StreamServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - } - } - } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(self.0.clone()) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for StreamServer { - const NAME: &'static str = "sf.substreams.v1.Stream"; - } +/// BlockRef is a pointer to a block to which we don't know the timestamp +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockRef { + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub number: u64, } diff --git a/graph/src/substreams_rpc/codec.rs b/graph/src/substreams_rpc/codec.rs new file mode 100644 index 00000000000..d70a9e53762 --- /dev/null +++ b/graph/src/substreams_rpc/codec.rs @@ -0,0 +1,5 @@ +#[rustfmt::skip] +#[path = "sf.substreams.rpc.v2.rs"] +mod pbsubstreamsrpc; + +pub use pbsubstreamsrpc::*; diff --git a/graph/src/substreams_rpc/mod.rs b/graph/src/substreams_rpc/mod.rs new file mode 100644 index 00000000000..38e96fd598d --- /dev/null +++ b/graph/src/substreams_rpc/mod.rs @@ -0,0 +1,3 @@ +mod codec; + +pub use codec::*; diff --git a/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs b/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs new file mode 100644 index 00000000000..5b42a97d30f --- /dev/null +++ b/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs @@ -0,0 +1,542 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Request { + #[prost(int64, tag = "1")] + pub start_block_num: i64, + 
#[prost(string, tag = "2")] + pub start_cursor: ::prost::alloc::string::String, + #[prost(uint64, tag = "3")] + pub stop_block_num: u64, + /// With final_block_only, you only receive blocks that are irreversible: + /// 'final_block_height' will be equal to current block and no 'undo_signal' will ever be sent + #[prost(bool, tag = "4")] + pub final_blocks_only: bool, + /// Substreams has two mode when executing your module(s) either development mode or production + /// mode. Development and production modes impact the execution of Substreams, important aspects + /// of execution include: + /// * The time required to reach the first byte. + /// * The speed that large ranges get executed. + /// * The module logs and outputs sent back to the client. + /// + /// By default, the engine runs in developer mode, with richer and deeper output. Differences + /// between production and development modes include: + /// * Forward parallel execution is enabled in production mode and disabled in development mode + /// * The time required to reach the first byte in development mode is faster than in production mode. + /// + /// Specific attributes of development mode include: + /// * The client will receive all of the executed module's logs. + /// * It's possible to request specific store snapshots in the execution tree (via `debug_initial_store_snapshot_for_modules`). + /// * Multiple module's output is possible. + /// + /// With production mode`, however, you trade off functionality for high speed enabling forward + /// parallel execution of module ahead of time. 
+ #[prost(bool, tag = "5")] + pub production_mode: bool, + #[prost(string, tag = "6")] + pub output_module: ::prost::alloc::string::String, + #[prost(message, optional, tag = "7")] + pub modules: ::core::option::Option, + /// Available only in developer mode + #[prost(string, repeated, tag = "10")] + pub debug_initial_store_snapshot_for_modules: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Response { + #[prost(oneof = "response::Message", tags = "1, 2, 3, 4, 10, 11")] + pub message: ::core::option::Option, +} +/// Nested message and enum types in `Response`. +pub mod response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Message { + /// Always sent first + #[prost(message, tag = "1")] + Session(super::SessionInit), + /// Progress of data preparation, before sending in the stream of `data` events. + #[prost(message, tag = "2")] + Progress(super::ModulesProgress), + #[prost(message, tag = "3")] + BlockScopedData(super::BlockScopedData), + #[prost(message, tag = "4")] + BlockUndoSignal(super::BlockUndoSignal), + /// Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set. + #[prost(message, tag = "10")] + DebugSnapshotData(super::InitialSnapshotData), + /// Available only in developer mode, and only if `debug_initial_store_snapshot_for_modules` is set. + #[prost(message, tag = "11")] + DebugSnapshotComplete(super::InitialSnapshotComplete), + } +} +/// BlockUndoSignal informs you that every bit of data +/// with a block number above 'last_valid_block' has been reverted +/// on-chain. 
Delete that data and restart from 'last_valid_cursor' +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockUndoSignal { + #[prost(message, optional, tag = "1")] + pub last_valid_block: ::core::option::Option, + #[prost(string, tag = "2")] + pub last_valid_cursor: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockScopedData { + #[prost(message, optional, tag = "1")] + pub output: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub clock: ::core::option::Option, + #[prost(string, tag = "3")] + pub cursor: ::prost::alloc::string::String, + /// Non-deterministic, allows substreams-sink to let go of their undo data. + #[prost(uint64, tag = "4")] + pub final_block_height: u64, + #[prost(message, repeated, tag = "10")] + pub debug_map_outputs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "11")] + pub debug_store_outputs: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SessionInit { + #[prost(string, tag = "1")] + pub trace_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InitialSnapshotComplete { + #[prost(string, tag = "1")] + pub cursor: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InitialSnapshotData { + #[prost(string, tag = "1")] + pub module_name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub deltas: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "4")] + pub sent_keys: u64, + #[prost(uint64, tag = "3")] + pub total_keys: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapModuleOutput { + 
#[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub map_output: ::core::option::Option<::prost_types::Any>, + /// DebugOutputInfo is available in non-production mode only + #[prost(message, optional, tag = "10")] + pub debug_info: ::core::option::Option, +} +/// StoreModuleOutput are produced for store modules in development mode. +/// It is not possible to retrieve store models in production, with parallelization +/// enabled. If you need the deltas directly, write a pass through mapper module +/// that will get them down to you. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StoreModuleOutput { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub debug_store_deltas: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "10")] + pub debug_info: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OutputDebugInfo { + #[prost(string, repeated, tag = "1")] + pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// LogsTruncated is a flag that tells you if you received all the logs or if they + /// were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
+ #[prost(bool, tag = "2")] + pub logs_truncated: bool, + #[prost(bool, tag = "3")] + pub cached: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModulesProgress { + #[prost(message, repeated, tag = "1")] + pub modules: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModuleProgress { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(oneof = "module_progress::Type", tags = "2, 3, 4, 5")] + pub r#type: ::core::option::Option, +} +/// Nested message and enum types in `ModuleProgress`. +pub mod module_progress { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ProcessedRanges { + #[prost(message, repeated, tag = "1")] + pub processed_ranges: ::prost::alloc::vec::Vec, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct InitialState { + #[prost(uint64, tag = "2")] + pub available_up_to_block: u64, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ProcessedBytes { + #[prost(uint64, tag = "1")] + pub total_bytes_read: u64, + #[prost(uint64, tag = "2")] + pub total_bytes_written: u64, + #[prost(uint64, tag = "3")] + pub bytes_read_delta: u64, + #[prost(uint64, tag = "4")] + pub bytes_written_delta: u64, + #[prost(uint64, tag = "5")] + pub nano_seconds_delta: u64, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Failed { + #[prost(string, tag = "1")] + pub reason: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "2")] + pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// FailureLogsTruncated is a flag that tells you if you received all the logs or if they + /// were truncated because you logged too much 
(fixed limit currently is set to 128 KiB). + #[prost(bool, tag = "3")] + pub logs_truncated: bool, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Type { + #[prost(message, tag = "2")] + ProcessedRanges(ProcessedRanges), + #[prost(message, tag = "3")] + InitialState(InitialState), + #[prost(message, tag = "4")] + ProcessedBytes(ProcessedBytes), + #[prost(message, tag = "5")] + Failed(Failed), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockRange { + #[prost(uint64, tag = "2")] + pub start_block: u64, + #[prost(uint64, tag = "3")] + pub end_block: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StoreDelta { + #[prost(enumeration = "store_delta::Operation", tag = "1")] + pub operation: i32, + #[prost(uint64, tag = "2")] + pub ordinal: u64, + #[prost(string, tag = "3")] + pub key: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "4")] + pub old_value: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "5")] + pub new_value: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `StoreDelta`. +pub mod store_delta { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Operation { + Unset = 0, + Create = 1, + Update = 2, + Delete = 3, + } + impl Operation { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Operation::Unset => "UNSET", + Operation::Create => "CREATE", + Operation::Update => "UPDATE", + Operation::Delete => "DELETE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSET" => Some(Self::Unset), + "CREATE" => Some(Self::Create), + "UPDATE" => Some(Self::Update), + "DELETE" => Some(Self::Delete), + _ => None, + } + } + } +} +/// Generated client implementations. +pub mod stream_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct StreamClient { + inner: tonic::client::Grpc, + } + impl StreamClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl StreamClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> StreamClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + StreamClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + pub async fn blocks( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sf.substreams.rpc.v2.Stream/Blocks", + ); + self.inner.server_streaming(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod stream_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. + #[async_trait] + pub trait Stream: Send + Sync + 'static { + /// Server streaming response type for the Blocks method. 
+ type BlocksStream: futures_core::Stream< + Item = Result, + > + + Send + + 'static; + async fn blocks( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + #[derive(Debug)] + pub struct StreamServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl StreamServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for StreamServer + where + T: Stream, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/sf.substreams.rpc.v2.Stream/Blocks" => { + #[allow(non_camel_case_types)] + struct BlocksSvc(pub Arc); + impl tonic::server::ServerStreamingService + for BlocksSvc { + type Response = super::Response; + type ResponseStream = T::BlocksStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).blocks(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = BlocksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for StreamServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + 
accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for StreamServer { + const NAME: &'static str = "sf.substreams.rpc.v2.Stream"; + } +} From 9d013f75f2a565e3d126737593e3a30d1b2f212e Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Fri, 28 Apr 2023 22:01:49 +0100 Subject: [PATCH 0175/2104] graph: Add adapter balancing to substreams (#4578) --- chain/substreams/examples/substreams.rs | 9 +++- chain/substreams/src/block_stream.rs | 4 +- .../src/blockchain/substreams_block_stream.rs | 54 +++++++++---------- 3 files changed, 35 insertions(+), 32 deletions(-) diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index e946fd957ef..619aaf0398f 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -1,8 +1,9 @@ use anyhow::{format_err, Context, Error}; use graph::blockchain::block_stream::BlockStreamEvent; +use graph::blockchain::client::ChainClient; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::endpoint::EndpointMetrics; -use graph::firehose::SubgraphLimit; +use graph::firehose::{FirehoseEndpoints, SubgraphLimit}; use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; use graph::tokio_stream::StreamExt; use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; @@ -57,10 +58,14 @@ async fn main() -> Result<(), Error> { Arc::new(endpoint_metrics), )); + let client = Arc::new(ChainClient::new_firehose(FirehoseEndpoints::from(vec![ + firehose, + ]))); + let mut stream: SubstreamsBlockStream = SubstreamsBlockStream::new( 
DeploymentHash::new("substreams".to_string()).unwrap(), - firehose.clone(), + client, None, None, Arc::new(Mapper {}), diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index 1dca7429b1a..736db1fcd7e 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -40,8 +40,6 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { filter: Arc, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { - let firehose_endpoint = chain.chain_client().firehose_endpoint()?; - let mapper = Arc::new(Mapper {}); let logger = chain @@ -51,7 +49,7 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { Ok(Box::new(SubstreamsBlockStream::new( deployment.hash, - firehose_endpoint, + chain.chain_client(), subgraph_current_block, block_cursor.as_ref().clone(), mapper, diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index 2470cb7a2b5..19f767312b5 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -1,7 +1,7 @@ use super::block_stream::SubstreamsMapper; +use super::client::ChainClient; use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; use crate::blockchain::Blockchain; -use crate::firehose::FirehoseEndpoint; use crate::prelude::*; use crate::substreams::Modules; use crate::substreams_rpc::{Request, Response}; @@ -15,7 +15,6 @@ use tonic::Status; struct SubstreamsBlockStreamMetrics { deployment: DeploymentHash, - provider: String, restarts: CounterVec, connect_duration: GaugeVec, time_between_responses: HistogramVec, @@ -23,15 +22,9 @@ struct SubstreamsBlockStreamMetrics { } impl SubstreamsBlockStreamMetrics { - pub fn new( - registry: Arc, - deployment: DeploymentHash, - provider: String, - ) -> Self { + pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { Self { deployment, - provider, - restarts: registry .global_counter_vec( 
"deployment_substreams_blockstream_restarts", @@ -66,36 +59,36 @@ impl SubstreamsBlockStreamMetrics { } } - fn observe_successful_connection(&self, time: &mut Instant) { + fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[&self.deployment, &self.provider, "true"]) + .with_label_values(&[&self.deployment, &provider, "true"]) .inc(); self.connect_duration - .with_label_values(&[&self.deployment, &self.provider]) + .with_label_values(&[&self.deployment, &provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp *time = Instant::now(); } - fn observe_failed_connection(&self, time: &mut Instant) { + fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { self.restarts - .with_label_values(&[&self.deployment, &self.provider, "false"]) + .with_label_values(&[&self.deployment, &provider, "false"]) .inc(); self.connect_duration - .with_label_values(&[&self.deployment, &self.provider]) + .with_label_values(&[&self.deployment, &provider]) .set(time.elapsed().as_secs_f64()); // Reset last connection timestamp *time = Instant::now(); } - fn observe_response(&self, kind: &str, time: &mut Instant) { + fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { self.time_between_responses - .with_label_values(&[&self.deployment, &self.provider]) + .with_label_values(&[&self.deployment, &provider]) .observe(time.elapsed().as_secs_f64()); self.responses - .with_label_values(&[&self.deployment, &self.provider, kind]) + .with_label_values(&[&self.deployment, &provider, kind]) .inc(); // Reset last response timestamp @@ -115,7 +108,7 @@ where { pub fn new( deployment: DeploymentHash, - endpoint: Arc, + client: Arc>, subgraph_current_block: Option, cursor: Option, mapper: Arc, @@ -133,13 +126,13 @@ where let manifest_end_block_num = end_blocks.into_iter().min().unwrap_or(0); - let metrics = - SubstreamsBlockStreamMetrics::new(registry, deployment, 
endpoint.provider.to_string()); + let metrics = SubstreamsBlockStreamMetrics::new(registry, deployment.clone()); SubstreamsBlockStream { stream: Box::pin(stream_blocks( - endpoint, + client, cursor, + deployment, mapper, modules, module_name, @@ -154,8 +147,9 @@ where } fn stream_blocks>( - endpoint: Arc, + client: Arc>, cursor: Option, + deployment: DeploymentHash, mapper: Arc, modules: Option, module_name: String, @@ -185,13 +179,18 @@ fn stream_blocks>( let mut skip_backoff = false; try_stream! { + let endpoint = client.firehose_endpoint()?; + let logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); + loop { info!( &logger, "Blockstreams disconnected, connecting"; "endpoint_uri" => format_args!("{}", endpoint), + "subgraph" => &deployment, "start_block" => start_block_num, "cursor" => &latest_cursor, + "provider_err_count" => endpoint.current_error_count(), ); // We just reconnected, assume that we want to back off on errors @@ -208,6 +207,7 @@ fn stream_blocks>( ..Default::default() }; + let result = endpoint.clone().substreams(request).await; match result { @@ -215,7 +215,7 @@ fn stream_blocks>( info!(&logger, "Blockstreams connected"); // Track the time it takes to set up the block stream - metrics.observe_successful_connection(&mut connect_start); + metrics.observe_successful_connection(&mut connect_start, &endpoint.provider); let mut last_response_time = Instant::now(); let mut expected_stream_end = false; @@ -233,7 +233,7 @@ fn stream_blocks>( // Reset backoff because we got a good value from the stream backoff.reset(); - metrics.observe_response("proceed", &mut last_response_time); + metrics.observe_response("proceed", &mut last_response_time, &endpoint.provider); yield event; @@ -249,7 +249,7 @@ fn stream_blocks>( // An example of this situation is if we get invalid block or transaction data // that cannot be decoded properly. 
- metrics.observe_response("error", &mut last_response_time); + metrics.observe_response("error", &mut last_response_time, &endpoint.provider); error!(logger, "{:#}", err); expected_stream_end = true; @@ -267,7 +267,7 @@ fn stream_blocks>( // case where we actually _want_ to back off in case we keep // having connection errors. - metrics.observe_failed_connection(&mut connect_start); + metrics.observe_failed_connection(&mut connect_start, &endpoint.provider); error!(logger, "Unable to connect to endpoint: {:#}", e); } From e4ba16d36a2a5af6da34478682e598fd0b5a0d2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 May 2023 18:16:29 +0100 Subject: [PATCH 0176/2104] build(deps): bump tokio from 1.27.0 to 1.28.0 (#4590) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.27.0 to 1.28.0. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.27.0...tokio-1.28.0) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 82 +++++++++++++++++++++++++++++++++++++++++++----- graph/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 76 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58e0ca2a4a4..e5e63ba1053 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4539,9 +4539,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg", "bytes", @@ -4553,7 +4553,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4568,9 +4568,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", @@ -5586,7 +5586,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] @@ -5595,21 +5604,42 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.32.0" @@ -5622,6 +5652,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.32.0" @@ -5634,6 +5670,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.32.0" @@ -5646,6 +5688,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.32.0" @@ -5658,12 +5706,24 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.32.0" @@ -5676,6 +5736,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winreg" version = "0.10.1" diff --git a/graph/Cargo.toml 
b/graph/Cargo.toml index 96dfb595b0d..b4d99b48ccc 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -44,7 +44,7 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" -tokio = { version = "1.27.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.28.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.12", features = ["sync"] } tokio-retry = "0.3.0" url = "2.3.1" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f64b82e87a0..0824c8728ec 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,7 +22,7 @@ hyper = "0.14" serde = "1.0" serde_yaml = "0.8" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } -tokio = { version = "1.27.0", features = ["rt", "macros", "process"] } +tokio = { version = "1.28.0", features = ["rt", "macros", "process"] } uuid = { version = "1.3.1", features = ["v4"] } [dev-dependencies] From c852b880a4a12fcdd5a22b8bdbc5277d88a8e7ce Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Thu, 4 May 2023 11:13:53 +0100 Subject: [PATCH 0177/2104] Limit big int (#4594) * runtime, graph: Limit bigint size * fix(runtime): make gas complexity of big int and decimal quadratic --- chain/ethereum/src/runtime/abi.rs | 6 +- graph/examples/stress.rs | 4 +- graph/src/data/query/error.rs | 7 - graph/src/data/store/ethereum.rs | 14 +- graph/src/data/store/mod.rs | 4 +- graph/src/data/store/scalar.rs | 205 +++++++++++------- graph/src/runtime/gas/size_of.rs | 17 -- runtime/test/src/test.rs | 28 ++- runtime/test/src/test/abi.rs | 2 +- .../api_version_0_0_5/big_int_size_limit.ts | 33 +++ .../api_version_0_0_5/big_int_size_limit.wasm | Bin 0 -> 8962 bytes .../api_version_0_0_5/common/types.ts | 8 +- runtime/wasm/src/host_exports.rs | 4 +- runtime/wasm/src/module/mod.rs | 2 +- runtime/wasm/src/to_from/external.rs | 6 +- 
store/test-store/tests/postgres/relational.rs | 2 +- .../tests/postgres/relational_bytes.rs | 2 +- 17 files changed, 213 insertions(+), 131 deletions(-) create mode 100644 runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.ts create mode 100644 runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.wasm diff --git a/chain/ethereum/src/runtime/abi.rs b/chain/ethereum/src/runtime/abi.rs index 92a18f499da..d88bf2b22d7 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -489,7 +489,7 @@ impl ToAscObj for EthereumTransactionData { ) -> Result { Ok(AscEthereumTransaction_0_0_1 { hash: asc_new(heap, &self.hash, gas)?, - index: asc_new(heap, &BigInt::from(self.index), gas)?, + index: asc_new(heap, &BigInt::from_unsigned_u128(self.index), gas)?, from: asc_new(heap, &self.from, gas)?, to: self .to @@ -510,7 +510,7 @@ impl ToAscObj for EthereumTransactionData { ) -> Result { Ok(AscEthereumTransaction_0_0_2 { hash: asc_new(heap, &self.hash, gas)?, - index: asc_new(heap, &BigInt::from(self.index), gas)?, + index: asc_new(heap, &BigInt::from_unsigned_u128(self.index), gas)?, from: asc_new(heap, &self.from, gas)?, to: self .to @@ -532,7 +532,7 @@ impl ToAscObj for EthereumTransactionData { ) -> Result { Ok(AscEthereumTransaction_0_0_6 { hash: asc_new(heap, &self.hash, gas)?, - index: asc_new(heap, &BigInt::from(self.index), gas)?, + index: asc_new(heap, &BigInt::from_unsigned_u128(self.index), gas)?, from: asc_new(heap, &self.from, gas)?, to: self .to diff --git a/graph/examples/stress.rs b/graph/examples/stress.rs index 2e3d77d57a6..5fbfb2f82ad 100644 --- a/graph/examples/stress.rs +++ b/graph/examples/stress.rs @@ -249,7 +249,7 @@ impl Template for BigInt { } None => 1, }; - BigInt::from(3u64).pow(size as u8) * BigInt::from(f) + BigInt::from(3u64).pow(size as u8).unwrap() * BigInt::from(f) } fn sample(&self, size: usize, rng: Option<&mut SmallRng>) -> Box { @@ -274,7 +274,7 @@ impl Template for BigDecimal { Some(rng) => 
rng.gen_range(-100..=100), None => 1, }; - let bi = BigInt::from(3u64).pow(size as u8) * BigInt::from(f); + let bi = BigInt::from(3u64).pow(size as u8).unwrap() * BigInt::from(f); BigDecimal::new(bi, exp) } diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index 3e64d37e5c4..c50220f6012 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -1,6 +1,5 @@ use graphql_parser::Pos; use hex::FromHexError; -use num_bigint; use serde::ser::*; use std::collections::HashMap; use std::error::Error; @@ -292,12 +291,6 @@ impl From for QueryExecutionError { } } -impl From for QueryExecutionError { - fn from(e: num_bigint::ParseBigIntError) -> Self { - QueryExecutionError::ValueParseError("BigInt".to_string(), format!("{}", e)) - } -} - impl From for QueryExecutionError { fn from(e: bigdecimal::ParseBigDecimalError) -> Self { QueryExecutionError::ValueParseError("BigDecimal".to_string(), format!("{}", e)) diff --git a/graph/src/data/store/ethereum.rs b/graph/src/data/store/ethereum.rs index ada156e36fb..7bbf2f17542 100644 --- a/graph/src/data/store/ethereum.rs +++ b/graph/src/data/store/ethereum.rs @@ -1,12 +1,6 @@ use super::scalar; use crate::prelude::*; -use web3::types::{Address, Bytes, H2048, H256, H64, U128, U256, U64}; - -impl From for Value { - fn from(n: U128) -> Value { - Value::BigInt(scalar::BigInt::from_signed_u256(&n.into())) - } -} +use web3::types::{Address, Bytes, H2048, H256, H64, U64}; impl From
for Value { fn from(address: Address) -> Value { @@ -43,9 +37,3 @@ impl From for Value { Value::BigInt(BigInt::from(n)) } } - -impl From for Value { - fn from(n: U256) -> Value { - Value::BigInt(BigInt::from_unsigned_u256(&n)) - } -} diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 62aa3298eda..dcaca923529 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -313,7 +313,9 @@ impl Value { // just a string. match n.as_str() { BYTES_SCALAR => Value::Bytes(scalar::Bytes::from_str(s)?), - BIG_INT_SCALAR => Value::BigInt(scalar::BigInt::from_str(s)?), + BIG_INT_SCALAR => Value::BigInt(scalar::BigInt::from_str(s).map_err(|e| { + QueryExecutionError::ValueParseError("BigInt".to_string(), format!("{}", e)) + })?), BIG_DECIMAL_SCALAR => Value::BigDecimal(scalar::BigDecimal::from_str(s)?), _ => Value::String(s.clone()), } diff --git a/graph/src/data/store/scalar.rs b/graph/src/data/store/scalar.rs index ad5b2df360e..43d9b168004 100644 --- a/graph/src/data/store/scalar.rs +++ b/graph/src/data/store/scalar.rs @@ -19,6 +19,7 @@ use std::str::FromStr; pub use num_bigint::Sign as BigIntSign; use crate::blockchain::BlockHash; +use crate::runtime::gas::{Gas, GasSizeOf, SaturatingInto}; use crate::util::stable_hash_glue::{impl_stable_hash, AsBytes}; /// All operations on `BigDecimal` return a normalized value. @@ -47,7 +48,7 @@ impl BigDecimal { pub fn new(digits: BigInt, exp: i64) -> Self { // bigdecimal uses `scale` as the opposite of the power of ten, so negate `exp`. 
- Self::from(bigdecimal::BigDecimal::new(digits.0, -exp)) + Self::from(bigdecimal::BigDecimal::new(digits.inner(), -exp)) } pub fn parse_bytes(bytes: &[u8]) -> Option { @@ -207,7 +208,11 @@ impl stable_hash_legacy::StableHash for BigDecimal { // This only allows for backward compatible changes between // BigDecimal and unsigned ints stable_hash_legacy::StableHash::stable_hash(&exp, sequence_number.next_child(), state); - stable_hash_legacy::StableHash::stable_hash(&BigInt(int), sequence_number, state); + stable_hash_legacy::StableHash::stable_hash( + &BigInt::unchecked_new(int), + sequence_number, + state, + ); } } @@ -218,17 +223,88 @@ impl StableHash for BigDecimal { let (int, exp) = self.as_bigint_and_exponent(); StableHash::stable_hash(&exp, field_address.child(1), state); // Normally it would be a red flag to pass field_address in after having used a child slot. - // But, we know the implementation of StableHash for BigInt will not use child(1) and that + // But, we know the implemecntation of StableHash for BigInt will not use child(1) and that // it will not in the future due to having no forward schema evolutions for ints and the // stability guarantee. // // For reference, ints use child(0) for the sign and write the little endian bytes to the parent slot. - BigInt(int).stable_hash(field_address, state); + BigInt::unchecked_new(int).stable_hash(field_address, state); + } +} + +impl GasSizeOf for BigDecimal { + fn gas_size_of(&self) -> Gas { + let (int, _) = self.as_bigint_and_exponent(); + BigInt::unchecked_new(int).gas_size_of() } } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct BigInt(num_bigint::BigInt); +// Use a private module to ensure a constructor is used. 
+pub use big_int::BigInt; +mod big_int { + use std::{ + f32::consts::LOG2_10, + fmt::{self, Display, Formatter}, + }; + + #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] + pub struct BigInt(num_bigint::BigInt); + + impl Display for BigInt { + fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { + self.0.fmt(f) + } + } + + impl BigInt { + // Postgres `numeric` has a limit documented here [https://www.postgresql.org/docs/current/datatype-numeric.htm]: + // "Up to 131072 digits before the decimal point; up to 16383 digits after the decimal point" + // So based on this we adopt a limit of 131072 decimal digits for big int, converted here to bits. + pub const MAX_BITS: u32 = (131072.0 * LOG2_10) as u32 + 1; // 435_412 + + pub fn new(inner: num_bigint::BigInt) -> Result { + // `inner.bits()` won't include the sign bit, so we add 1 to account for it. + let bits = inner.bits() + 1; + if bits > Self::MAX_BITS as usize { + anyhow::bail!( + "BigInt is too big, total bits {} (max {})", + bits, + Self::MAX_BITS + ); + } + Ok(Self(inner)) + } + + /// Creates a BigInt without checking the digit limit. 
+ pub(super) fn unchecked_new(inner: num_bigint::BigInt) -> Self { + Self(inner) + } + + pub fn sign(&self) -> num_bigint::Sign { + self.0.sign() + } + + pub fn to_bytes_le(&self) -> (super::BigIntSign, Vec) { + self.0.to_bytes_le() + } + + pub fn to_bytes_be(&self) -> (super::BigIntSign, Vec) { + self.0.to_bytes_be() + } + + pub fn to_signed_bytes_le(&self) -> Vec { + self.0.to_signed_bytes_le() + } + + pub fn bits(&self) -> usize { + self.0.bits() + } + + pub(super) fn inner(self) -> num_bigint::BigInt { + self.0 + } + } +} impl stable_hash_legacy::StableHash for BigInt { #[inline] @@ -238,7 +314,7 @@ impl stable_hash_legacy::StableHash for BigInt { state: &mut H, ) { stable_hash_legacy::utils::AsInt { - is_negative: self.0.sign() == BigIntSign::Minus, + is_negative: self.sign() == BigIntSign::Minus, little_endian: &self.to_bytes_le().1, } .stable_hash(sequence_number, state) @@ -248,7 +324,7 @@ impl stable_hash_legacy::StableHash for BigInt { impl StableHash for BigInt { fn stable_hash(&self, field_address: H::Addr, state: &mut H) { AsInt { - is_negative: self.0.sign() == BigIntSign::Minus, + is_negative: self.sign() == BigIntSign::Minus, little_endian: &self.to_bytes_le().1, } .stable_hash(field_address, state) @@ -301,31 +377,19 @@ impl fmt::Debug for BigInt { } impl BigInt { - pub fn from_unsigned_bytes_le(bytes: &[u8]) -> Self { - BigInt(num_bigint::BigInt::from_bytes_le( + pub fn from_unsigned_bytes_le(bytes: &[u8]) -> Result { + BigInt::new(num_bigint::BigInt::from_bytes_le( num_bigint::Sign::Plus, bytes, )) } - pub fn from_signed_bytes_le(bytes: &[u8]) -> Self { - BigInt(num_bigint::BigInt::from_signed_bytes_le(bytes)) - } - - pub fn from_signed_bytes_be(bytes: &[u8]) -> Self { - BigInt(num_bigint::BigInt::from_signed_bytes_be(bytes)) + pub fn from_signed_bytes_le(bytes: &[u8]) -> Result { + BigInt::new(num_bigint::BigInt::from_signed_bytes_le(bytes)) } - pub fn to_bytes_le(&self) -> (BigIntSign, Vec) { - self.0.to_bytes_le() - } - - pub fn 
to_bytes_be(&self) -> (BigIntSign, Vec) { - self.0.to_bytes_be() - } - - pub fn to_signed_bytes_le(&self) -> Vec { - self.0.to_signed_bytes_le() + pub fn from_signed_bytes_be(bytes: &[u8]) -> Result { + BigInt::new(num_bigint::BigInt::from_signed_bytes_be(bytes)) } /// Deprecated. Use try_into instead @@ -333,16 +397,24 @@ impl BigInt { self.try_into().unwrap() } + pub fn from_unsigned_u128(n: U128) -> Self { + let mut bytes: [u8; 16] = [0; 16]; + n.to_little_endian(&mut bytes); + // Unwrap: 128 bits is much less than BigInt::MAX_BITS + BigInt::from_unsigned_bytes_le(&bytes).unwrap() + } + pub fn from_unsigned_u256(n: &U256) -> Self { let mut bytes: [u8; 32] = [0; 32]; n.to_little_endian(&mut bytes); - BigInt::from_unsigned_bytes_le(&bytes) + // Unwrap: 256 bits is much less than BigInt::MAX_BITS + BigInt::from_unsigned_bytes_le(&bytes).unwrap() } pub fn from_signed_u256(n: &U256) -> Self { let mut bytes: [u8; 32] = [0; 32]; n.to_little_endian(&mut bytes); - BigInt::from_signed_bytes_le(&bytes) + BigInt::from_signed_bytes_le(&bytes).unwrap() } pub fn to_signed_u256(&self) -> U256 { @@ -370,44 +442,28 @@ impl BigInt { U256::from_little_endian(&bytes) } - pub fn pow(self, exponent: u8) -> Self { + pub fn pow(self, exponent: u8) -> Result { use num_traits::pow::Pow; - BigInt(self.0.pow(&exponent)) - } - - pub fn bits(&self) -> usize { - self.0.bits() - } -} - -impl Display for BigInt { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - self.0.fmt(f) - } -} - -impl From for BigInt { - fn from(big_int: num_bigint::BigInt) -> BigInt { - BigInt(big_int) + BigInt::new(self.inner().pow(&exponent)) } } impl From for BigInt { fn from(i: i32) -> BigInt { - BigInt(i.into()) + BigInt::unchecked_new(i.into()) } } impl From for BigInt { fn from(i: u64) -> BigInt { - BigInt(i.into()) + BigInt::unchecked_new(i.into()) } } impl From for BigInt { fn from(i: i64) -> BigInt { - BigInt(i.into()) + BigInt::unchecked_new(i.into()) } } @@ -422,24 +478,13 @@ impl From for 
BigInt { } } -impl From for BigInt { - /// This implementation assumes that U128 represents an unsigned U128, - /// and not a signed U128 (aka int128 in Solidity). Right now, this is - /// all we need (for block numbers). If it ever becomes necessary to - /// handle signed U128s, we should add the same - /// `{to,from}_{signed,unsigned}_u128` methods that we have for U256. - fn from(n: U128) -> BigInt { - let mut bytes: [u8; 16] = [0; 16]; - n.to_little_endian(&mut bytes); - BigInt::from_unsigned_bytes_le(&bytes) - } -} - impl FromStr for BigInt { - type Err = ::Err; + type Err = anyhow::Error; fn from_str(s: &str) -> Result { - num_bigint::BigInt::from_str(s).map(BigInt) + num_bigint::BigInt::from_str(s) + .map_err(anyhow::Error::from) + .and_then(BigInt::new) } } @@ -462,7 +507,7 @@ impl Add for BigInt { type Output = BigInt; fn add(self, other: BigInt) -> BigInt { - BigInt(self.0.add(other.0)) + BigInt::unchecked_new(self.inner().add(other.inner())) } } @@ -470,7 +515,7 @@ impl Sub for BigInt { type Output = BigInt; fn sub(self, other: BigInt) -> BigInt { - BigInt(self.0.sub(other.0)) + BigInt::unchecked_new(self.inner().sub(other.inner())) } } @@ -478,7 +523,7 @@ impl Mul for BigInt { type Output = BigInt; fn mul(self, other: BigInt) -> BigInt { - BigInt(self.0.mul(other.0)) + BigInt::unchecked_new(self.inner().mul(other.inner())) } } @@ -490,7 +535,7 @@ impl Div for BigInt { panic!("Cannot divide by zero-valued `BigInt`!") } - BigInt(self.0.div(other.0)) + BigInt::unchecked_new(self.inner().div(other.inner())) } } @@ -498,7 +543,7 @@ impl Rem for BigInt { type Output = BigInt; fn rem(self, other: BigInt) -> BigInt { - BigInt(self.0.rem(other.0)) + BigInt::unchecked_new(self.inner().rem(other.inner())) } } @@ -506,7 +551,7 @@ impl BitOr for BigInt { type Output = Self; fn bitor(self, other: Self) -> Self { - Self::from(self.0.bitor(other.0)) + BigInt::unchecked_new(self.inner().bitor(other.inner())) } } @@ -514,7 +559,7 @@ impl BitAnd for BigInt { type Output = 
Self; fn bitand(self, other: Self) -> Self { - Self::from(self.0.bitand(other.0)) + BigInt::unchecked_new(self.inner().bitand(other.inner())) } } @@ -522,7 +567,7 @@ impl Shl for BigInt { type Output = Self; fn shl(self, bits: u8) -> Self { - Self::from(self.0.shl(bits.into())) + BigInt::unchecked_new(self.inner().shl(bits.into())) } } @@ -530,7 +575,16 @@ impl Shr for BigInt { type Output = Self; fn shr(self, bits: u8) -> Self { - Self::from(self.0.shr(bits.into())) + BigInt::unchecked_new(self.inner().shr(bits.into())) + } +} + +impl GasSizeOf for BigInt { + fn gas_size_of(&self) -> Gas { + // Add one to always have an upper bound on the number of bytes required to represent the + // number, and so that `0` has a size of 1. + let n_bytes = self.bits() / 8 + 1; + n_bytes.saturating_into() } } @@ -658,7 +712,10 @@ mod test { same_stable_hash(1, BigInt::from(1u64)); same_stable_hash(1u64 << 20, BigInt::from(1u64 << 20)); - same_stable_hash(-1, BigInt::from_signed_bytes_le(&(-1i32).to_le_bytes())); + same_stable_hash( + -1, + BigInt::from_signed_bytes_le(&(-1i32).to_le_bytes()).unwrap(), + ); } #[test] diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index 8f4e535a1fd..59a5b2cba47 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -3,7 +3,6 @@ use crate::{ components::store::{EntityKey, EntityType, LoadRelatedRequest}, data::store::{scalar::Bytes, Value}, - prelude::{BigDecimal, BigInt}, }; use super::{Gas, GasSizeOf, SaturatingInto as _}; @@ -50,22 +49,6 @@ where } } -impl GasSizeOf for BigInt { - fn gas_size_of(&self) -> Gas { - // Add one to always have an upper bound on the number of bytes required to represent the - // number, and so that `0` has a size of 1. 
- let n_bytes = self.bits() / 8 + 1; - n_bytes.saturating_into() - } -} - -impl GasSizeOf for BigDecimal { - fn gas_size_of(&self) -> Gas { - let (int, _) = self.as_bigint_and_exponent(); - BigInt::from(int).gas_size_of() - } -} - impl GasSizeOf for str { fn gas_size_of(&self) -> Gas { self.len().saturating_into() diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index ed6f02c7a60..2fb234fdd39 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -71,6 +71,7 @@ async fn test_valid_module_and_store_with_timeout( "type User @entity { id: ID!, name: String, + count: BigInt, } type Thing @entity { @@ -299,7 +300,7 @@ async fn test_json_conversions(api_version: Version, gas_used: u64) { assert_eq!( scalar::BigInt::from_str(number).unwrap(), - scalar::BigInt::from_signed_bytes_le(&bytes) + scalar::BigInt::from_signed_bytes_le(&bytes).unwrap() ); assert_eq!(module.gas_used(), gas_used); @@ -677,6 +678,31 @@ async fn test_big_int_to_hex(api_version: Version, gas_used: u64) { assert_eq!(module.gas_used(), gas_used); } +#[tokio::test] +async fn test_big_int_size_limit() { + let module = test_module( + "BigIntSizeLimit", + mock_data_source( + &wasm_file_path("big_int_size_limit.wasm", API_VERSION_0_0_5), + API_VERSION_0_0_5, + ), + API_VERSION_0_0_5, + ) + .await; + + let len = BigInt::MAX_BITS / 8; + module + .invoke_export1_val_void("bigIntWithLength", len) + .unwrap(); + + let len = BigInt::MAX_BITS / 8 + 1; + assert!(module + .invoke_export1_val_void("bigIntWithLength", len) + .unwrap_err() + .to_string() + .contains("BigInt is too big, total bits 435416 (max 435412)")); +} + #[tokio::test] async fn big_int_to_hex_v0_0_4() { test_big_int_to_hex(API_VERSION_0_0_4, 53113760).await; diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs index 274d44395fc..5d8e2a1b864 100644 --- a/runtime/test/src/test/abi.rs +++ b/runtime/test/src/test/abi.rs @@ -350,7 +350,7 @@ async fn test_abi_store_value(api_version: Version) { let 
new_value: Value = module.asc_get(new_value_ptr).unwrap(); assert_eq!( new_value, - Value::BigInt(::graph::data::store::scalar::BigInt::from_unsigned_bytes_le(bytes)) + Value::BigInt(::graph::data::store::scalar::BigInt::from_unsigned_bytes_le(bytes).unwrap()) ); } diff --git a/runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.ts b/runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.ts new file mode 100644 index 00000000000..33700277740 --- /dev/null +++ b/runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.ts @@ -0,0 +1,33 @@ +export * from './common/global' +import { Entity, BigDecimal, Value, BigInt } from './common/types' + +/** Definitions copied from graph-ts/index.ts */ +declare namespace store { + function get(entity: string, id: string): Entity | null + function set(entity: string, id: string, data: Entity): void + function remove(entity: string, id: string): void +} + +/** Host interface for BigInt arithmetic */ +declare namespace bigInt { + function plus(x: BigInt, y: BigInt): BigInt + function minus(x: BigInt, y: BigInt): BigInt + function times(x: BigInt, y: BigInt): BigInt + function dividedBy(x: BigInt, y: BigInt): BigInt + function dividedByDecimal(x: BigInt, y: BigDecimal): BigDecimal + function mod(x: BigInt, y: BigInt): BigInt +} + +/** + * Test functions + */ +export function bigIntWithLength(bytes: u32): void { + let user = new Entity(); + user.set("id", Value.fromString("jhon")); + + let array = new Uint8Array(bytes); + array.fill(127); + let big_int = changetype(array); + user.set("count", Value.fromBigInt(big_int)); + store.set("User", "jhon", user); +} diff --git a/runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.wasm b/runtime/test/wasm_test/api_version_0_0_5/big_int_size_limit.wasm new file mode 100644 index 0000000000000000000000000000000000000000..400e92bc0a5f7ce2ae143ee56e0b1f28cc77feb3 GIT binary patch literal 8962 zcmeHNOK%*<5$?y#?(EKvI20*~Z}#j`5-E|?(-bYsHk&d>uh{ZS@hz*Bw34_am*R5B 
zM1f%{Mv#+pFp?jj0J#LP4}nhsa_}iY03*mLw*Urm)gj0srvS=VJu~~@!?Xd%fPpO0 zRo&Iq)m7EiJzXua%K8dtjPZH*w%FQYxA_*|q8gywA_5~CyKSKXL^Q+j0lRGziCU(Q zZ;44EtMyI0a%Z&>vCP3cix0|+^{BkQ_|s~6d2wYi%B@GMjq3DzHDXfEUa%RrEKJUX zlm;=*nZr33JSPNWSt*6c2Z*OtU+!!#bG0h1G0+N75 zz$TC(kR|YS`JDZkzmcdM$;%UP2^0u;1P&1BBH$C~CUB5I4}o3+hX@=daD>1hfgu9J z1V#vq5>Nz+1WE+P2plDFjKFaM;{+xMoFH(Lz$Aew0@DOe5jaiY41u!*&JoyVuW1_a z96#gSe;SYf8or~Y$^J5b(MK#?t4HrGMh|~btv`q!va{a1j~`X9-J5tT zmI_qu#8hI?PE17>dlGfR<<&}*vX(liDeqW1{ruvC*Q$3HS1QZ$XlkL+aDH`lSspvw z?uj;4kN3BCEJP2hjq1kA4=c+XRXN@<=pEW-amU1=_CeRzSL-qTL~4zhI9Y^~9a0uH ztM%y3N~5wOC&$~RQ2pNPl}8KpsPQ-++7di9u@7sTa?^bY?>j4%Ms#@<@+xgN}&ZM>_e8#P3k8}=hX?(6X>FIRo`BZ~8g`7krzsPv$cfHj&9?MO>2|$;>fNXEfE(dcV!?Vt=V>8y_Bx(+tu}r{aAron+DSQgR?c-&X3k69A?DhwobO<{1~HdA zDG>8oCk0{_Iw&D{!cGc!u5<)@+n&6dU%XddyQtM_mCJWmmzS$|BX+?pmm5*ESiVzPugaU5mFmiBQ#LHn+E4WdT=Q`;qRd=`Ovsz7`0@l(=Ed5n9sEpUWBn>eBvZAQdlKN)~Np zDf{|hU@2=3$MRM1%cR0w*HvtcyEz6Kd`2+$U2bhDrvHOOT0*p#(@KBIbfAZQ#$4u> zc)&|sTeUz4Fk%*oMpGwkExDLUD68fR#g*ktCEO8KdUG4s7%O37&3tRXwU}==Rj`^h zr&%jBYrgvir+M|WZ3_MkpSD;-?JHLEEqF&j8>1levrv=yj)GJC93b=a==N*w@1@9Y z@ec%?7}$!F?g3aezdit(T(IT}!d14CwIBmzU1UsUG+&BqQj6D94s`LDLx4jh92qT} zyqR`h2_~!A<-}c@DSJuFwIB;g*%~yGDjVDI{u=b!($9HaS%7QHfzwjU!P3R@Oik)U z9ymb2`V(@%QCOlH@j6LO>b|C0=qkj#evEo!)Cc~d=E%Ngt+!e8K8u`@?uWHv0B{>aw>;lv{n9sgx!S5Bp{j{sE^pldRr^_qQDWZ_({gy?lfQ( z;ub6GH6vvA`1zP4uCbsD3#xsc_1%CW^fB^)fn5e7azN>JA<}FBEjHsl@RaMna{VW! 
zFU2v2ttGTmL~JX0V^E8Tb{B@afvs8T#+3o6%nc)JB}`m_E(;D5QEgnXKxmGQquIiM z63`OWCz1yI8@I`LfQtjcgLK`yG%kG2H{H*0U&o$?5FSy?!>Oj0!9EmM2jE%jCQc<= zXV(yj*|o+3yz7#1M3Oy$;K=~IooqzykPHMk?N|(5GHYYq^ysvsAviy*VitQC9a$Hl znx!Z_#Tj-~hEPs9Dt84$prtPCel22=Kp7K-n0|pA1D!)G1qTx%C$zRS$033mw9|~J z{tz8RSn?qVj7f+LuqfdVFSyrnnrV2=n1b^hF7yYk(xFA>nKl>)L57GCs6-q@>B7kk zU*i}~6-u{+MOtA|yDbZE|K5i zTpP;4_#zI%S^)36Xz{qk(VAk6j!CGul&A;!a*Z4Wqez_gZ^mM1Mp#5g5;CwFCb1}t z;Zqzd4!E&KkMuD@9`r2@g2=(b~yx`tg}F{UYf{biBk# zRYU@f2QvK}{l+79>=8Tmh-u-Yka)yJKnNUsjB5X^uwJEUhwb;a_a-s$;5!bfq!S|FF&!SFzd(zHaIt+f^ zG16;}#IkN5xyQ=FCG6XE3QQI)lMl8`7OCz~+$WR$)T&HVabq$%!I5_6(_yj6og)u4 zM;enmNA6{^_+}=gjbEGk- zbL3tI;ZAGLF8^!onQtSm|1X11rPew0{qYB#mvokYe>DDXG)=!*-_30M&qDgpH*$Z@ z{W(b1*ZiKkzmvE*;w#yfyCHS3aL>T4h_X1kJtC!2a!J$2i$_aHc8EnuzXTqtUZaay za?!*MldhTRMTP3PMP@L@q=Ffjgdin>88+P^>0X$nkG;83*36AEzW$hNnK_1W-rh`v zU^d)TDcu1v&TPzW6<<`0HABo5Rmph3{?jD-n4!3x?O_Je*PWIixr8l6iO(!pN`?zNZp^p@;uZ`) z)k-`JEI5Ljd(!r|hK%A!!LU9SN|Lt4Ih%R6IIn{=O;H(ol0a_mOQZ*Q7NKw`nne4? zAl?gb3#0k`F{~`EGtk1tazQFCxV14JkEHh!_hpk4?RbLtNAn2+X8KEbhu}5;Xz~t$ zi*@qi@P|0t85a(s1urDZB$=u1aZAkUrzYj>(Wg`W$s&|IhH>@;O>&cq;FO2iT);5L_mECr-Bx(%;p3|oPW~5$!1>P z?&K$y?$IwMPKE+ek&-7jfNmy{2ZpuxPa)le?N_lYP^lI3niXA@SMK#eL?o)YA{r`xb&xVI zE9JDdaax_6hzFz;c!p3GJ@{dHYqZoj3jKnM6RRWf&$TE1m${@TfkOd@El-4gD*S$b z0#0E5v|0A&@M!4`|1=VOEFssQ(^+~3EMHn+hG)3@cgw7YNV1&&1%^YRV5y(dCSDq# zw27Ankv0Jhg&q_u4Tl~!SZO5mux(1Cp@*}gq(To1Q3^uO0V;-G4yY7*d7!ZnvCd0J zL$3gIEc85}II@2)zlQF!W9UT?xIDKvzR=vM}Y+ zs|7ts=sXHiQ(ZvuP#ly4C|xK%N;k?ulpd5`ltU;-Q2I~?Q1BL^hfzjQMo|<>fN~hc zLg`15C`A-I!6o2hDB~y-C?`-(qD-Ppp-iKkLOG3c2IVZuITYy2+zbVyg=b^zGKt4P zDhF8UEo%-OeC#bh+xi>K*$+~A| zuY&RbG9E(FKkhPW0Ha!SldcVnf83f^Kf}F-WT!uxi~a$fW@!HOM`9PAEa zLmr`}K-(}bRd6Y_>uA%mRnVp}3g*ysEe8-~9TZZZI3BmnXR7*{+-b})bnWKOJDBf2 z{;DvjV@dTV)=tOeA?2=d1lfU~{$dv{a_=qZNHWb&VMgurH=k6}`P8=de)LpM%9rAl zp2|zxd*SWlw)3TWewcI!*hfG5k0SJ&C+FbzVS)wxLO($(NEr4TOxp~(this.data as u32) } toBigDecimal(): BigDecimal { - assert(this.kind == ValueKind.BIGDECIMAL, 'Value is not a BigDecimal.') + 
assert(this.kind == ValueKind.BIG_DECIMAL, 'Value is not a BigDecimal.') return changetype(this.data as u32) } @@ -197,8 +197,8 @@ export class Value { static fromBigInt(n: BigInt): Value { let value = new Value() - value.kind = ValueKind.BIGINT - value.data = n as u64 + value.kind = ValueKind.BIG_INT + value.data = changetype(n) as u64 return value } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index e6f0d411193..1f58bf96644 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -512,7 +512,7 @@ impl HostExports { gas::BIG_MATH_GAS_OP .with_args(complexity::Exponential, (&x, (exp as f32).log2() as u8)), )?; - Ok(x.pow(exp)) + Ok(x.pow(exp)?) } pub(crate) fn big_int_from_string( @@ -638,7 +638,7 @@ impl HostExports { x: BigDecimal, gas: &GasCounter, ) -> Result { - gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(complexity::Size, &x))?; + gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(complexity::Mul, (&x, &x)))?; Ok(x.to_string()) } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 2cc6c1959bc..b9712e3035a 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -1177,7 +1177,7 @@ impl WasmInstanceContext { big_int_ptr: AscPtr, ) -> Result, HostExportError> { let n: BigInt = asc_get(self, big_int_ptr, gas)?; - gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &n))?; + gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Mul, (&n, &n)))?; asc_new(self, &n.to_string(), gas) } diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index 4f4ec4d01be..4e36823bc77 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -87,7 +87,7 @@ impl FromAscObj for BigInt { depth: usize, ) -> Result { let bytes = >::from_asc_obj(array_buffer, heap, gas, depth)?; - Ok(BigInt::from_signed_bytes_le(&bytes)) + 
Ok(BigInt::from_signed_bytes_le(&bytes)?) } } @@ -102,7 +102,7 @@ impl ToAscObj for BigDecimal { let (digits, negative_exp) = self.as_bigint_and_exponent(); Ok(AscBigDecimal { exp: asc_new(heap, &BigInt::from(-negative_exp), gas)?, - digits: asc_new(heap, &BigInt::from(digits), gas)?, + digits: asc_new(heap, &BigInt::new(digits)?, gas)?, }) } } @@ -275,7 +275,7 @@ impl FromAscObj> for store::Value { StoreValueKind::BigInt => { let ptr: AscPtr = AscPtr::from(payload); let array: Vec = asc_get(heap, ptr, gas, depth)?; - Value::BigInt(store::scalar::BigInt::from_signed_bytes_le(&array)) + Value::BigInt(store::scalar::BigInt::from_signed_bytes_le(&array)?) } }) } diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 2f5dbb599d0..5b94e0a8bff 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -155,7 +155,7 @@ const THINGS_GQL: &str = r#" lazy_static! { static ref THINGS_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("things").unwrap(); static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17); + static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 7de87497f5a..5c6c55bd637 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -42,7 +42,7 @@ const THINGS_GQL: &str = " lazy_static! 
{ static ref THINGS_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("things").unwrap(); - static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17); + static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); static ref BYTES_VALUE: H256 = H256::from(hex!( From e5f4fe3c1748c20b61aa85325face01c721e1a93 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Thu, 4 May 2023 11:14:13 +0100 Subject: [PATCH 0178/2104] runtime: Adjust gas cost and limit input to `json.fromBytes` (#4595) --- graph/src/runtime/gas/costs.rs | 7 +++++++ runtime/test/src/test.rs | 20 ++++++++++++++------ runtime/wasm/src/host_exports.rs | 14 ++++++++++++-- 3 files changed, 33 insertions(+), 8 deletions(-) diff --git a/graph/src/runtime/gas/costs.rs b/graph/src/runtime/gas/costs.rs index a4593a0d253..951532aeaab 100644 --- a/graph/src/runtime/gas/costs.rs +++ b/graph/src/runtime/gas/costs.rs @@ -74,3 +74,10 @@ pub const STORE_GET: GasOp = GasOp { }; pub const STORE_REMOVE: GasOp = STORE_SET; + +// Deeply nested JSON can take over 100x the memory of the serialized format, so multiplying the +// size cost by 100 makes sense. 
+pub const JSON_FROM_BYTES: GasOp = GasOp { + base_cost: DEFAULT_BASE_COST, + size_mult: DEFAULT_GAS_PER_BYTE * 100, +}; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 2fb234fdd39..65981c06c09 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -327,6 +327,15 @@ async fn test_json_parsing(api_version: Version, gas_used: u64) { ) .await; + // Parse valid JSON and get it back + let s = "\"foo\""; // Valid because there are quotes around `foo` + let bytes: &[u8] = s.as_ref(); + let return_value: AscPtr = module.invoke_export1("handleJsonError", bytes); + + let output: String = module.asc_get(return_value).unwrap(); + assert_eq!(output, "OK: foo, ERROR: false"); + assert_eq!(module.gas_used(), gas_used); + // Parse invalid JSON and handle the error gracefully let s = "foo"; // Invalid because there are no quotes around `foo` let bytes: &[u8] = s.as_ref(); @@ -334,24 +343,23 @@ async fn test_json_parsing(api_version: Version, gas_used: u64) { let output: String = module.asc_get(return_value).unwrap(); assert_eq!(output, "ERROR: true"); - // Parse valid JSON and get it back - let s = "\"foo\""; // Valid because there are quotes around `foo` + // Parse JSON that's too long and handle the error gracefully + let s = format!("\"f{}\"", "o".repeat(10_000_000)); let bytes: &[u8] = s.as_ref(); let return_value: AscPtr = module.invoke_export1("handleJsonError", bytes); let output: String = module.asc_get(return_value).unwrap(); - assert_eq!(output, "OK: foo, ERROR: false"); - assert_eq!(module.gas_used(), gas_used); + assert_eq!(output, "ERROR: true"); } #[tokio::test] async fn json_parsing_v0_0_4() { - test_json_parsing(API_VERSION_0_0_4, 2722284).await; + test_json_parsing(API_VERSION_0_0_4, 4373087).await; } #[tokio::test] async fn json_parsing_v0_0_5() { - test_json_parsing(API_VERSION_0_0_5, 3862933).await; + test_json_parsing(API_VERSION_0_0_5, 5153540).await; } async fn test_ipfs_cat(api_version: Version) { diff --git 
a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 1f58bf96644..44b3b8a018a 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -766,8 +766,18 @@ impl HostExports { bytes: &Vec, gas: &GasCounter, ) -> Result { - gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &bytes))?; - serde_json::from_reader(bytes.as_slice()) + // Max JSON size is 10MB. + const MAX_JSON_SIZE: usize = 10_000_000; + + gas.consume_host_fn(gas::JSON_FROM_BYTES.with_args(gas::complexity::Size, &bytes))?; + + if bytes.len() > MAX_JSON_SIZE { + return Err(DeterministicHostError::Other( + anyhow!("JSON size exceeds max size of {}", MAX_JSON_SIZE).into(), + )); + } + + serde_json::from_slice(bytes.as_slice()) .map_err(|e| DeterministicHostError::from(Error::from(e))) } From 76c4716a3520010efb1cfc034b19b21ef90520fd Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 10:59:02 -0700 Subject: [PATCH 0179/2104] runtime: Add tests around store.set's behavior for setting the id --- runtime/test/src/test.rs | 184 ++++++++++++++++++++++++++++++- runtime/wasm/src/host_exports.rs | 58 ++++++++++ 2 files changed, 240 insertions(+), 2 deletions(-) diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 65981c06c09..095e1f235da 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -3,12 +3,15 @@ use graph::data::subgraph::*; use graph::data::value::Word; use graph::prelude::web3::types::U256; use graph::prelude::*; -use graph::runtime::{AscIndexId, AscType}; +use graph::runtime::gas::GasCounter; +use graph::runtime::{AscIndexId, AscType, HostExportError}; use graph::runtime::{AscPtr, ToAscObj}; use graph::{components::store::*, ipfs_client::IpfsClient}; use graph_chain_ethereum::{Chain, DataSource}; use graph_runtime_wasm::asc_abi::class::{Array, AscBigInt, AscEntity, AscString, Uint8Array}; -use graph_runtime_wasm::{ExperimentalFeatures, ValidModule, WasmInstance}; +use 
graph_runtime_wasm::{ + host_exports, ExperimentalFeatures, MappingContext, ValidModule, WasmInstance, +}; use semver::Version; use std::collections::{BTreeMap, HashMap}; @@ -1209,3 +1212,180 @@ async fn recursion_limit() { .to_string() .contains("recursion limit reached")); } + +/// Test the various ways in which `store_set` sets the `id` of entities and +/// errors when there are issues +#[tokio::test] +async fn test_store_set_id() { + struct Host { + ctx: MappingContext, + host_exports: host_exports::test_support::HostExports, + stopwatch: StopwatchMetrics, + gas: GasCounter, + } + + impl Host { + async fn new() -> Host { + let version = ENV_VARS.mappings.max_api_version.clone(); + let wasm_file = wasm_file_path("boolean.wasm", API_VERSION_0_0_5); + + let ds = mock_data_source(&wasm_file, version.clone()); + + let store = STORE.clone(); + let deployment = DeploymentHash::new("hostStoreSetId".to_string()).unwrap(); + let deployment = test_store::create_test_subgraph( + &deployment, + "type User @entity { + id: ID!, + name: String, + } + + type Binary @entity { + id: Bytes! 
+ }", + ) + .await; + + let ctx = mock_context(deployment.clone(), ds, store.subgraph_store(), version); + let host_exports = host_exports::test_support::HostExports::new(&ctx); + + let metrics_registry = Arc::new(MetricsRegistry::mock()); + let stopwatch = StopwatchMetrics::new( + ctx.logger.clone(), + deployment.hash.clone(), + "test", + metrics_registry.clone(), + ); + let gas = GasCounter::new(); + + Host { + ctx, + host_exports, + stopwatch, + gas, + } + } + + fn store_set( + &mut self, + entity_type: &str, + id: &str, + data: Vec<(&str, &str)>, + ) -> Result<(), HostExportError> { + let data: Vec<_> = data.into_iter().map(|(k, v)| (k, Value::from(v))).collect(); + self.store_setv(entity_type, id, data) + } + + fn store_setv( + &mut self, + entity_type: &str, + id: &str, + data: Vec<(&str, Value)>, + ) -> Result<(), HostExportError> { + let id = String::from(id); + let data = HashMap::from_iter(data.into_iter().map(|(k, v)| (Word::from(k), v))); + self.host_exports.store_set( + &self.ctx.logger, + &mut self.ctx.state, + &self.ctx.proof_of_indexing, + entity_type.to_string(), + id, + data, + &self.stopwatch, + &self.gas, + ) + } + + fn store_get( + &mut self, + entity_type: &str, + id: &str, + ) -> Result, anyhow::Error> { + let user_id = String::from(id); + self.host_exports.store_get( + &mut self.ctx.state, + entity_type.to_string(), + user_id, + &self.gas, + ) + } + } + + #[track_caller] + fn err_says(err: E, exp: &str) { + let err = err.to_string(); + assert!(err.contains(exp), "expected `{err}` to contain `{exp}`"); + } + + const UID: &str = "u1"; + const USER: &str = "User"; + const BID: &str = "0xdeadbeef"; + const BINARY: &str = "Binary"; + + let mut host = Host::new().await; + + host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) + .expect("setting with same id works"); + + let err = host + .store_set(USER, UID, vec![("id", "ux"), ("name", "user1")]) + .expect_err("setting with different id fails"); + err_says(err, "conflicts with ID 
passed"); + + host.store_set(USER, UID, vec![("name", "user2")]) + .expect("setting with no id works"); + + let entity = host.store_get(USER, UID).unwrap().unwrap(); + assert_eq!( + "u1", + entity.id().unwrap().as_str(), + "store.set sets id automatically" + ); + + let beef = Value::Bytes("0xbeef".parse().unwrap()); + let err = host + .store_setv(USER, "0xbeef", vec![("id", beef)]) + .expect_err("setting with Bytes id fails"); + err_says(err, "must have type ID! but has type Bytes"); + + host.store_setv(USER, UID, vec![("id", Value::Int(32))]) + .expect_err("id must be a string"); + + // + // Now for bytes id + // + let bid_bytes = Value::Bytes(BID.parse().unwrap()); + + let err = host + .store_set(BINARY, BID, vec![("id", BID), ("name", "user1")]) + .expect_err("setting with string id in values fails"); + err_says(err, "must have type Bytes! but has type String"); + + host.store_setv( + BINARY, + BID, + vec![("id", bid_bytes), ("name", Value::from("user1"))], + ) + .expect("setting with bytes id in values works"); + + let beef = Value::Bytes("0xbeef".parse().unwrap()); + let err = host + .store_setv(BINARY, BID, vec![("id", beef)]) + .expect_err("setting with different id fails"); + err_says(err, "conflicts with ID passed"); + + host.store_set(BINARY, BID, vec![("name", "user2")]) + .expect("setting with no id works"); + + let entity = host.store_get(BINARY, BID).unwrap().unwrap(); + assert_eq!( + BID, + entity.id().unwrap().as_str(), + "store.set sets id automatically" + ); + + let err = host + .store_setv(BINARY, BID, vec![("id", Value::Int(32))]) + .expect_err("id must be Bytes"); + err_says(err, "must have type Bytes! 
but has type Int"); +} diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 44b3b8a018a..68bd8244cd2 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -860,6 +860,64 @@ fn bytes_to_string(logger: &Logger, bytes: Vec) -> String { s.trim_end_matches('\u{0000}').to_string() } +/// Expose some host functions for testing only +#[cfg(debug_assertions)] +pub mod test_support { + use std::{collections::HashMap, sync::Arc}; + + use graph::{ + blockchain::Blockchain, + components::{store::GetScope, subgraph::SharedProofOfIndexing}, + data::value::Word, + prelude::{BlockState, Entity, StopwatchMetrics, Value}, + runtime::{gas::GasCounter, HostExportError}, + slog::Logger, + }; + + use crate::MappingContext; + + pub struct HostExports(Arc>); + + impl HostExports { + pub fn new(ctx: &MappingContext) -> Self { + HostExports(ctx.host_exports.clone()) + } + + pub fn store_set( + &self, + logger: &Logger, + state: &mut BlockState, + proof_of_indexing: &SharedProofOfIndexing, + entity_type: String, + entity_id: String, + data: HashMap, + stopwatch: &StopwatchMetrics, + gas: &GasCounter, + ) -> Result<(), HostExportError> { + self.0.store_set( + logger, + state, + proof_of_indexing, + entity_type, + entity_id, + data, + stopwatch, + gas, + ) + } + + pub fn store_get( + &self, + state: &mut BlockState, + entity_type: String, + entity_id: String, + gas: &GasCounter, + ) -> Result, anyhow::Error> { + self.0 + .store_get(state, entity_type, entity_id, gas, GetScope::Store) + } + } +} #[test] fn test_string_to_h160_with_0x() { assert_eq!( From eb3ebd73c45529d0cf2e5088a3481761d0ff68e2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 12:09:27 -0700 Subject: [PATCH 0180/2104] graph, runtime: Move logic to auto-set `id` into store.set --- graph/src/components/store/entity_cache.rs | 31 +--------------------- graph/src/data/store/mod.rs | 8 ++++-- runtime/wasm/src/host_exports.rs | 30 
++++++++++++++++++++- 3 files changed, 36 insertions(+), 33 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index aee54d4914b..50c78945b95 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -168,36 +168,7 @@ impl EntityCache { /// with existing data. The entity will be validated against the /// subgraph schema, and any errors will result in an `Err` being /// returned. - pub fn set(&mut self, key: EntityKey, mut entity: Entity) -> Result<(), anyhow::Error> { - fn check_id(key: &EntityKey, prev_id: &str) -> Result<(), anyhow::Error> { - if prev_id != key.entity_id.as_str() { - Err(anyhow!( - "Value of {} attribute 'id' conflicts with ID passed to `store.set()`: \ - {} != {}", - key.entity_type, - prev_id, - key.entity_id, - )) - } else { - Ok(()) - } - } - - // Set the id if there isn't one yet, and make sure that a - // previously set id agrees with the one in the `key` - match entity.get("id") { - Some(s::Value::String(s)) => check_id(&key, s)?, - Some(s::Value::Bytes(b)) => check_id(&key, &b.to_string())?, - Some(_) => { - // The validation will catch the type mismatch - } - None => { - let value = self.schema.id_value(&key)?; - // unwrap: our AtomPool always has an id in it - entity.set("id", value).unwrap(); - } - } - + pub fn set(&mut self, key: EntityKey, entity: Entity) -> Result<(), anyhow::Error> { // check the validate for derived fields let is_valid = entity.validate(&self.schema, &key).is_ok(); diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index dcaca923529..0211ea02d9c 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,7 +1,7 @@ use crate::{ components::store::{DeploymentLocator, EntityKey, EntityType}, data::graphql::ObjectTypeExt, - prelude::{anyhow::Context, q, r, s, CacheWeight, QueryExecutionError}, + prelude::{anyhow::Context, lazy_static, q, r, s, CacheWeight, 
QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, schema::InputSchema, util::intern::AtomPool, @@ -135,7 +135,6 @@ impl AssignmentEvent { /// An entity attribute name is represented as a string. pub type Attribute = String; -pub const ID: &str = "ID"; pub const BYTES_SCALAR: &str = "Bytes"; pub const BIG_INT_SCALAR: &str = "BigInt"; pub const BIG_DECIMAL_SCALAR: &str = "BigDecimal"; @@ -597,6 +596,11 @@ where } } +lazy_static! { + /// The name of the id attribute, `"id"` + pub static ref ID: Word = Word::from("id"); +} + /// An entity is represented as a map of attribute names to values. #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct Entity(Object); diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 68bd8244cd2..0da17e2a61f 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -156,7 +156,7 @@ impl HostExports { proof_of_indexing: &SharedProofOfIndexing, entity_type: String, entity_id: String, - data: HashMap, + mut data: HashMap, stopwatch: &StopwatchMetrics, gas: &GasCounter, ) -> Result<(), HostExportError> { @@ -182,6 +182,34 @@ impl HostExports { gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; + fn check_id(key: &EntityKey, prev_id: &str) -> Result<(), anyhow::Error> { + if prev_id != key.entity_id.as_str() { + Err(anyhow!( + "Value of {} attribute 'id' conflicts with ID passed to `store.set()`: \ + {} != {}", + key.entity_type, + prev_id, + key.entity_id, + )) + } else { + Ok(()) + } + } + + // Set the id if there isn't one yet, and make sure that a + // previously set id agrees with the one in the `key` + match data.get(&store::ID) { + Some(Value::String(s)) => check_id(&key, s)?, + Some(Value::Bytes(b)) => check_id(&key, &b.to_string())?, + Some(_) => { + // The validation will catch the type mismatch + } + None => { + let value = state.entity_cache.schema.id_value(&key)?; + data.insert(store::ID.clone(), value); + } + } + let entity = 
state .entity_cache .make_entity(data.into_iter().map(|(key, value)| (key, value)))?; From f8a01c1451f996cc43b8be518ec5860454f27fa1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 12:18:53 -0700 Subject: [PATCH 0181/2104] graph: Refuse to create an Entity without id --- graph/src/data/store/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 0211ea02d9c..fe85df36fc5 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,4 +1,5 @@ use crate::{ + bail, components::store::{DeploymentLocator, EntityKey, EntityType}, data::graphql::ObjectTypeExt, prelude::{anyhow::Context, lazy_static, q, r, s, CacheWeight, QueryExecutionError}, @@ -731,6 +732,9 @@ impl Entity { ) })?; } + if !obj.contains_key(&ID) { + bail!("internal error: no id attribute for entity `{obj:?}`"); + } Ok(Entity(obj)) } @@ -744,6 +748,9 @@ impl Entity { obj.insert(key, value) .map_err(|e| anyhow!("unknown attribute {}", e.not_interned()))?; } + if !obj.contains_key(&ID) { + bail!("internal error: no id attribute for entity `{obj:?}`"); + } Ok(Entity(obj)) } From 39f18a6c58dc07ae7d25965f094a6ef43df9faff Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 14:01:14 -0700 Subject: [PATCH 0182/2104] all: Changes to entity! macro * Make it a test-only helper, not usable from production code * Remove the schemaless forms of it * Do not export entity! 
in graph::prelude --- core/src/subgraph/runner.rs | 14 +- graph/src/data/store/mod.rs | 134 ++----- graph/src/lib.rs | 1 - graph/src/schema/ast.rs | 46 ++- graph/src/schema/input_schema.rs | 12 + runtime/test/src/test.rs | 13 +- .../tests/chain/ethereum/manifest.rs | 14 +- store/test-store/tests/core/interfaces.rs | 364 ++++++++---------- store/test-store/tests/graph/entity_cache.rs | 28 +- store/test-store/tests/graphql/query.rs | 4 +- store/test-store/tests/postgres/graft.rs | 4 +- store/test-store/tests/postgres/relational.rs | 19 +- .../tests/postgres/relational_bytes.rs | 23 +- store/test-store/tests/postgres/store.rs | 57 +-- store/test-store/tests/postgres/writable.rs | 9 +- 15 files changed, 332 insertions(+), 410 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 9dc5e139ac6..861e9eeff7e 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -16,6 +16,7 @@ use graph::data::subgraph::{ schema::{SubgraphError, SubgraphHealth, POI_OBJECT}, SubgraphFeature, }; +use graph::data::value::Word; use graph::data_source::{ offchain, CausalityRegion, DataSource, DataSourceCreationError, DataSourceTemplate, TriggerData, }; @@ -1061,11 +1062,14 @@ async fn update_proof_of_indexing( // Put this onto an entity with the same digest attribute // that was expected before when reading. - let new_poi_entity = entity! 
{ - entity_cache.schema => - id: entity_key.entity_id.to_string(), - digest: updated_proof_of_indexing, - }?; + let data = vec![ + ( + graph::data::store::ID.clone(), + Value::from(entity_key.entity_id.to_string()), + ), + (Word::from("digest"), Value::from(updated_proof_of_indexing)), + ]; + let new_poi_entity = entity_cache.make_entity(data)?; entity_cache.set(entity_key, new_poi_entity)?; } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index fe85df36fc5..502ba21d6bd 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -614,16 +614,11 @@ pub trait TryIntoEntityIterator: IntoIterator impl>> TryIntoEntityIterator for T {} -/// The `entity!` macro is a convenient way to create entities. It comes in -/// two forms, one where a schema is provided and one where it is not. The -/// schema-less form can only be used in tests, since it creates an -/// `AtomPool` just for this entity behind the scenes. +/// The `entity!` macro is a convenient way to create entities in tests. It +/// can not be used in production code since it panics when creating the +/// entity goes wrong. /// -/// The form with schema returns a `Result` since it can be -/// used in production code. The schemaless form returns an `Entity` because -/// it unwraps the `Result` for you. -/// -/// Production code should always use the form with the schema +/// The macro takes a schema and a list of attribute names and values: /// ``` /// use graph::entity; /// use graph::schema::InputSchema; @@ -632,88 +627,18 @@ impl>> TryIntoEntityIterator< /// let id = DeploymentHash::new("Qm123").unwrap(); /// let schema = InputSchema::parse("type User @entity { id: String!, name: String! }", id).unwrap(); /// -/// let entity = entity! { schema => id: "1", name: "John Doe" }.unwrap(); -/// ``` -/// -/// Test code which often doesn't have access to an `InputSchema` can use -/// the form without the schema -/// ``` -/// use graph::entity; -/// let entity = entity! 
{ id: "1", name: "John Doe" }; -/// ``` -/// -/// In the test form, it is also possible to provide additional names after -/// a `;` that should be put into the `AtomPool` so that they can be set -/// later in the test -/// ``` -/// use graph::entity; -/// let entity = entity! { id: "1", name: "John Doe"; phone, email }; +/// let entity = entity! { schema => id: "1", name: "John Doe" }; /// ``` #[cfg(debug_assertions)] #[macro_export] macro_rules! entity { - () => { - { - let pairs = Vec::new(); - let pool = $crate::util::intern::AtomPool::new(); - Entity::make(std::sync::Arc::new(pool), pairs) - } - }; - ($($name:ident: $value:expr,)*) => { - { - let mut pairs = Vec::new(); - let mut pool = $crate::util::intern::AtomPool::new(); - $( - pool.intern(stringify!($name)); - pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); - )* - $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs).unwrap() - } - }; - ($($name:ident: $value:expr),*) => { - entity! {$($name: $value,)*} - }; - ($($name:ident: $value:expr,)*; $($extra:ident,)*) => { - { - let mut pairs = Vec::new(); - let mut pool = $crate::util::intern::AtomPool::new(); - $( - pool.intern(stringify!($name)); - pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); - )* - $( - pool.intern(stringify!($extra)); - )* - $crate::data::store::Entity::make(std::sync::Arc::new(pool), pairs).unwrap() - } - }; - ($($name:ident: $value:expr),*; $($extra:ident),*) => { - entity! {$($name: $value,)*; $($extra,)*} - }; ($schema:expr => $($name:ident: $value:expr,)*) => { { let mut result = Vec::new(); $( result.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); )* - $schema.make_entity(result) - } - }; - ($schema:expr => $($name:ident: $value:expr),*) => { - entity! 
{$schema => $($name: $value,)*} - }; -} - -#[cfg(not(debug_assertions))] -#[macro_export] -macro_rules! entity { - ($schema:expr => $($name:ident: $value:expr,)*) => { - { - let mut pairs = Vec::new(); - $( - pairs.push(($crate::data::value::Word::from(stringify!($name)), $crate::data::store::Value::from($value))); - )* - $schema.make_entity(pairs) + $schema.make_entity(result).unwrap() } }; ($schema:expr => $($name:ident: $value:expr),*) => { @@ -1006,34 +931,39 @@ fn value_bigint() { #[test] fn entity_validation() { + const DOCUMENT: &str = " + enum Color { red, yellow, blue } + interface Stuff { id: ID!, name: String! } + type Cruft @entity { + id: ID!, + thing: Thing! + } + type Thing @entity { + id: ID!, + name: String!, + favorite_color: Color, + stuff: Stuff, + things: [Thing!]! + # Make sure we do not validate derived fields; it's ok + # to store a thing with a null Cruft + cruft: Cruft! @derivedFrom(field: \"thing\") + }"; + + lazy_static! { + static ref SUBGRAPH: DeploymentHash = DeploymentHash::new("doesntmatter").unwrap(); + static ref SCHEMA: InputSchema = + InputSchema::parse(DOCUMENT, SUBGRAPH.clone()).expect("Failed to parse test schema"); + } + fn make_thing(name: &str) -> Entity { - entity! { id: name, name: name, stuff: "less", favorite_color: "red", things: Value::List(vec![]); cruft } + entity! { SCHEMA => id: name, name: name, stuff: "less", favorite_color: "red", things: Value::List(vec![]) } } fn check(thing: Entity, errmsg: &str) { - const DOCUMENT: &str = " - enum Color { red, yellow, blue } - interface Stuff { id: ID!, name: String! } - type Cruft @entity { - id: ID!, - thing: Thing! - } - type Thing @entity { - id: ID!, - name: String!, - favorite_color: Color, - stuff: Stuff, - things: [Thing!]! - # Make sure we do not validate derived fields; it's ok - # to store a thing with a null Cruft - cruft: Cruft! 
@derivedFrom(field: \"thing\") - }"; - let subgraph = DeploymentHash::new("doesntmatter").unwrap(); - let schema = InputSchema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); let id = thing.id().unwrap_or("none".to_owned()); let key = EntityKey::data("Thing".to_owned(), id.clone()); - let err = thing.validate(&schema, &key); + let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { assert!( err.is_ok(), diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 9f1b7d76ec0..fa88c192b87 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -64,7 +64,6 @@ pub use url; /// use graph::prelude::*; /// ``` pub mod prelude { - pub use super::entity; pub use ::anyhow; pub use anyhow::{anyhow, Context as _, Error}; pub use async_trait::async_trait; diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 550e43df7f4..70db5da3d62 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -411,34 +411,38 @@ fn entity_validation() { use crate::prelude::{DeploymentHash, Entity}; use crate::schema::InputSchema; + const DOCUMENT: &str = " + enum Color { red, yellow, blue } + interface Stuff { id: ID!, name: String! } + type Cruft @entity { + id: ID!, + thing: Thing! + } + type Thing @entity { + id: ID!, + name: String!, + favorite_color: Color, + stuff: Stuff, + things: [Thing!]! + # Make sure we do not validate derived fields; it's ok + # to store a thing with a null Cruft + cruft: Cruft! @derivedFrom(field: \"thing\") + }"; + + lazy_static! { + static ref SUBGRAPH: DeploymentHash = DeploymentHash::new("doesntmatter").unwrap(); + static ref SCHEMA: InputSchema = InputSchema::raw(DOCUMENT, "doesntmatter"); + } + fn make_thing(name: &str) -> Entity { - entity! { id: name, name: name, stuff: "less", favorite_color: "red", things: store::Value::List(vec![]); cruft} + entity! 
{ SCHEMA => id: name, name: name, stuff: "less", favorite_color: "red", things: store::Value::List(vec![]) } } fn check(thing: Entity, errmsg: &str) { - const DOCUMENT: &str = " - enum Color { red, yellow, blue } - interface Stuff { id: ID!, name: String! } - type Cruft @entity { - id: ID!, - thing: Thing! - } - type Thing @entity { - id: ID!, - name: String!, - favorite_color: Color, - stuff: Stuff, - things: [Thing!]! - # Make sure we do not validate derived fields; it's ok - # to store a thing with a null Cruft - cruft: Cruft! @derivedFrom(field: \"thing\") - }"; - let subgraph = DeploymentHash::new("doesntmatter").unwrap(); - let schema = InputSchema::parse(DOCUMENT, subgraph).expect("Failed to parse test schema"); let id = thing.id().unwrap_or("none".to_owned()); let key = EntityKey::data("Thing".to_owned(), id.clone()); - let err = thing.validate(&schema, &key); + let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { assert!( err.is_ok(), diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 1dcd5c2dd2a..e3f76381909 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -83,6 +83,18 @@ impl InputSchema { Ok(Self::create(schema)) } + /// Convenience for tests to construct an `InputSchema` + /// + /// # Panics + /// + /// If the `document` or `hash` can not be successfully converted + #[cfg(debug_assertions)] + #[track_caller] + pub fn raw(document: &str, hash: &str) -> Self { + let hash = DeploymentHash::new(hash).unwrap(); + Self::parse(document, hash).unwrap() + } + /// Generate the `ApiSchema` for use with GraphQL queries for this /// `InputSchema` pub fn api_schema(&self) -> Result { diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 095e1f235da..180db0100e0 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -2,11 +2,12 @@ use graph::data::store::scalar; use graph::data::subgraph::*; use graph::data::value::Word; use 
graph::prelude::web3::types::U256; -use graph::prelude::*; use graph::runtime::gas::GasCounter; use graph::runtime::{AscIndexId, AscType, HostExportError}; use graph::runtime::{AscPtr, ToAscObj}; +use graph::schema::InputSchema; use graph::{components::store::*, ipfs_client::IpfsClient}; +use graph::{entity, prelude::*}; use graph_chain_ethereum::{Chain, DataSource}; use graph_runtime_wasm::asc_abi::class::{Array, AscBigInt, AscEntity, AscString, Uint8Array}; use graph_runtime_wasm::{ @@ -431,7 +432,11 @@ async fn test_ipfs_block() { const USER_DATA: &str = "user_data"; fn make_thing(id: &str, value: &str) -> (String, EntityModification) { - let data = entity! { id: id, value: value, extra: USER_DATA }; + const DOCUMENT: &str = " type Thing @entity { id: String!, value: String!, extra: String }"; + lazy_static! { + static ref SCHEMA: InputSchema = InputSchema::raw(DOCUMENT, "doesntmatter"); + } + let data = entity! { SCHEMA => id: id, value: value, extra: USER_DATA }; let key = EntityKey::data("Thing".to_string(), id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), @@ -960,8 +965,8 @@ async fn test_entity_store(api_version: Version) { let schema = store.input_schema(&deployment.hash).unwrap(); - let alex = entity! { id: "alex", name: "Alex" }; - let steve = entity! { id: "steve", name: "Steve" }; + let alex = entity! { schema => id: "alex", name: "Alex" }; + let steve = entity! 
{ schema => id: "steve", name: "Steve" }; let user_type = EntityType::from("User"); test_store::insert_entities( &deployment, diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index cc87a237195..274750133b7 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -8,7 +8,7 @@ use graph::data_source::DataSourceTemplate; use graph::entity; use graph::prelude::{ anyhow, async_trait, serde_yaml, tokio, DeploymentHash, Link, Logger, SubgraphManifest, - SubgraphManifestValidationError, UnvalidatedSubgraphManifest, + SubgraphManifestValidationError, SubgraphStore, UnvalidatedSubgraphManifest, }; use graph::{ blockchain::NodeCapabilities as _, @@ -200,9 +200,13 @@ specVersion: 0.0.2 // Creates base subgraph at block 0 (genesis). let deployment = test_store::create_test_subgraph(&subgraph, GQL_SCHEMA).await; + let schema = store + .subgraph_store() + .input_schema(&deployment.hash) + .unwrap(); // Adds an example entity. - let thing = entity! { id: "datthing" }; + let thing = entity! { schema => id: "datthing" }; test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) .await .unwrap(); @@ -276,6 +280,10 @@ specVersion: 0.0.2 // Validation against subgraph that hasn't synced anything fails // let deployment = test_store::create_test_subgraph(&subgraph, GQL_SCHEMA).await; + let schema = store + .subgraph_store() + .input_schema(&deployment.hash) + .unwrap(); // This check is awkward since the test manifest has other problems // that the validation complains about as setting up a valid manifest // would be a bit more work; we just want to make sure that @@ -294,7 +302,7 @@ specVersion: 0.0.2 msg ); - let thing = entity! { id: "datthing" }; + let thing = entity! 
{ schema => id: "datthing" }; test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) .await .unwrap(); diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index 33a77faa967..43c4317d660 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -1,5 +1,7 @@ // Tests for graphql interfaces. +use graph::entity; +use graph::schema::InputSchema; use pretty_assertions::assert_eq; use graph::{components::store::EntityType, data::graphql::object}; @@ -63,14 +65,15 @@ async fn one_interface_zero_entities() { #[tokio::test] async fn one_interface_one_entity() { let subgraph_id = "oneInterfaceOneEntity"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int }"; + let schema = InputSchema::raw(document, subgraph_id); - let entity = ("Animal", entity! { id: "1", legs: 3 }); + let entity = ("Animal", entity! { schema => id: "1", legs: 3 }); // Collection query. let query = "query { leggeds(first: 100) { legs } }"; - let res = insert_and_query(subgraph_id, schema, vec![entity], query) + let res = insert_and_query(subgraph_id, document, vec![entity], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -79,7 +82,7 @@ async fn one_interface_one_entity() { // Query by ID. 
let query = "query { legged(id: \"1\") { legs } }"; - let res = insert_and_query(subgraph_id, schema, vec![], query) + let res = insert_and_query(subgraph_id, document, vec![], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -90,14 +93,15 @@ async fn one_interface_one_entity() { #[tokio::test] async fn one_interface_one_entity_typename() { let subgraph_id = "oneInterfaceOneEntityTypename"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int }"; + let schema = InputSchema::raw(document, subgraph_id); - let entity = ("Animal", entity! { id: "1", legs: 3 }); + let entity = ("Animal", entity! { schema => id: "1", legs: 3 }); let query = "query { leggeds(first: 100) { __typename } }"; - let res = insert_and_query(subgraph_id, schema, vec![entity], query) + let res = insert_and_query(subgraph_id, document, vec![entity], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -108,17 +112,18 @@ async fn one_interface_one_entity_typename() { #[tokio::test] async fn one_interface_multiple_entities() { let subgraph_id = "oneInterfaceMultipleEntities"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int } type Furniture implements Legged @entity { id: ID!, legs: Int } "; + let schema = InputSchema::raw(document, subgraph_id); - let animal = ("Animal", entity! { id: "1", legs: 3 }); - let furniture = ("Furniture", entity! { id: "2", legs: 4 }); + let animal = ("Animal", entity! { schema => id: "1", legs: 3 }); + let furniture = ("Furniture", entity! 
{ schema => id: "2", legs: 4 }); let query = "query { leggeds(first: 100, orderBy: legs) { legs } }"; - let res = insert_and_query(subgraph_id, schema, vec![animal, furniture], query) + let res = insert_and_query(subgraph_id, document, vec![animal, furniture], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -127,7 +132,7 @@ async fn one_interface_multiple_entities() { // Test for support issue #32. let query = "query { legged(id: \"2\") { legs } }"; - let res = insert_and_query(subgraph_id, schema, vec![], query) + let res = insert_and_query(subgraph_id, document, vec![], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -138,16 +143,17 @@ async fn one_interface_multiple_entities() { #[tokio::test] async fn reference_interface() { let subgraph_id = "ReferenceInterface"; - let schema = "type Leg @entity { id: ID! } + let document = "type Leg @entity { id: ID! } interface Legged { leg: Leg } type Animal implements Legged @entity { id: ID!, leg: Leg }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { leggeds(first: 100) { leg { id } } }"; - let leg = ("Leg", entity! { id: "1" }); - let animal = ("Animal", entity! { id: "1", leg: 1 }); + let leg = ("Leg", entity! { schema => id: "1" }); + let animal = ("Animal", entity! { schema => id: "1", leg: 1 }); - let res = insert_and_query(subgraph_id, schema, vec![leg, animal], query) + let res = insert_and_query(subgraph_id, document, vec![leg, animal], query) .await .unwrap(); @@ -161,7 +167,7 @@ async fn reference_interface_derived() { // Test the different ways in which interface implementations // can reference another entity let subgraph_id = "ReferenceInterfaceDerived"; - let schema = " + let document = " type Transaction @entity { id: ID!, buyEvent: BuyEvent!, @@ -191,20 +197,24 @@ async fn reference_interface_derived() { # Store the transaction directly transaction: Transaction! 
}"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { events { id transaction { id } } }"; - let buy = ("BuyEvent", entity! { id: "buy" }); - let sell1 = ("SellEvent", entity! { id: "sell1" }); - let sell2 = ("SellEvent", entity! { id: "sell2" }); - let gift = ("GiftEvent", entity! { id: "gift", transaction: "txn" }); + let buy = ("BuyEvent", entity! { schema => id: "buy" }); + let sell1 = ("SellEvent", entity! { schema => id: "sell1" }); + let sell2 = ("SellEvent", entity! { schema => id: "sell2" }); + let gift = ( + "GiftEvent", + entity! { schema => id: "gift", transaction: "txn" }, + ); let txn = ( "Transaction", - entity! {id: "txn", buyEvent: "buy", sellEvents: vec!["sell1", "sell2"] }, + entity! { schema => id: "txn", buyEvent: "buy", sellEvents: vec!["sell1", "sell2"] }, ); let entities = vec![buy, sell1, sell2, gift, txn]; - let res = insert_and_query(subgraph_id, schema, entities.clone(), query) + let res = insert_and_query(subgraph_id, document, entities.clone(), query) .await .unwrap(); @@ -256,22 +266,26 @@ async fn follow_interface_reference_invalid() { #[tokio::test] async fn follow_interface_reference() { let subgraph_id = "FollowInterfaceReference"; - let schema = "interface Legged { id: ID!, legs: Int! } + let document = "interface Legged { id: ID!, legs: Int! } type Animal implements Legged @entity { id: ID! legs: Int! parent: Legged }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { legged(id: \"child\") { ... on Animal { parent { id } } } }"; let parent = ( "Animal", - entity! { id: "parent", legs: 4, parent: Value::Null }, + entity! { schema => id: "parent", legs: 4, parent: Value::Null }, + ); + let child = ( + "Animal", + entity! { schema => id: "child", legs: 3, parent: "parent" }, ); - let child = ("Animal", entity! 
{ id: "child", legs: 3, parent: "parent" }); - let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, child], query) .await .unwrap(); @@ -285,17 +299,18 @@ async fn follow_interface_reference() { #[tokio::test] async fn conflicting_implementors_id() { let subgraph_id = "ConflictingImplementorsId"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, legs: Int } type Furniture implements Legged @entity { id: ID!, legs: Int } "; + let schema = InputSchema::raw(document, subgraph_id); - let animal = ("Animal", entity! { id: "1", legs: 3 }); - let furniture = ("Furniture", entity! { id: "1", legs: 3 }); + let animal = ("Animal", entity! { schema => id: "1", legs: 3 }); + let furniture = ("Furniture", entity! { schema => id: "1", legs: 3 }); let query = "query { leggeds(first: 100) { legs } }"; - let res = insert_and_query(subgraph_id, schema, vec![animal, furniture], query).await; + let res = insert_and_query(subgraph_id, document, vec![animal, furniture], query).await; let msg = res.unwrap_err().to_string(); // We don't know in which order the two entities get inserted; the two @@ -313,17 +328,18 @@ async fn conflicting_implementors_id() { #[tokio::test] async fn derived_interface_relationship() { let subgraph_id = "DerivedInterfaceRelationship"; - let schema = "interface ForestDweller { id: ID!, forest: Forest } + let document = "interface ForestDweller { id: ID!, forest: Forest } type Animal implements ForestDweller @entity { id: ID!, forest: Forest } type Forest @entity { id: ID!, dwellers: [ForestDweller]! @derivedFrom(field: \"forest\") } "; + let schema = InputSchema::raw(document, subgraph_id); - let forest = ("Forest", entity! { id: "1" }); - let animal = ("Animal", entity! { id: "1", forest: "1" }); + let forest = ("Forest", entity! 
{ schema => id: "1" }); + let animal = ("Animal", entity! { schema => id: "1", forest: "1" }); let query = "query { forests(first: 100) { dwellers(first: 100) { id } } }"; - let res = insert_and_query(subgraph_id, schema, vec![forest, animal], query) + let res = insert_and_query(subgraph_id, document, vec![forest, animal], query) .await .unwrap(); let data = extract_data!(res); @@ -336,7 +352,7 @@ async fn derived_interface_relationship() { #[tokio::test] async fn two_interfaces() { let subgraph_id = "TwoInterfaces"; - let schema = "interface IFoo { foo: String! } + let document = "interface IFoo { foo: String! } interface IBar { bar: Int! } type A implements IFoo @entity { id: ID!, foo: String! } @@ -344,16 +360,17 @@ async fn two_interfaces() { type AB implements IFoo & IBar @entity { id: ID!, foo: String!, bar: Int! } "; + let schema = InputSchema::raw(document, subgraph_id); - let a = ("A", entity! { id: "1", foo: "bla" }); - let b = ("B", entity! { id: "1", bar: 100 }); - let ab = ("AB", entity! { id: "2", foo: "ble", bar: 200 }); + let a = ("A", entity! { schema => id: "1", foo: "bla" }); + let b = ("B", entity! { schema => id: "1", bar: 100 }); + let ab = ("AB", entity! { schema => id: "2", foo: "ble", bar: 200 }); let query = "query { ibars(first: 100, orderBy: bar) { bar } ifoos(first: 100, orderBy: foo) { foo } }"; - let res = insert_and_query(subgraph_id, schema, vec![a, b, ab], query) + let res = insert_and_query(subgraph_id, document, vec![a, b, ab], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -367,14 +384,18 @@ async fn two_interfaces() { #[tokio::test] async fn interface_non_inline_fragment() { let subgraph_id = "interfaceNonInlineFragment"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, name: String, legs: Int }"; + let schema = InputSchema::raw(document, subgraph_id); - let entity = ("Animal", entity! 
{ id: "1", name: "cow", legs: 3 }); + let entity = ( + "Animal", + entity! { schema => id: "1", name: "cow", legs: 3 }, + ); // Query only the fragment. let query = "query { leggeds { ...frag } } fragment frag on Animal { name }"; - let res = insert_and_query(subgraph_id, schema, vec![entity], query) + let res = insert_and_query(subgraph_id, document, vec![entity], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -383,7 +404,7 @@ async fn interface_non_inline_fragment() { // Query the fragment and something else. let query = "query { leggeds { legs, ...frag } } fragment frag on Animal { name }"; - let res = insert_and_query(subgraph_id, schema, vec![], query) + let res = insert_and_query(subgraph_id, document, vec![], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -394,16 +415,20 @@ async fn interface_non_inline_fragment() { #[tokio::test] async fn interface_inline_fragment() { let subgraph_id = "interfaceInlineFragment"; - let schema = "interface Legged { legs: Int } + let document = "interface Legged { legs: Int } type Animal implements Legged @entity { id: ID!, name: String, legs: Int } type Bird implements Legged @entity { id: ID!, airspeed: Int, legs: Int }"; + let schema = InputSchema::raw(document, subgraph_id); - let animal = ("Animal", entity! { id: "1", name: "cow", legs: 4 }); - let bird = ("Bird", entity! { id: "2", airspeed: 24, legs: 2 }); + let animal = ( + "Animal", + entity! { schema => id: "1", name: "cow", legs: 4 }, + ); + let bird = ("Bird", entity! { schema => id: "2", airspeed: 24, legs: 2 }); let query = "query { leggeds(orderBy: legs) { ... 
on Animal { name } ...on Bird { airspeed } } }"; - let res = insert_and_query(subgraph_id, schema, vec![animal, bird], query) + let res = insert_and_query(subgraph_id, document, vec![animal, bird], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -414,7 +439,7 @@ async fn interface_inline_fragment() { #[tokio::test] async fn interface_inline_fragment_with_subquery() { let subgraph_id = "InterfaceInlineFragmentWithSubquery"; - let schema = " + let document = " interface Legged { legs: Int } type Parent @entity { id: ID! @@ -432,23 +457,24 @@ async fn interface_inline_fragment_with_subquery() { parent: Parent } "; + let schema = InputSchema::raw(document, subgraph_id); - let mama_cow = ("Parent", entity! { id: "mama_cow" }); + let mama_cow = ("Parent", entity! { schema => id: "mama_cow" }); let cow = ( "Animal", - entity! { id: "1", name: "cow", legs: 4, parent: "mama_cow" }, + entity! { schema => id: "1", name: "cow", legs: 4, parent: "mama_cow" }, ); - let mama_bird = ("Parent", entity! { id: "mama_bird" }); + let mama_bird = ("Parent", entity! { schema => id: "mama_bird" }); let bird = ( "Bird", - entity! { id: "2", airspeed: 5, legs: 2, parent: "mama_bird" }, + entity! { schema => id: "2", airspeed: 5, legs: 2, parent: "mama_bird" }, ); let query = "query { leggeds(orderBy: legs) { legs ... on Bird { airspeed parent { id } } } }"; let res = insert_and_query( subgraph_id, - schema, + document, vec![cow, mama_cow, bird, mama_bird], query, ) @@ -497,12 +523,13 @@ async fn invalid_fragment() { #[tokio::test] async fn alias() { let subgraph_id = "Alias"; - let schema = "interface Legged { id: ID!, legs: Int! } + let document = "interface Legged { id: ID!, legs: Int! } type Animal implements Legged @entity { id: ID! legs: Int! parent: Legged }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { l: legged(id: \"child\") { @@ -518,11 +545,14 @@ async fn alias() { let parent = ( "Animal", - entity! 
{ id: "parent", legs: 4, parent: Value::Null }, + entity! { schema => id: "parent", legs: 4, parent: Value::Null }, + ); + let child = ( + "Animal", + entity! { schema => id: "child", legs: 3, parent: "parent" }, ); - let child = ("Animal", entity! { id: "child", legs: 3, parent: "parent" }); - let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, child], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -543,7 +573,7 @@ async fn alias() { #[tokio::test] async fn fragments_dont_panic() { let subgraph_id = "FragmentsDontPanic"; - let schema = " + let document = " type Parent @entity { id: ID! child: Child @@ -553,6 +583,7 @@ async fn fragments_dont_panic() { id: ID! } "; + let schema = InputSchema::raw(document, subgraph_id); let query = " query { @@ -577,28 +608,11 @@ async fn fragments_dont_panic() { "; // The panic manifests if two parents exist. - let parent = ( - "Parent", - entity!( - id: "p", - child: "c", - ), - ); - let parent2 = ( - "Parent", - entity!( - id: "p2", - child: Value::Null, - ), - ); - let child = ( - "Child", - entity!( - id:"c" - ), - ); + let parent = ("Parent", entity! { schema => id: "p", child: "c" }); + let parent2 = ("Parent", entity! { schema => id: "p2", child: Value::Null }); + let child = ("Child", entity! { schema => id:"c" }); - let res = insert_and_query(subgraph_id, schema, vec![parent, parent2, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, parent2, child], query) .await .unwrap(); @@ -624,7 +638,7 @@ async fn fragments_dont_panic() { #[tokio::test] async fn fragments_dont_duplicate_data() { let subgraph_id = "FragmentsDupe"; - let schema = " + let document = " type Parent @entity { id: ID! children: [Child!]! @@ -634,6 +648,7 @@ async fn fragments_dont_duplicate_data() { id: ID! 
} "; + let schema = InputSchema::raw(document, subgraph_id); let query = " query { @@ -653,28 +668,14 @@ async fn fragments_dont_duplicate_data() { "; // This bug manifests if two parents exist. - let parent = ( - "Parent", - entity!( - id: "p", - children: vec!["c"] - ), - ); + let parent = ("Parent", entity! { schema => id: "p", children: vec!["c"] }); let parent2 = ( "Parent", - entity!( - id: "b", - children: Vec::::new() - ), - ); - let child = ( - "Child", - entity!( - id:"c" - ), + entity! { schema => id: "b", children: Vec::::new() }, ); + let child = ("Child", entity! { schema => id:"c" }); - let res = insert_and_query(subgraph_id, schema, vec![parent, parent2, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, parent2, child], query) .await .unwrap(); @@ -702,11 +703,12 @@ async fn fragments_dont_duplicate_data() { #[tokio::test] async fn redundant_fields() { let subgraph_id = "RedundantFields"; - let schema = "interface Legged { id: ID!, parent: Legged } + let document = "interface Legged { id: ID!, parent: Legged } type Animal implements Legged @entity { id: ID! parent: Legged }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { leggeds { @@ -719,20 +721,14 @@ async fn redundant_fields() { let parent = ( "Animal", - entity!( - id: "parent", - parent: Value::Null, - ), + entity! { schema => id: "parent", parent: Value::Null }, ); let child = ( "Animal", - entity!( - id: "child", - parent: "parent", - ), + entity! { schema => id: "child", parent: "parent" }, ); - let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, child], query) .await .unwrap(); @@ -757,7 +753,7 @@ async fn redundant_fields() { #[tokio::test] async fn fragments_merge_selections() { let subgraph_id = "FragmentsMergeSelections"; - let schema = " + let document = " type Parent @entity { id: ID! children: [Child!]! 
@@ -768,6 +764,7 @@ async fn fragments_merge_selections() { foo: Int! } "; + let schema = InputSchema::raw(document, subgraph_id); let query = " query { @@ -786,22 +783,10 @@ async fn fragments_merge_selections() { } "; - let parent = ( - "Parent", - entity!( - id: "p", - children: vec!["c"] - ), - ); - let child = ( - "Child", - entity!( - id: "c", - foo: 1, - ), - ); + let parent = ("Parent", entity! { schema => id: "p", children: vec!["c"] }); + let child = ("Child", entity! { schema => id: "c", foo: 1 }); - let res = insert_and_query(subgraph_id, schema, vec![parent, child], query) + let res = insert_and_query(subgraph_id, document, vec![parent, child], query) .await .unwrap(); @@ -826,7 +811,7 @@ async fn fragments_merge_selections() { #[tokio::test] async fn merge_fields_not_in_interface() { let subgraph_id = "MergeFieldsNotInInterface"; - let schema = "interface Iface { id: ID! } + let document = "interface Iface { id: ID! } type Animal implements Iface @entity { id: ID! human: Iface! @@ -836,6 +821,7 @@ async fn merge_fields_not_in_interface() { animal: Iface! } "; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { ifaces { @@ -854,22 +840,10 @@ async fn merge_fields_not_in_interface() { } }"; - let animal = ( - "Animal", - entity!( - id: "cow", - human: "fred", - ), - ); - let human = ( - "Human", - entity!( - id: "fred", - animal: "cow", - ), - ); + let animal = ("Animal", entity! { schema => id: "cow", human: "fred" }); + let human = ("Human", entity! { schema => id: "fred", animal: "cow" }); - let res = insert_and_query(subgraph_id, schema, vec![animal, human], query) + let res = insert_and_query(subgraph_id, document, vec![animal, human], query) .await .unwrap(); @@ -898,7 +872,7 @@ async fn merge_fields_not_in_interface() { #[tokio::test] async fn nested_interface_fragments() { let subgraph_id = "NestedInterfaceFragments"; - let schema = "interface I1face { id: ID!, foo1: Foo! 
} + let document = "interface I1face { id: ID!, foo1: Foo! } interface I2face { id: ID!, foo2: Foo! } interface I3face { id: ID!, foo3: Foo! } type Foo @entity { @@ -919,6 +893,7 @@ async fn nested_interface_fragments() { foo2: Foo! foo3: Foo! }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { i1Faces { @@ -939,38 +914,18 @@ async fn nested_interface_fragments() { } }"; - let foo = ( - "Foo", - entity!( - id: "foo", - ), - ); - let one = ( - "One", - entity!( - id: "1", - foo1: "foo", - ), - ); + let foo = ("Foo", entity! { schema => id: "foo" }); + let one = ("One", entity! { schema => id: "1", foo1: "foo" }); let two = ( "Two", - entity!( - id: "2", - foo1: "foo", - foo2: "foo", - ), + entity! { schema => id: "2", foo1: "foo", foo2: "foo" }, ); let three = ( "Three", - entity!( - id: "3", - foo1: "foo", - foo2: "foo", - foo3: "foo" - ), + entity! { schema => id: "3", foo1: "foo", foo2: "foo", foo3: "foo" }, ); - let res = insert_and_query(subgraph_id, schema, vec![foo, one, two, three], query) + let res = insert_and_query(subgraph_id, document, vec![foo, one, two, three], query) .await .unwrap(); @@ -1014,7 +969,7 @@ async fn nested_interface_fragments() { #[tokio::test] async fn nested_interface_fragments_overlapping() { let subgraph_id = "NestedInterfaceFragmentsOverlapping"; - let schema = "interface I1face { id: ID!, foo1: Foo! } + let document = "interface I1face { id: ID!, foo1: Foo! } interface I2face { id: ID!, foo1: Foo! } type Foo @entity { id: ID! @@ -1027,6 +982,7 @@ async fn nested_interface_fragments_overlapping() { id: ID! foo1: Foo! 
}"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { i1Faces { @@ -1039,27 +995,10 @@ async fn nested_interface_fragments_overlapping() { } }"; - let foo = ( - "Foo", - entity!( - id: "foo", - ), - ); - let one = ( - "One", - entity!( - id: "1", - foo1: "foo", - ), - ); - let two = ( - "Two", - entity!( - id: "2", - foo1: "foo", - ), - ); - let res = insert_and_query(subgraph_id, schema, vec![foo, one, two], query) + let foo = ("Foo", entity! { schema => id: "foo" }); + let one = ("One", entity! { schema => id: "1", foo1: "foo" }); + let two = ("Two", entity! { schema => id: "2", foo1: "foo" }); + let res = insert_and_query(subgraph_id, document, vec![foo, one, two], query) .await .unwrap(); @@ -1095,7 +1034,7 @@ async fn nested_interface_fragments_overlapping() { } }"; - let res = insert_and_query(subgraph_id, schema, vec![], query) + let res = insert_and_query(subgraph_id, document, vec![], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -1124,7 +1063,7 @@ async fn nested_interface_fragments_overlapping() { async fn enums() { use r::Value::Enum; let subgraph_id = "enums"; - let schema = r#" + let document = r#" enum Direction { NORTH EAST @@ -1137,20 +1076,21 @@ async fn enums() { direction: Direction! meters: Int! }"#; + let schema = InputSchema::raw(document, subgraph_id); let entities = vec![ ( "Trajectory", - entity! { id: "1", direction: "EAST", meters: 10 }, + entity! { schema => id: "1", direction: "EAST", meters: 10 }, ), ( "Trajectory", - entity! { id: "2", direction: "NORTH", meters: 15 }, + entity! 
{ schema => id: "2", direction: "NORTH", meters: 15 }, ), ]; let query = "query { trajectories { id, direction, meters } }"; - let res = insert_and_query(subgraph_id, schema, entities, query) + let res = insert_and_query(subgraph_id, document, entities, query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -1176,7 +1116,7 @@ async fn enums() { async fn enum_list_filters() { use r::Value::Enum; let subgraph_id = "enum_list_filters"; - let schema = r#" + let document = r#" enum Direction { NORTH EAST @@ -1189,24 +1129,25 @@ async fn enum_list_filters() { direction: Direction! meters: Int! }"#; + let schema = InputSchema::raw(document, subgraph_id); let entities = vec![ ( "Trajectory", - entity! { id: "1", direction: "EAST", meters: 10 }, + entity! { schema => id: "1", direction: "EAST", meters: 10 }, ), ( "Trajectory", - entity! { id: "2", direction: "NORTH", meters: 15 }, + entity! { schema => id: "2", direction: "NORTH", meters: 15 }, ), ( "Trajectory", - entity! { id: "3", direction: "WEST", meters: 20 }, + entity! { schema => id: "3", direction: "WEST", meters: 20 }, ), ]; let query = "query { trajectories(where: { direction_in: [NORTH, EAST] }) { id, direction } }"; - let res = insert_and_query(subgraph_id, schema, entities, query) + let res = insert_and_query(subgraph_id, document, entities, query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -1226,7 +1167,7 @@ async fn enum_list_filters() { ); let query = "query { trajectories(where: { direction_not_in: [EAST] }) { id, direction } }"; - let res = insert_and_query(subgraph_id, schema, vec![], query) + let res = insert_and_query(subgraph_id, document, vec![], query) .await .unwrap(); let data = extract_data!(res).unwrap(); @@ -1323,15 +1264,19 @@ async fn recursive_fragment() { #[tokio::test] async fn mixed_mutability() { let subgraph_id = "MixedMutability"; - let schema = "interface Event { id: String! } + let document = "interface Event { id: String! 
} type Mutable implements Event @entity { id: String!, name: String! } type Immutable implements Event @entity(immutable: true) { id: String!, name: String! }"; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { events { id } }"; let entities = vec![ - ("Mutable", entity! { id: "mut0", name: "mut0" }), - ("Immutable", entity! { id: "immo0", name: "immo0" }), + ("Mutable", entity! { schema => id: "mut0", name: "mut0" }), + ( + "Immutable", + entity! { schema => id: "immo0", name: "immo0" }, + ), ]; { @@ -1342,7 +1287,7 @@ async fn mixed_mutability() { let id = DeploymentHash::new(subgraph_id).unwrap(); remove_subgraph(&id); } - let res = insert_and_query(subgraph_id, schema, entities, query) + let res = insert_and_query(subgraph_id, document, entities, query) .await .unwrap(); @@ -1354,7 +1299,7 @@ async fn mixed_mutability() { #[tokio::test] async fn derived_interface_bytes() { let subgraph_id = "DerivedInterfaceBytes"; - let schema = r#" type Pool { + let document = r#" type Pool { id: Bytes!, trades: [Trade!]! @derivedFrom(field: "pool") } @@ -1372,16 +1317,17 @@ async fn derived_interface_bytes() { id: Bytes! pool: Pool! }"#; + let schema = InputSchema::raw(document, subgraph_id); let query = "query { pools { trades { id } } }"; let entities = vec![ - ("Pool", entity! { id: "0xf001" }), - ("Sell", entity! { id: "0xc0", pool: "0xf001"}), - ("Buy", entity! { id: "0xb0", pool: "0xf001"}), + ("Pool", entity! { schema => id: "0xf001" }), + ("Sell", entity! { schema => id: "0xc0", pool: "0xf001"}), + ("Buy", entity! 
{ schema => id: "0xb0", pool: "0xf001"}), ]; - let res = insert_and_query(subgraph_id, schema, entities, query) + let res = insert_and_query(subgraph_id, document, entities, query) .await .unwrap(); diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 5cb9fca0f36..27d8a2f342e 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -5,12 +5,12 @@ use graph::components::store::{ }; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; -use graph::prelude::*; use graph::schema::InputSchema; use graph::{ components::store::{DeploymentId, DeploymentLocator}, prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, }; +use graph::{entity, prelude::*}; use hex_literal::hex; use graph::semver::Version; @@ -205,11 +205,11 @@ fn insert_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let mogwai_data = entity! { id: "mogwai", name: "Mogwai" }; + let mogwai_data = entity! { SCHEMA => id: "mogwai", name: "Mogwai" }; let mogwai_key = make_band_key("mogwai"); cache.set(mogwai_key.clone(), mogwai_data.clone()).unwrap(); - let sigurros_data = entity! { id: "sigurros", name: "Sigur Ros" }; + let sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros" }; let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone()) @@ -250,8 +250,8 @@ fn overwrite_modifications() { // every set operation as an overwrite. let store = { let entities = vec![ - entity! { id: "mogwai", name: "Mogwai"; founded }, - entity! { id: "sigurros", name: "Sigur Ros"; founded }, + entity! { SCHEMA => id: "mogwai", name: "Mogwai" }, + entity! 
{ SCHEMA => id: "sigurros", name: "Sigur Ros" }, ]; MockStore::new(entity_version_map("Band", entities)) }; @@ -259,11 +259,11 @@ fn overwrite_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let mogwai_data = entity! { id: "mogwai", name: "Mogwai", founded: 1995 }; + let mogwai_data = entity! { SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 }; let mogwai_key = make_band_key("mogwai"); cache.set(mogwai_key.clone(), mogwai_data.clone()).unwrap(); - let sigurros_data = entity! { id: "sigurros", name: "Sigur Ros", founded: 1994 }; + let sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros", founded: 1994 }; let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone()) @@ -291,7 +291,7 @@ fn consecutive_modifications() { // `Value::Null`. let store = { let entities = - vec![entity! { id: "mogwai", name: "Mogwai", label: "Chemikal Underground"; founded }]; + vec![entity! { SCHEMA => id: "mogwai", name: "Mogwai", label: "Chemikal Underground" }]; MockStore::new(entity_version_map("Band", entities)) }; @@ -300,12 +300,13 @@ fn consecutive_modifications() { let mut cache = EntityCache::new(store); // First, add "founded" and change the "label". - let update_data = entity! { id: "mogwai", founded: 1995, label: "Rock Action Records" }; + let update_data = + entity! { SCHEMA => id: "mogwai", founded: 1995, label: "Rock Action Records" }; let update_key = make_band_key("mogwai"); cache.set(update_key, update_data).unwrap(); // Then, just reset the "label". - let update_data = entity! { id: "mogwai", label: Value::Null }; + let update_data = entity! 
{ SCHEMA => id: "mogwai", label: Value::Null }; let update_key = make_band_key("mogwai"); cache.set(update_key.clone(), update_data).unwrap(); @@ -316,7 +317,7 @@ fn consecutive_modifications() { sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![EntityModification::Overwrite { key: update_key, - data: entity! { id: "mogwai", name: "Mogwai", founded: 1995 } + data: entity! { SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 } }]) ); } @@ -459,7 +460,8 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator } fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityOperation { - let test_entity = entity! { id: id, name: name, email: email, age: age }; + let test_entity = + entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age }; EntityOperation::Set { key: EntityKey::data(ACCOUNT.to_owned(), id.to_owned()), @@ -468,7 +470,7 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO } fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { - entity! { id: id, account: account_id, balance: balance } + entity! { LOAD_RELATED_SUBGRAPH => id: id, account: account_id, balance: balance } } fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { let test_wallet = create_wallet_entity(id, account_id, balance); diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 1edf00e6894..6be99be3152 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -344,12 +344,12 @@ async fn insert_test_entities( entity! { is => __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, entity! { is => __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, entity! 
{ is => __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, - ].into_iter().collect::>().unwrap(); + ]; let entities1 = vec![ entity! { is => __typename: "Musician", id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, entity! { is => __typename: "Musician", id: "m4", name: "Valerie", bands: Vec::::new() }, - ].into_iter().collect::>().unwrap(); + ]; async fn insert_at(entities: Vec, deployment: &DeploymentLocator, block_ptr: BlockPtr) { let insert_ops = entities.into_iter().map(|data| EntityOperation::Set { diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 7194cb7ab15..6e168d6d6e4 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -12,8 +12,8 @@ use graph::components::store::{ use graph::data::store::scalar; use graph::data::subgraph::schema::*; use graph::data::subgraph::*; -use graph::prelude::*; use graph::semver::Version; +use graph::{entity, prelude::*}; use graph_store_postgres::{Shard, SubgraphStore as DieselSubgraphStore}; const USER_GQL: &str = " @@ -242,7 +242,7 @@ fn create_test_entity( favorite_color: Option<&str>, ) -> EntityOperation { let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); - let test_entity = entity! { + let test_entity = entity! { TEST_SUBGRAPH_SCHEMA => id: id, name: name, bin_name: bin_name, diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 5b94e0a8bff..86f814d0e29 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -154,6 +154,8 @@ const THINGS_GQL: &str = r#" lazy_static! 
{ static ref THINGS_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("things").unwrap(); + static ref THINGS_SCHEMA: InputSchema = + InputSchema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).expect("failed to parse schema"); static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = @@ -170,7 +172,7 @@ lazy_static! { static ref SCALAR_ENTITY: Entity = { let decimal = (*LARGE_DECIMAL).clone(); let big_int = (*LARGE_INT).clone(); - entity! { + entity! { THINGS_SCHEMA => id: "one", bool: true, int: std::i32::MAX, @@ -186,7 +188,7 @@ lazy_static! { } }; static ref EMPTY_NULLABLESTRINGS_ENTITY: Entity = { - entity! { + entity! { THINGS_SCHEMA => id: "one", } }; @@ -338,8 +340,7 @@ fn make_user( weight: BigDecimal::from(weight), coffee: coffee, favorite_color: favorite_color - } - .unwrap(); + }; if let Some(drinks) = drinks { user.insert("drinks", drinks.into()).unwrap(); } @@ -427,7 +428,7 @@ fn insert_pet( name: &str, block: BlockNumber, ) { - let pet = entity! { + let pet = entity! { layout.input_schema => id: id, name: name }; @@ -867,7 +868,7 @@ fn conflicting_entity() { let dog = EntityType::from(dog); let ferret = EntityType::from(ferret); - let fred = entity! { id: id.clone(), name: id.clone() }; + let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone() }; insert_entity(conn, layout, cat.as_str(), vec![fred]); // If we wanted to create Fred the dog, which is forbidden, we'd run this: @@ -904,7 +905,7 @@ fn revert_block() { let id = "fred"; let set_fred = |name, block| { - let fred = entity! { + let fred = entity! { layout.input_schema => id: id, name: name }; @@ -943,7 +944,7 @@ fn revert_block() { let set_marties = |from, to| { for block in from..=to { let id = format!("marty-{}", block); - let marty = entity! { + let marty = entity! 
{ layout.input_schema => id: id, order: block, }; @@ -1804,7 +1805,7 @@ fn check_filters() { conn, layout, "Ferret", - vec![entity! { + vec![entity! { layout.input_schema => id: "a1", name: "Test" }], diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 5c6c55bd637..2550d5aeecd 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -42,6 +42,9 @@ const THINGS_GQL: &str = " lazy_static! { static ref THINGS_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("things").unwrap(); + static ref THINGS_SCHEMA: InputSchema = + InputSchema::parse(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()) + .expect("Failed to parse THINGS_GQL"); static ref LARGE_INT: BigInt = BigInt::from(std::i64::MAX).pow(17).unwrap(); static ref LARGE_DECIMAL: BigDecimal = BigDecimal::from(1) / BigDecimal::new(LARGE_INT.clone(), 1); @@ -54,7 +57,7 @@ lazy_static! { static ref BYTES_VALUE3: H256 = H256::from(hex!( "977c084229c72a0fa377cae304eda9099b6a2cb5d83b25cdf0f0969b69874255" )); - static ref BEEF_ENTITY: Entity = entity! { + static ref BEEF_ENTITY: Entity = entity! { THINGS_SCHEMA => id: scalar::Bytes::from_str("deadbeef").unwrap(), name: "Beef", }; @@ -100,8 +103,7 @@ fn insert_thing(conn: &PgConnection, layout: &Layout, id: &str, name: &str) { entity! { layout.input_schema => id: id, name: name - } - .unwrap(), + }, ); } @@ -372,34 +374,29 @@ fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Ent id: ROOT, name: "root", children: vec!["babe01", "babe02"] - } - .unwrap(); + }; let child1 = entity! { layout.input_schema => id: CHILD1, name: "child1", parent: "dead00", children: vec![GRANDCHILD1] - } - .unwrap(); + }; let child2 = entity! { layout.input_schema => id: CHILD2, name: "child2", parent: "dead00", children: vec![GRANDCHILD1] - } - .unwrap(); + }; let grand_child1 = entity! 
{ layout.input_schema => id: GRANDCHILD1, name: "grandchild1", parent: CHILD1 - } - .unwrap(); + }; let grand_child2 = entity! { layout.input_schema => id: GRANDCHILD2, name: "grandchild2", parent: CHILD2 - } - .unwrap(); + }; insert_entity(conn, layout, "Thing", root.clone()); insert_entity(conn, layout, "Thing", child1.clone()); diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 7326de0c1ca..ce00282de7d 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -13,7 +13,6 @@ use test_store::*; use graph::components::store::{DeploymentLocator, EntityKey, WritableStore}; use graph::data::subgraph::*; -use graph::prelude::*; use graph::{ blockchain::DataSource, components::store::{ @@ -23,6 +22,7 @@ use graph::{ prelude::ethabi::Contract, }; use graph::{data::store::scalar, semver::Version}; +use graph::{entity, prelude::*}; use graph_store_postgres::layout_for_tests::STRING_PREFIX_SIZE; use graph_store_postgres::{Store as DieselStore, SubgraphStore as DieselSubgraphStore}; use web3::types::{Address, H256}; @@ -267,7 +267,7 @@ fn create_test_entity( favorite_color: Option<&str>, ) -> EntityOperation { let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); - let test_entity = entity! { + let test_entity = entity! { TEST_SUBGRAPH_SCHEMA => id: id, name: name, bin_name: bin_name, @@ -330,11 +330,13 @@ fn delete_entity() { #[test] fn get_entity_1() { run_test(|_, writable, _| async move { + let schema = writable.input_schema(); + let key = EntityKey::data(USER.to_owned(), "1".to_owned()); let result = writable.get(&key).unwrap(); let bin_name = Value::Bytes("Johnton".as_bytes().into()); - let expected_entity = entity! { + let expected_entity = entity! 
{ schema => id: "1", name: "Johnton", bin_name: bin_name, @@ -355,10 +357,11 @@ fn get_entity_1() { #[test] fn get_entity_3() { run_test(|_, writable, _| async move { + let schema = writable.input_schema(); let key = EntityKey::data(USER.to_owned(), "3".to_owned()); let result = writable.get(&key).unwrap(); - let expected_entity = entity! { + let expected_entity = entity! { schema => id: "3", name: "Shaqueeena", bin_name: Value::Bytes("Shaqueeena".as_bytes().into()), @@ -455,8 +458,9 @@ fn update_existing() { fn partially_update_existing() { run_test(|store, writable, deployment| async move { let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let schema = writable.input_schema(); - let partial_entity = entity! { id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; let original_entity = writable .get(&entity_key) @@ -1064,8 +1068,9 @@ fn revert_block_with_delete() { fn revert_block_with_partial_update() { run_test(|store, writable, deployment| async move { let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let schema = writable.input_schema(); - let partial_entity = entity! { id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; let original_entity = writable.get(&entity_key).unwrap().expect("missing entity"); @@ -1155,10 +1160,11 @@ fn mock_abi() -> MappingABI { fn revert_block_with_dynamic_data_source_operations() { run_test(|store, writable, deployment| async move { let subgraph_store = store.subgraph_store(); + let schema = writable.input_schema(); // Create operations to add a user let user_key = EntityKey::data(USER.to_owned(), "1".to_owned()); - let partial_entity = entity! { id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; // Get the original user for comparisons let original_user = writable.get(&user_key).unwrap().expect("missing entity"); @@ -1274,8 +1280,11 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { // Add two entities to the store let added_entities = vec![ - ("1".to_owned(), entity! { id: "1", name: "Johnny Boy" }), - ("2".to_owned(), entity! { id: "2", name: "Tessa" }), + ( + "1".to_owned(), + entity! { schema => id: "1", name: "Johnny Boy" }, + ), + ("2".to_owned(), entity! { schema => id: "2", name: "Tessa" }), ]; transact_entity_operations( &store.subgraph_store(), @@ -1293,7 +1302,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { .unwrap(); // Update an entity in the store - let updated_entity = entity! { id: "1", name: "Johnny" }; + let updated_entity = entity! { schema => id: "1", name: "Johnny" }; let update_op = EntityOperation::Set { key: EntityKey::data(USER.to_owned(), "1".to_owned()), data: updated_entity.clone(), @@ -1483,8 +1492,8 @@ fn handle_large_string_with_index() { const ONE: &str = "large_string_one"; const TWO: &str = "large_string_two"; - fn make_insert_op(id: &str, name: &str) -> EntityModification { - let data = entity! { id: id, name: name }; + fn make_insert_op(id: &str, name: &str, schema: &InputSchema) -> EntityModification { + let data = entity! { schema => id: id, name: name }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); @@ -1492,6 +1501,8 @@ fn handle_large_string_with_index() { } run_test(|store, writable, deployment| async move { + let schema = writable.input_schema(); + // We have to produce a massive string (1_000_000 chars) because // the repeated text compresses so well. 
This leads to an error // 'index row requires 11488 bytes, maximum size is 8191' if @@ -1512,8 +1523,8 @@ fn handle_large_string_with_index() { TEST_BLOCK_3_PTR.clone(), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_text), - make_insert_op(TWO, &other_text), + make_insert_op(ONE, &long_text, &schema), + make_insert_op(TWO, &other_text, &schema), ], &stopwatch_metrics, Vec::new(), @@ -1572,8 +1583,8 @@ fn handle_large_bytea_with_index() { const ONE: &str = "large_string_one"; const TWO: &str = "large_string_two"; - fn make_insert_op(id: &str, name: &[u8]) -> EntityModification { - let data = entity! { id: id, bin_name: scalar::Bytes::from(name) }; + fn make_insert_op(id: &str, name: &[u8], schema: &InputSchema) -> EntityModification { + let data = entity! { schema => id: id, bin_name: scalar::Bytes::from(name) }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); @@ -1581,6 +1592,8 @@ fn handle_large_bytea_with_index() { } run_test(|store, writable, deployment| async move { + let schema = writable.input_schema(); + // We have to produce a massive bytea (240_000 bytes) because the // repeated text compresses so well. This leads to an error 'index // row size 2784 exceeds btree version 4 maximum 2704' if used with @@ -1606,8 +1619,8 @@ fn handle_large_bytea_with_index() { TEST_BLOCK_3_PTR.clone(), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_bytea), - make_insert_op(TWO, &other_bytea), + make_insert_op(ONE, &long_bytea, &schema), + make_insert_op(TWO, &other_bytea, &schema), ], &stopwatch_metrics, Vec::new(), @@ -1770,8 +1783,8 @@ impl WindowQuery { #[test] fn window() { - fn make_color_end_age(entity_type: &str, id: &str, color: &str, age: i32) -> EntityOperation { - let entity = entity! { id: id, age: age, favorite_color: color }; + fn make_color_and_age(entity_type: &str, id: &str, color: &str, age: i32) -> EntityOperation { + let entity = entity! 
{ TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color }; EntityOperation::Set { key: EntityKey::data(entity_type.to_owned(), id.to_owned()), @@ -1780,11 +1793,11 @@ fn window() { } fn make_user(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_end_age(USER, id, color, age) + make_color_and_age(USER, id, color, age) } fn make_person(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_end_age("Person", id, color, age) + make_color_and_age("Person", id, color, age) } let ops = vec![ diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index fca57ea42bc..caf402f9e82 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -7,8 +7,8 @@ use test_store::*; use graph::components::store::{DeploymentLocator, EntityKey, WritableStore}; use graph::data::subgraph::*; -use graph::prelude::*; use graph::semver::Version; +use graph::{entity, prelude::*}; use graph_store_postgres::layout_for_tests::writable; use graph_store_postgres::{Store as DieselStore, SubgraphStore as DieselSubgraphStore}; use web3::types::H256; @@ -107,7 +107,7 @@ fn count_key(id: &str) -> EntityKey { } async fn insert_count(store: &Arc, deployment: &DeploymentLocator, count: u8) { - let data = entity! { + let data = entity! { TEST_SUBGRAPH_SCHEMA => id: "1", count: count as i32 }; @@ -170,11 +170,12 @@ fn tracker() { fn restart() { run_test(|store, writable, deployment| async move { let subgraph_store = store.subgraph_store(); + let schema = subgraph_store.input_schema(&deployment.hash).unwrap(); // Cause an error by leaving out the non-nullable `count` attribute let entity_ops = vec![EntityOperation::Set { key: count_key("1"), - data: entity! { id: "1" }, + data: entity! 
{ schema => id: "1" }, }]; transact_entity_operations( &subgraph_store, @@ -198,7 +199,7 @@ fn restart() { // Retry our write with correct data let entity_ops = vec![EntityOperation::Set { key: count_key("1"), - data: entity! { id: "1", count: 1 }, + data: entity! { schema => id: "1", count: 1 }, }]; // `SubgraphStore` caches the correct writable so that this call // uses the restarted writable, and is equivalent to using From dbdb599c3a5da40111006e76b89ecb7e5cadbaa4 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 14:47:54 -0700 Subject: [PATCH 0183/2104] graph: Check that an Entity has a reasonable id on construction --- graph/src/data/store/mod.rs | 27 ++++++++++++++++++--------- runtime/test/src/test.rs | 2 +- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 502ba21d6bd..af1f2d72204 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,5 +1,4 @@ use crate::{ - bail, components::store::{DeploymentLocator, EntityKey, EntityType}, data::graphql::ObjectTypeExt, prelude::{anyhow::Context, lazy_static, q, r, s, CacheWeight, QueryExecutionError}, @@ -657,10 +656,9 @@ impl Entity { ) })?; } - if !obj.contains_key(&ID) { - bail!("internal error: no id attribute for entity `{obj:?}`"); - } - Ok(Entity(obj)) + let entity = Entity(obj); + entity.check_id()?; + Ok(entity) } pub fn try_make>( @@ -673,10 +671,9 @@ impl Entity { obj.insert(key, value) .map_err(|e| anyhow!("unknown attribute {}", e.not_interned()))?; } - if !obj.contains_key(&ID) { - bail!("internal error: no id attribute for entity `{obj:?}`"); - } - Ok(Entity(obj)) + let entity = Entity(obj); + entity.check_id()?; + Ok(entity) } pub fn get(&self, key: &str) -> Option<&Value> { @@ -702,6 +699,18 @@ impl Entity { v } + fn check_id(&self) -> Result<(), Error> { + match self.get("id") { + None => Err(anyhow!( + "internal error: no id attribute for entity `{:?}`", + self.0 + )), + 
Some(Value::String(_)) => Ok(()), + Some(Value::Bytes(_)) => Ok(()), + _ => Err(anyhow!("Entity has non-string `id` attribute")), + } + } + /// Return the ID of this entity. If the ID is a string, return the /// string. If it is `Bytes`, return it as a hex string with a `0x` /// prefix. If the ID is not set or anything but a `String` or `Bytes`, diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 180db0100e0..1571eb4caec 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1392,5 +1392,5 @@ async fn test_store_set_id() { let err = host .store_setv(BINARY, BID, vec![("id", Value::Int(32))]) .expect_err("id must be Bytes"); - err_says(err, "must have type Bytes! but has type Int"); + err_says(err, "Entity has non-string `id` attribute"); } From a9e43b4093e74746d5691c37c9df492f75cbf6d8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 14:54:14 -0700 Subject: [PATCH 0184/2104] all: Entity.id() can now always succeed --- graph/src/data/store/mod.rs | 11 +++++----- graph/src/schema/ast.rs | 2 +- runtime/test/src/test.rs | 8 ++------ server/index-node/src/resolver.rs | 2 +- store/postgres/src/deployment_store.rs | 2 +- store/postgres/src/relational.rs | 6 +++--- store/test-store/tests/graph/entity_cache.rs | 2 +- store/test-store/tests/postgres/graft.rs | 4 ++-- store/test-store/tests/postgres/relational.rs | 8 ++++---- .../tests/postgres/relational_bytes.rs | 8 ++++---- store/test-store/tests/postgres/store.rs | 20 ++++++++----------- 11 files changed, 32 insertions(+), 41 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index af1f2d72204..e330092c85f 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -715,12 +715,11 @@ impl Entity { /// string. If it is `Bytes`, return it as a hex string with a `0x` /// prefix. 
If the ID is not set or anything but a `String` or `Bytes`, /// return an error - pub fn id(&self) -> Result { + pub fn id(&self) -> String { match self.get("id") { - None => Err(anyhow!("Entity is missing an `id` attribute")), - Some(Value::String(s)) => Ok(s.clone()), - Some(Value::Bytes(b)) => Ok(b.to_string()), - _ => Err(anyhow!("Entity has non-string `id` attribute")), + Some(Value::String(s)) => s.clone(), + Some(Value::Bytes(b)) => b.to_string(), + None | Some(_) => unreachable!("we checked the id when constructing this entity"), } } @@ -969,7 +968,7 @@ fn entity_validation() { } fn check(thing: Entity, errmsg: &str) { - let id = thing.id().unwrap_or("none".to_owned()); + let id = thing.id(); let key = EntityKey::data("Thing".to_owned(), id.clone()); let err = thing.validate(&SCHEMA, &key); diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 70db5da3d62..8c588e63273 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -439,7 +439,7 @@ fn entity_validation() { } fn check(thing: Entity, errmsg: &str) { - let id = thing.id().unwrap_or("none".to_owned()); + let id = thing.id(); let key = EntityKey::data("Thing".to_owned(), id.clone()); let err = thing.validate(&SCHEMA, &key); diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 1571eb4caec..c37e7c9e8d0 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1343,7 +1343,7 @@ async fn test_store_set_id() { let entity = host.store_get(USER, UID).unwrap().unwrap(); assert_eq!( "u1", - entity.id().unwrap().as_str(), + entity.id().as_str(), "store.set sets id automatically" ); @@ -1383,11 +1383,7 @@ async fn test_store_set_id() { .expect("setting with no id works"); let entity = host.store_get(BINARY, BID).unwrap().unwrap(); - assert_eq!( - BID, - entity.id().unwrap().as_str(), - "store.set sets id automatically" - ); + assert_eq!(BID, entity.id().as_str(), "store.set sets id automatically"); let err = host .store_setv(BINARY, BID, vec![("id", 
Value::Int(32))]) diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index e05e0fa8377..b12bcff6833 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -750,7 +750,7 @@ fn entity_changes_to_graphql(entity_changes: Vec) -> r::Value { let mut deletions_graphql: Vec = Vec::with_capacity(deletions.len()); for (entity_type, mut entities) in updates { - entities.sort_unstable_by_key(|e| e.id().unwrap_or("no-id".to_string())); + entities.sort_unstable_by_key(|e| e.id()); updates_graphql.push(object! { type: entity_type.to_string(), entities: diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 1ee4ad343d1..2a58caa4ac1 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1061,7 +1061,7 @@ impl DeploymentStore { let mut by_causality_region = entities .into_iter() .map(|e| { - let causality_region = e.id()?; + let causality_region = e.id(); let digest = match e.get("digest") { Some(Value::Bytes(b)) => Ok(b.clone()), other => Err(anyhow::anyhow!( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index ee87e0e0b00..ce740547b37 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -553,7 +553,7 @@ impl Layout { let key = EntityKey { entity_type, - entity_id: entity_data.id()?.into(), + entity_id: Word::from(entity_data.id()), causality_region: CausalityRegion::from_entity(&entity_data), }; let overwrite = entities.insert(key, entity_data).is_some(); @@ -581,7 +581,7 @@ impl Layout { let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; let key = EntityKey { entity_type, - entity_id: entity_data.id()?.into(), + entity_id: Word::from(entity_data.id()), causality_region: CausalityRegion::from_entity(&entity_data), }; @@ -615,7 +615,7 @@ impl Layout { for entity_data in inserts_or_updates.into_iter() { let entity_type = 
entity_data.entity_type(); let data: Entity = entity_data.deserialize_with_layout(self, None, true)?; - let entity_id = Word::from(data.id().expect("Invalid ID for entity.")); + let entity_id = Word::from(data.id()); processed_entities.insert((entity_type.clone(), entity_id.clone())); changes.push(EntityOperation::Set { diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 27d8a2f342e..8b21c081942 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -236,7 +236,7 @@ fn entity_version_map(entity_type: &str, entities: Vec) -> BTreeMap>(); (entities, ids) } @@ -555,7 +555,7 @@ fn prune() { .find(query) .unwrap() .into_iter() - .map(|entity| entity.id().unwrap()) + .map(|entity| entity.id()) .collect(); assert_eq!( act, exp, diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 86f814d0e29..0b01c8f0112 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -220,7 +220,7 @@ fn insert_entity_at( let entities_with_keys_owned = entities .drain(..) .map(|entity| { - let key = EntityKey::data(entity_type.to_owned(), entity.id().unwrap()); + let key = EntityKey::data(entity_type.to_owned(), entity.id()); (key, entity) }) .collect::>(); @@ -259,7 +259,7 @@ fn update_entity_at( let entities_with_keys_owned: Vec<(EntityKey, Entity)> = entities .drain(..) 
.map(|entity| { - let key = EntityKey::data(entity_type.to_owned(), entity.id().unwrap()); + let key = EntityKey::data(entity_type.to_owned(), entity.id()); (key, entity) }) .collect(); @@ -584,7 +584,7 @@ fn update() { entity.set("string", "updated").unwrap(); entity.remove("strings"); entity.set("bool", Value::Null).unwrap(); - let key = EntityKey::data("Scalar".to_owned(), entity.id().unwrap()); + let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); let mut entities = vec![(&key, Cow::from(&entity))]; @@ -711,7 +711,7 @@ fn serialize_bigdecimal() { let d = BigDecimal::from_str(d).unwrap(); entity.set("bigDecimal", d).unwrap(); - let key = EntityKey::data("Scalar".to_owned(), entity.id().unwrap().clone()); + let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); let mut entities = vec![(&key, Cow::Borrowed(&entity))]; layout diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 2550d5aeecd..41ce0db37e9 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -79,7 +79,7 @@ fn remove_test_data(conn: &PgConnection) { } fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity: Entity) { - let key = EntityKey::data(entity_type.to_owned(), entity.id().unwrap()); + let key = EntityKey::data(entity_type.to_owned(), entity.id()); let entity_type = EntityType::from(entity_type); let mut entities = vec![(&key, Cow::from(&entity))]; @@ -298,9 +298,9 @@ fn update() { // Update the entity let mut entity = BEEF_ENTITY.clone(); entity.set("name", "Moo").unwrap(); - let key = EntityKey::data("Thing".to_owned(), entity.id().unwrap()); + let key = EntityKey::data("Thing".to_owned(), entity.id()); - let entity_id = entity.id().unwrap(); + let entity_id = entity.id(); let entity_type = key.entity_type.clone(); let mut 
entities = vec![(&key, Cow::from(&entity))]; layout @@ -416,7 +416,7 @@ fn query() { .map(|(entities, _)| entities) .expect("the query succeeds") .into_iter() - .map(|e| e.id().expect("entities have an id")) + .map(|e| e.id()) .collect::>() } diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index ce00282de7d..8f30ca6e229 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -1544,14 +1544,13 @@ fn handle_large_string_with_index() { )) .asc(NAME); - let ids = store + let ids: Vec<_> = store .subgraph_store() .find(query) .expect("Could not find entity") .iter() .map(|e| e.id()) - .collect::, _>>() - .expect("Found entities without an id"); + .collect(); assert_eq!(vec![ONE], ids); @@ -1563,14 +1562,13 @@ fn handle_large_string_with_index() { .filter(EntityFilter::LessOrEqual(NAME.to_owned(), prefix.into())) .asc(NAME); - let ids = store + let ids: Vec<_> = store .subgraph_store() .find(query) .expect("Could not find entity") .iter() .map(|e| e.id()) - .collect::, _>>() - .expect("Found entities without an id"); + .collect(); // Users with name 'Cindini' and 'Johnton' assert_eq!(vec!["2", "1"], ids); @@ -1640,14 +1638,13 @@ fn handle_large_bytea_with_index() { )) .asc(NAME); - let ids = store + let ids: Vec<_> = store .subgraph_store() .find(query) .expect("Could not find entity") .iter() .map(|e| e.id()) - .collect::, _>>() - .expect("Found entities without an id"); + .collect(); assert_eq!(vec![ONE], ids); @@ -1658,14 +1655,13 @@ fn handle_large_bytea_with_index() { .filter(EntityFilter::LessOrEqual(NAME.to_owned(), prefix.into())) .asc(NAME); - let ids = store + let ids: Vec<_> = store .subgraph_store() .find(query) .expect("Could not find entity") .iter() .map(|e| e.id()) - .collect::, _>>() - .expect("Found entities without an id"); + .collect(); // Users with name 'Cindini' and 'Johnton' assert_eq!(vec!["2", "1"], ids); From 618d0fb1dfca7749e4e4d51131e7069845ee2f2e 
Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 15:05:40 -0700 Subject: [PATCH 0185/2104] graph, store: Return Word from Entity.id() --- graph/src/data/store/mod.rs | 6 +++--- graph/src/data/value.rs | 12 ++++++++++++ store/postgres/src/relational.rs | 6 +++--- store/test-store/tests/postgres/graft.rs | 3 ++- store/test-store/tests/postgres/relational_bytes.rs | 3 ++- 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index e330092c85f..151b5513279 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -715,10 +715,10 @@ impl Entity { /// string. If it is `Bytes`, return it as a hex string with a `0x` /// prefix. If the ID is not set or anything but a `String` or `Bytes`, /// return an error - pub fn id(&self) -> String { + pub fn id(&self) -> Word { match self.get("id") { - Some(Value::String(s)) => s.clone(), - Some(Value::Bytes(b)) => b.to_string(), + Some(Value::String(s)) => Word::from(s.clone()), + Some(Value::Bytes(b)) => Word::from(b.to_string()), None | Some(_) => unreachable!("we checked the id when constructing this entity"), } } diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 768ff909343..365c1c7eecc 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -97,6 +97,18 @@ impl AsRef for Word { } } +impl PartialEq<&str> for Word { + fn eq(&self, other: &&str) -> bool { + self.as_str() == *other + } +} + +impl PartialEq for &str { + fn eq(&self, other: &Word) -> bool { + self == &other.as_str() + } +} + #[derive(Clone, Debug, PartialEq)] struct Entry { key: Option, diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index ce740547b37..f83d537f0fd 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -553,7 +553,7 @@ impl Layout { let key = EntityKey { entity_type, - entity_id: Word::from(entity_data.id()), + entity_id: entity_data.id(), 
causality_region: CausalityRegion::from_entity(&entity_data), }; let overwrite = entities.insert(key, entity_data).is_some(); @@ -581,7 +581,7 @@ impl Layout { let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; let key = EntityKey { entity_type, - entity_id: Word::from(entity_data.id()), + entity_id: entity_data.id(), causality_region: CausalityRegion::from_entity(&entity_data), }; @@ -615,7 +615,7 @@ impl Layout { for entity_data in inserts_or_updates.into_iter() { let entity_type = entity_data.entity_type(); let data: Entity = entity_data.deserialize_with_layout(self, None, true)?; - let entity_id = Word::from(data.id()); + let entity_id = data.id(); processed_entities.insert((entity_type.clone(), entity_id.clone())); changes.push(EntityOperation::Set { diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 26f8e38cc3c..92c8b9afdd7 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -1,4 +1,5 @@ use graph::blockchain::block_stream::FirehoseCursor; +use graph::data::value::Word; use graph::schema::InputSchema; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; @@ -280,7 +281,7 @@ async fn create_grafted_subgraph( fn find_entities( store: &DieselSubgraphStore, deployment: &DeploymentLocator, -) -> (Vec, Vec) { +) -> (Vec, Vec) { let query = EntityQuery::new( deployment.hash.clone(), BLOCK_NUMBER_MAX, diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 41ce0db37e9..49bd3bc45ae 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -3,6 +3,7 @@ use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; +use graph::data::value::Word; use graph::data_source::CausalityRegion; use 
graph::entity; use graph::prelude::{EntityQuery, MetricsRegistry}; @@ -408,7 +409,7 @@ fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Ent #[test] fn query() { - fn fetch(conn: &PgConnection, layout: &Layout, coll: EntityCollection) -> Vec { + fn fetch(conn: &PgConnection, layout: &Layout, coll: EntityCollection) -> Vec { let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap(); let query = EntityQuery::new(id, BLOCK_NUMBER_MAX, coll).first(10); layout From 71ce5ecaf3d068ad8f9a67b30a2a3e0373bea7ad Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 15:25:19 -0700 Subject: [PATCH 0186/2104] graph: Remove unused/unneeded trait TryIntoEntity --- graph/src/data/store/mod.rs | 5 ----- graph/src/lib.rs | 3 +-- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 151b5513279..d61b65864da 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -905,11 +905,6 @@ impl GasSizeOf for Entity { } } -/// A value that can (maybe) be converted to an `Entity`. 
-pub trait TryIntoEntity { - fn try_into_entity(self) -> Result; } - #[test] fn value_bytes() { let graphql_value = r::Value::String("0x8f494c66afc1d3f8ac1b45df21f02a46".to_owned()); diff --git a/graph/src/lib.rs b/graph/src/lib.rs index fa88c192b87..5acfd460984 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -152,8 +152,7 @@ pub mod prelude { pub use crate::data::store::ethereum::*; pub use crate::data::store::scalar::{BigDecimal, BigInt, BigIntSign}; pub use crate::data::store::{ - AssignmentEvent, Attribute, Entity, NodeId, SubscriptionFilter, TryIntoEntity, Value, - ValueType, + AssignmentEvent, Attribute, Entity, NodeId, SubscriptionFilter, Value, ValueType, }; pub use crate::data::subgraph::schema::SubgraphDeploymentEntity; pub use crate::data::subgraph::{ From 45ce14958c53c5710d8d63c4c33b0b3c897f115f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 16:06:19 -0700 Subject: [PATCH 0187/2104] store: Restructure the fixtures for the query test We want to get rid of the possibility to set __typename on entities; the fixtures used that. 
--- store/test-store/tests/graphql/query.rs | 179 +++++++++++++++++------- 1 file changed, 126 insertions(+), 53 deletions(-) diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 6be99be3152..05cd63d6336 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -297,6 +297,29 @@ async fn insert_test_entities( manifest: SubgraphManifest, id_type: IdType, ) -> DeploymentLocator { + fn insert_ops(entities: Vec<(&str, Vec)>) -> Vec { + entities + .into_iter() + .map(|(typename, entities)| { + entities.into_iter().map(|data| EntityOperation::Set { + key: EntityKey::data(typename.to_string(), data.id()), + data, + }) + }) + .flatten() + .collect() + } + + async fn insert_at( + insert_ops: Vec, + deployment: &DeploymentLocator, + block_ptr: BlockPtr, + ) { + test_store::transact_and_wait(&STORE.subgraph_store(), deployment, block_ptr, insert_ops) + .await + .unwrap(); + } + let deployment = DeploymentCreate::new(String::new(), &manifest, None); let name = SubgraphName::new(manifest.id.as_str()).unwrap(); let node_id = NodeId::new("test").unwrap(); @@ -315,60 +338,110 @@ async fn insert_test_entities( let md = id_type.medias(); let is = &manifest.schema; let entities0 = vec![ - entity! { is => __typename: "Musician", id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"] }, - entity! { is => __typename: "Musician", id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"] }, - entity! { is => __typename: "Publisher", id: "0xb1" }, - entity! { is => __typename: "Band", id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, - entity! { is => __typename: "Band", id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, - entity! { is => __typename: "Song", id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, - entity! 
{ is => __typename: "Song", id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, - entity! { is => __typename: "Song", id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, - entity! { is => __typename: "Song", id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, - entity! { is => __typename: "SongStat", id: s[1], played: 10 }, - entity! { is => __typename: "SongStat", id: s[2], played: 15 }, - entity! { is => __typename: "BandReview", id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, - entity! { is => __typename: "BandReview", id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, - entity! { is => __typename: "BandReview", id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, - entity! { is => __typename: "SongReview", id: "r3", body: "Bad", song: s[2], author: "u1" }, - entity! { is => __typename: "SongReview", id: "r4", body: "Good", song: s[3], author: "u2" }, - entity! { is => __typename: "SongReview", id: "r6", body: "Very Bad", song: s[2], author: "u3" }, - entity! { is => __typename: "User", id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, - entity! { is => __typename: "User", id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, - entity! { is => __typename: "AnonymousUser", id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, - entity! { is => __typename: "Photo", id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, - entity! { is => __typename: "Video", id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, - entity! { is => __typename: "Photo", id: md[3], title: "Rock Tune Single Cover", author: "u1" }, - entity! { is => __typename: "Video", id: md[4], title: "Rock Tune Music Video", author: "u2" }, - entity! 
{ is => __typename: "Photo", id: md[5], title: "Pop Tune Single Cover", author: "u1" }, - entity! { is => __typename: "Video", id: md[6], title: "Folk Tune Music Video", author: "u2" }, - entity! { is => __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, - entity! { is => __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, - entity! { is => __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, - ]; - - let entities1 = vec![ - entity! { is => __typename: "Musician", id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, - entity! { is => __typename: "Musician", id: "m4", name: "Valerie", bands: Vec::::new() }, + ( + "Musician", + vec![ + entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"] }, + entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"] }, + ], + ), + ("Publisher", vec![entity! { is => id: "0xb1" }]), + ( + "Band", + vec![ + entity! { is => id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, + entity! { is => id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, + ], + ), + ( + "Song", + vec![ + entity! { is => id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, + entity! { is => id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, + entity! { is => id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, + entity! { is => id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, + ], + ), + ( + "User", + vec![ + entity! { is => id: "u1", name: "User 1", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r3" }, + ], + ), + ( + "SongStat", + vec![ + entity! { is => id: s[1], played: 10 }, + entity! { is => id: s[2], played: 15 }, + ], + ), + ( + "BandReview", + vec![ + entity! 
{ is => id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, + entity! { is => id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, + entity! { is => id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, + ], + ), + ( + "SongReview", + vec![ + entity! { is => id: "r3", body: "Bad", song: s[2], author: "u1" }, + entity! { is => id: "r4", body: "Good", song: s[3], author: "u2" }, + entity! { is => id: "r6", body: "Very Bad", song: s[2], author: "u3" }, + ], + ), + ( + "User", + vec![ + entity! { is => id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, + entity! { is => id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, + ], + ), + ( + "AnonymousUser", + vec![ + entity! { is => id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, + ], + ), + ( + "Photo", + vec![ + entity! { is => id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, + entity! { is => id: md[3], title: "Rock Tune Single Cover", author: "u1" }, + entity! { is => id: md[5], title: "Pop Tune Single Cover", author: "u1" }, + ], + ), + ( + "Video", + vec![ + entity! { is => id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, + entity! { is => id: md[4], title: "Rock Tune Music Video", author: "u2" }, + entity! { is => id: md[6], title: "Folk Tune Music Video", author: "u2" }, + ], + ), + ( + "Album", + vec![entity! { is => id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }], + ), + ( + "Single", + vec![ + entity! { is => id: "rl2", title: "Rock", songs: vec![s[2]] }, + entity! 
{ is => id: "rl3", title: "Cheesy", songs: vec![s[1]] }, + ], + ), ]; - - async fn insert_at(entities: Vec, deployment: &DeploymentLocator, block_ptr: BlockPtr) { - let insert_ops = entities.into_iter().map(|data| EntityOperation::Set { - key: EntityKey::data( - data.get("__typename").unwrap().clone().as_string().unwrap(), - data.get("id").unwrap().clone().as_string().unwrap(), - ), - data, - }); - - test_store::transact_and_wait( - &STORE.subgraph_store(), - deployment, - block_ptr, - insert_ops.collect::>(), - ) - .await - .unwrap(); - } + let entities0 = insert_ops(entities0); + + let entities1 = vec![( + "Musician", + vec![ + entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"] }, + entity! { is => id: "m4", name: "Valerie", bands: Vec::::new() }, + ], + )]; + let entities1 = insert_ops(entities1); insert_at(entities0, &deployment, GENESIS_PTR.clone()).await; insert_at(entities1, &deployment, BLOCK_ONE.clone()).await; From ad8bdd8b25c884eba82a370eea8b1426a51dfdbd Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 16:33:27 -0700 Subject: [PATCH 0188/2104] graph, store: Get rid of unneeded removal of __typename from entity The code in EntityCache that removed `__typename` from entities is not needed since the store does not set that attribute on the code paths that the EntityCache uses. This also adds a test for that. There are some tests that will fail coincidentally if entities contain a `__typename`, but it's better to have an explicit test for that. 
--- graph/src/components/store/entity_cache.rs | 6 +----- store/test-store/tests/graph/entity_cache.rs | 22 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 50c78945b95..adfa1a495fd 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -325,11 +325,7 @@ impl LfuCache> { ) -> Result, s::QueryExecutionError> { match self.get(key) { None => { - let mut entity = store.get(key)?; - if let Some(entity) = &mut entity { - // `__typename` is for queries not for mappings. - entity.remove("__typename"); - } + let entity = store.get(key)?; self.insert(key.clone(), entity.clone()); Ok(entity) } diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 8b21c081942..974224b1142 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -739,5 +739,27 @@ fn scoped_get() { assert_eq!(Some(&wallet1), act1.as_ref()); let act1 = cache.get(&key1, GetScope::Store).unwrap(); assert_eq!(Some(&wallet1), act1.as_ref()); + }) +} + +/// Entities should never contain a `__typename` or `g$parent_id` field, if +/// they do, that can cause PoI divergences, because entities will differ +/// depending on whether they had to be loaded from the database or stuck +/// around in the cache where they won't have these attributes +#[test] +fn no_internal_keys() { + run_store_test(|mut cache, _, _, writable| async move { + #[track_caller] + fn check(entity: &Entity) { + assert_eq!(None, entity.get("__typename")); + assert_eq!(None, entity.get("g$parent_id")); + } + let key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + + let wallet = writable.get(&key).unwrap().unwrap(); + check(&wallet); + + let wallet = cache.get(&key, GetScope::Store).unwrap().unwrap(); + check(&wallet); }); } From 
fe6bcee5d022d711243ea51420049dcb7522fdf2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 16:52:36 -0700 Subject: [PATCH 0189/2104] graph, store: Make sure we never set __typename etc. on entities That also makes it possible to remove them from the AtomPool of the InputSchema --- graph/src/schema/input_schema.rs | 3 -- store/postgres/src/relational.rs | 10 +++--- store/postgres/src/relational_queries.rs | 42 +++++++++++++++--------- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index e3f76381909..6856319804d 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -323,9 +323,6 @@ impl InputSchema { /// in the document and the names of all their fields fn atom_pool(document: &s::Document) -> AtomPool { let mut pool = AtomPool::new(); - // These two entries are always required - pool.intern("g$parent_id"); // Used by queries - pool.intern("__typename"); // Mandated by GraphQL pool.intern("digest"); // Attribute of PoI object for definition in &document.definitions { match definition { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index f83d537f0fd..70de559c14c 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -521,7 +521,7 @@ impl Layout { FindQuery::new(table.as_ref(), key, block) .get_result::(conn) .optional()? - .map(|entity_data| entity_data.deserialize_with_layout(self, None, true)) + .map(|entity_data| entity_data.deserialize_with_layout(self, None)) .transpose() } @@ -549,7 +549,7 @@ impl Layout { let mut entities: BTreeMap = BTreeMap::new(); for data in query.load::(conn)? 
{ let entity_type = data.entity_type(); - let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; + let entity_data: Entity = data.deserialize_with_layout(self, None)?; let key = EntityKey { entity_type, @@ -578,7 +578,7 @@ impl Layout { for data in query.load::(conn)? { let entity_type = data.entity_type(); - let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; + let entity_data: Entity = data.deserialize_with_layout(self, None)?; let key = EntityKey { entity_type, entity_id: entity_data.id(), @@ -614,7 +614,7 @@ impl Layout { for entity_data in inserts_or_updates.into_iter() { let entity_type = entity_data.entity_type(); - let data: Entity = entity_data.deserialize_with_layout(self, None, true)?; + let data: Entity = entity_data.deserialize_with_layout(self, None)?; let entity_id = data.id(); processed_entities.insert((entity_type.clone(), entity_id.clone())); @@ -789,7 +789,7 @@ impl Layout { .into_iter() .map(|entity_data| { entity_data - .deserialize_with_layout(self, parent_type.as_ref(), false) + .deserialize_with_layout(self, parent_type.as_ref()) .map_err(|e| e.into()) }) .collect::, _>>() diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index dfe170f5aac..a47212641b4 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -239,6 +239,9 @@ impl ForeignKeyClauses for Column { } pub trait FromEntityData: Sized { + /// Whether to include the internal keys `__typename` and `g$parent_id`. 
+ const WITH_INTERNAL_KEYS: bool; + type Value: FromColumnValue; fn from_data>>( @@ -248,6 +251,8 @@ pub trait FromEntityData: Sized { } impl FromEntityData for Entity { + const WITH_INTERNAL_KEYS: bool = false; + type Value = graph::prelude::Value; fn from_data>>( @@ -259,6 +264,8 @@ impl FromEntityData for Entity { } impl FromEntityData for Object { + const WITH_INTERNAL_KEYS: bool = true; + type Value = r::Value; fn from_data>>( @@ -485,7 +492,6 @@ impl EntityData { self, layout: &Layout, parent_type: Option<&ColumnType>, - remove_typename: bool, ) -> Result { let entity_type = EntityType::new(self.entity.clone()); let table = layout.table_for_entity(&entity_type)?; @@ -494,10 +500,10 @@ impl EntityData { match self.data { j::Object(map) => { let typname = std::iter::once(self.entity).filter_map(move |e| { - if remove_typename { - None - } else { + if T::WITH_INTERNAL_KEYS { Some(Ok((Word::from("__typename"), T::Value::from_string(e)))) + } else { + None } }); let entries = map.into_iter().filter_map(move |(key, json)| { @@ -505,19 +511,23 @@ impl EntityData { // column; those will be things like the block_range that // is used internally for versioning if key == "g$parent_id" { - match &parent_type { - None => { - // A query that does not have parents - // somehow returned parent ids. We have no - // idea how to deserialize that - Some(Err(graph::constraint_violation!( - "query unexpectedly produces parent ids" - ))) + if T::WITH_INTERNAL_KEYS { + match &parent_type { + None => { + // A query that does not have parents + // somehow returned parent ids. 
We have no + // idea how to deserialize that + Some(Err(graph::constraint_violation!( + "query unexpectedly produces parent ids" + ))) + } + Some(parent_type) => Some( + T::Value::from_column_value(parent_type, json) + .map(|value| (Word::from("g$parent_id"), value)), + ), } - Some(parent_type) => Some( - T::Value::from_column_value(parent_type, json) - .map(|value| (Word::from("g$parent_id"), value)), - ), + } else { + None } } else if let Some(column) = table.column(&SqlName::verbatim(key)) { match T::Value::from_column_value(&column.column_type, json) { From 485562b293dc6ecefaaf3eb7b32394ceb29267a7 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 20 Apr 2023 14:49:31 -0700 Subject: [PATCH 0190/2104] graph, store: Add fulltext fields in bulk in InsertQuery --- graph/src/components/store/err.rs | 10 ++++++++++ graph/src/data/store/mod.rs | 14 ++++++++++++++ store/postgres/src/relational_queries.rs | 9 +++++---- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index 53869bc4241..b07f0c64f3b 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -1,5 +1,7 @@ use super::{BlockNumber, DeploymentHash, DeploymentSchemaVersion}; use crate::prelude::QueryExecutionError; +use crate::util::intern::Error as InternError; + use anyhow::{anyhow, Error}; use diesel::result::Error as DieselError; use thiserror::Error; @@ -122,3 +124,11 @@ impl From for StoreError { StoreError::Unknown(anyhow!("{}", e.to_string())) } } + +impl From for StoreError { + fn from(e: InternError) -> Self { + match e { + InternError::NotInterned(key) => StoreError::UnknownField(key), + } + } +} diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index d61b65864da..398b5ccf901 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -761,6 +761,20 @@ impl Entity { self.0.retain(|_, value| !value.is_null()) } + /// Add the key/value pairs 
from `iter` to this entity. This is the same + /// as an implementation of `std::iter::Extend` would be, except that + /// this operation is fallible because one of the keys from the iterator + /// might not be in the underlying pool + pub fn merge_iter( + &mut self, + iter: impl IntoIterator, Value)>, + ) -> Result<(), InternError> { + for (key, value) in iter { + self.0.insert(key, value)?; + } + Ok(()) + } + /// Validate that this entity matches the object type definition in the /// schema. An entity that passes these checks can be stored /// successfully in the subgraph's database schema diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index a47212641b4..3cb86ecec29 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1762,6 +1762,7 @@ impl<'a> InsertQuery<'a> { block: BlockNumber, ) -> Result, StoreError> { for (entity_key, entity) in entities.iter_mut() { + let mut fulltext = Vec::new(); for column in table.columns.iter() { if let Some(fields) = column.fulltext_fields.as_ref() { let fulltext_field_values = fields @@ -1770,10 +1771,7 @@ impl<'a> InsertQuery<'a> { .cloned() .collect::>(); if !fulltext_field_values.is_empty() { - entity - .to_mut() - .insert(&column.field, Value::List(fulltext_field_values)) - .map_err(|e| entity_key.unknown_attribute(e))?; + fulltext.push((&column.field, Value::List(fulltext_field_values))); } } if !column.is_nullable() && !entity.contains_key(&column.field) { @@ -1785,6 +1783,9 @@ impl<'a> InsertQuery<'a> { ))); } } + if !fulltext.is_empty() { + entity.to_mut().merge_iter(fulltext)?; + } } let unique_columns = InsertQuery::unique_columns(table, entities); let br_column = BlockRangeColumn::new(table, "", block); From fff74c0e15d387d7ba9ce6e8c60e8b8787d13a9f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 19 Apr 2023 15:23:37 -0700 Subject: [PATCH 0191/2104] graph: Restrict use of single-key changes for Entity to tests 
--- graph/src/data/store/mod.rs | 42 ++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 398b5ccf901..6d7bc5f8043 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -680,14 +680,6 @@ impl Entity { self.0.get(key) } - pub fn insert(&mut self, key: &str, value: Value) -> Result, InternError> { - self.0.insert(key, value) - } - - pub fn remove(&mut self, key: &str) -> Option { - self.0.remove(key) - } - pub fn contains_key(&self, key: &str) -> bool { self.0.contains_key(key) } @@ -723,15 +715,6 @@ impl Entity { } } - /// Convenience method to save having to `.into()` the arguments. - pub fn set( - &mut self, - name: &str, - value: impl Into, - ) -> Result, InternError> { - self.0.insert(name, value.into()) - } - /// Merges an entity update `update` into this entity. /// /// If a key exists in both entities, the value from `update` is chosen. @@ -749,8 +732,8 @@ impl Entity { pub fn merge_remove_null_fields(&mut self, update: Entity) -> Result<(), InternError> { for (key, value) in update.0.into_iter() { match value { - Value::Null => self.remove(&key), - _ => self.insert(&key, value)?, + Value::Null => self.0.remove(&key), + _ => self.0.insert(&key, value)?, }; } Ok(()) @@ -901,6 +884,27 @@ impl Entity { } } +/// Convenience methods to modify individual attributes for tests. +/// Production code should not use/need this. 
+#[cfg(debug_assertions)] +impl Entity { + pub fn insert(&mut self, key: &str, value: Value) -> Result, InternError> { + self.0.insert(key, value) + } + + pub fn remove(&mut self, key: &str) -> Option { + self.0.remove(key) + } + + pub fn set( + &mut self, + name: &str, + value: impl Into, + ) -> Result, InternError> { + self.0.insert(name, value.into()) + } +} + impl<'a> From<&'a Entity> for Cow<'a, Entity> { fn from(entity: &'a Entity) -> Self { Cow::Borrowed(entity) From fb34bfac4425aa716e3aae1493d7cf616fe24854 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 5 May 2023 11:34:30 +0200 Subject: [PATCH 0192/2104] all: Introduce a constant for the "digest" attribute of PoI --- core/src/subgraph/runner.rs | 6 +++--- graph/src/data/subgraph/schema.rs | 3 +++ graph/src/schema/input_schema.rs | 3 ++- store/postgres/src/deployment_store.rs | 4 ++-- store/postgres/src/relational.rs | 6 +++--- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 861e9eeff7e..babd6bf43de 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -12,11 +12,11 @@ use graph::components::{ subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, }; use graph::data::store::scalar::Bytes; +use graph::data::subgraph::schema::POI_DIGEST; use graph::data::subgraph::{ schema::{SubgraphError, SubgraphHealth, POI_OBJECT}, SubgraphFeature, }; -use graph::data::value::Word; use graph::data_source::{ offchain, CausalityRegion, DataSource, DataSourceCreationError, DataSourceTemplate, TriggerData, }; @@ -1051,7 +1051,7 @@ async fn update_proof_of_indexing( let prev_poi = entity_cache .get(&entity_key, GetScope::Store) .map_err(Error::from)? 
- .map(|entity| match entity.get("digest") { + .map(|entity| match entity.get(POI_DIGEST.as_str()) { Some(Value::Bytes(b)) => b.clone(), _ => panic!("Expected POI entity to have a digest and for it to be bytes"), }); @@ -1067,7 +1067,7 @@ async fn update_proof_of_indexing( graph::data::store::ID.clone(), Value::from(entity_key.entity_id.to_string()), ), - (Word::from("digest"), Value::from(updated_proof_of_indexing)), + (POI_DIGEST.clone(), Value::from(updated_proof_of_indexing)), ]; let new_poi_entity = entity_cache.make_entity(data)?; diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index 9f617a6b761..6baae86ec31 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -13,6 +13,7 @@ use super::DeploymentHash; use crate::data::graphql::TryFromValue; use crate::data::store::Value; use crate::data::subgraph::SubgraphManifest; +use crate::data::value::Word; use crate::prelude::*; use crate::util::stable_hash_glue::impl_stable_hash; use crate::{blockchain::Blockchain, components::store::EntityType}; @@ -20,6 +21,8 @@ use crate::{blockchain::Blockchain, components::store::EntityType}; pub const POI_TABLE: &str = "poi2$"; lazy_static! 
{ pub static ref POI_OBJECT: EntityType = EntityType::new("Poi$".to_string()); + /// The name of the digest attribute of POI entities + pub static ref POI_DIGEST: Word = Word::from("digest"); } #[derive(Copy, Clone, PartialEq, Eq, Debug, Deserialize)] diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 6856319804d..51646cf2417 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -10,6 +10,7 @@ use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; use crate::data::store::{self, scalar, IntoEntityIterator, TryIntoEntityIterator}; +use crate::data::subgraph::schema::POI_DIGEST; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; @@ -323,7 +324,7 @@ impl InputSchema { /// in the document and the names of all their fields fn atom_pool(document: &s::Document) -> AtomPool { let mut pool = AtomPool::new(); - pool.intern("digest"); // Attribute of PoI object + pool.intern(POI_DIGEST.as_str()); // Attribute of PoI object for definition in &document.definitions { match definition { s::Definition::TypeDefinition(typedef) => match typedef { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 2a58caa4ac1..c4c8f2d80eb 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -36,7 +36,7 @@ use std::time::{Duration, Instant}; use graph::components::store::EntityCollection; use graph::components::subgraph::{ProofOfIndexingFinisher, ProofOfIndexingVersion}; use graph::constraint_violation; -use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_OBJECT}; +use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_DIGEST, POI_OBJECT}; use graph::prelude::{ anyhow, debug, info, o, warn, web3, 
AttributeNames, BlockNumber, BlockPtr, CheapClone, DeploymentHash, DeploymentState, Entity, EntityModification, EntityQuery, Error, Logger, @@ -1062,7 +1062,7 @@ impl DeploymentStore { .into_iter() .map(|e| { let causality_region = e.id(); - let digest = match e.get("digest") { + let digest = match e.get(POI_DIGEST.as_str()) { Some(Value::Bytes(b)) => Ok(b.clone()), other => Err(anyhow::anyhow!( "Entity has non-bytes digest attribute: {:?}", diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 70de559c14c..f5a79061c8a 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -53,7 +53,7 @@ use crate::{ use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; use graph::data::store::BYTES_SCALAR; -use graph::data::subgraph::schema::{POI_OBJECT, POI_TABLE}; +use graph::data::subgraph::schema::{POI_DIGEST, POI_OBJECT, POI_TABLE}; use graph::prelude::{ anyhow, info, BlockNumber, DeploymentHash, Entity, EntityChange, EntityOperation, Logger, QueryExecutionError, StoreError, StoreEvent, ValueType, BLOCK_NUMBER_MAX, @@ -394,8 +394,8 @@ impl Layout { name: table_name, columns: vec![ Column { - name: SqlName::from("digest"), - field: "digest".to_owned(), + name: SqlName::from(POI_DIGEST.as_str()), + field: POI_DIGEST.to_string(), field_type: q::Type::NonNullType(Box::new(q::Type::NamedType( BYTES_SCALAR.to_owned(), ))), From 87f26c2b49f0b61d9ee02193b20497dd9293e480 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 5 May 2023 11:40:13 +0200 Subject: [PATCH 0193/2104] core: Move storing PoI in the cache into a helper --- core/src/subgraph/runner.rs | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index babd6bf43de..a8b7c28c24c 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -1029,6 
+1029,23 @@ async fn update_proof_of_indexing( stopwatch: &StopwatchMetrics, entity_cache: &mut EntityCache, ) -> Result<(), Error> { + // Helper to store the digest as a PoI entity in the cache + fn store_poi_entity( + entity_cache: &mut EntityCache, + key: EntityKey, + digest: Bytes, + ) -> Result<(), Error> { + let data = vec![ + ( + graph::data::store::ID.clone(), + Value::from(key.entity_id.to_string()), + ), + (POI_DIGEST.clone(), Value::from(digest)), + ]; + let poi = entity_cache.make_entity(data)?; + entity_cache.set(key, poi) + } + let _section_guard = stopwatch.start_section("update_proof_of_indexing"); let mut proof_of_indexing = proof_of_indexing.take(); @@ -1062,16 +1079,7 @@ async fn update_proof_of_indexing( // Put this onto an entity with the same digest attribute // that was expected before when reading. - let data = vec![ - ( - graph::data::store::ID.clone(), - Value::from(entity_key.entity_id.to_string()), - ), - (POI_DIGEST.clone(), Value::from(updated_proof_of_indexing)), - ]; - let new_poi_entity = entity_cache.make_entity(data)?; - - entity_cache.set(entity_key, new_poi_entity)?; + store_poi_entity(entity_cache, entity_key, updated_proof_of_indexing)?; } Ok(()) From 9aab7bd81f4fab2b0a8ea218de1e39999f392ac3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:07:56 +0200 Subject: [PATCH 0194/2104] build(deps): bump uuid from 1.3.1 to 1.3.2 (#4588) Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.3.1 to 1.3.2. - [Release notes](https://github.com/uuid-rs/uuid/releases) - [Commits](https://github.com/uuid-rs/uuid/compare/1.3.1...1.3.2) --- updated-dependencies: - dependency-name: uuid dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- chain/ethereum/Cargo.toml | 2 +- core/Cargo.toml | 2 +- runtime/wasm/Cargo.toml | 2 +- server/websocket/Cargo.toml | 2 +- store/postgres/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5e63ba1053..d185753de29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5083,9 +5083,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" dependencies = [ "getrandom", ] diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index 48befcf1ffa..cd8b76512e4 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -26,7 +26,7 @@ graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] base64 = "0.20.0" -uuid = { version = "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } [build-dependencies] tonic-build = { workspace = true } diff --git a/core/Cargo.toml b/core/Cargo.toml index a7f27b9d887..809b165d3c6 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -36,4 +36,4 @@ ipfs-api-backend-hyper = "0.6" ipfs-api = { version = "0.17.0", features = [ "with-hyper-rustls", ], default-features = false } -uuid = { version = "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index 191d48ba620..74ed60080e9 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -14,7 +14,7 @@ bs58 = "0.4.0" graph-runtime-derive = { path = "../derive" } semver = "1.0.16" lazy_static = "1.4" -uuid = { version 
= "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } strum = "0.21.0" strum_macros = "0.21.1" bytes = "1.0" diff --git a/server/websocket/Cargo.toml b/server/websocket/Cargo.toml index 76b13559f3f..e154decdf15 100644 --- a/server/websocket/Cargo.toml +++ b/server/websocket/Cargo.toml @@ -12,5 +12,5 @@ lazy_static = "1.2.0" serde = "1.0" serde_derive = "1.0" tokio-tungstenite = "0.17" -uuid = { version = "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } anyhow = "1.0" diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index f3bf0f4283a..4bcef5a9d6e 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -29,7 +29,7 @@ openssl = "0.10.50" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" -uuid = { version = "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } diesel_derives = "1.4.1" anyhow = "1.0.70" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 0824c8728ec..f3cb162bbd0 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -23,7 +23,7 @@ serde = "1.0" serde_yaml = "0.8" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } tokio = { version = "1.28.0", features = ["rt", "macros", "process"] } -uuid = { version = "1.3.1", features = ["v4"] } +uuid = { version = "1.3.2", features = ["v4"] } [dev-dependencies] anyhow = "1.0.70" From 55c39d642a0ec684194f152c416ca76a880cd060 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:08:18 +0200 Subject: [PATCH 0195/2104] build(deps): bump clap from 3.2.23 to 3.2.25 (#4586) Bumps [clap](https://github.com/clap-rs/clap) from 3.2.23 to 3.2.25. 
- [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/v3.2.25/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v3.2.23...v3.2.25) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- graph/Cargo.toml | 2 +- node/Cargo.toml | 2 +- store/postgres/Cargo.toml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d185753de29..f69cd100ff9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -511,9 +511,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", "bitflags", @@ -528,9 +528,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", "proc-macro-error", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index b4d99b48ccc..48b50569d0a 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -66,7 +66,7 @@ web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-pat serde_plain = "1.0.1" [dev-dependencies] -clap = { version = "3.2.23", features = ["derive", "env"] } +clap = { version = "3.2.25", features = ["derive", "env"] } maplit = "1.0.2" hex-literal = "0.4" diff --git a/node/Cargo.toml b/node/Cargo.toml index b001e7f9e5a..b432572d790 100644 --- 
a/node/Cargo.toml +++ b/node/Cargo.toml @@ -13,7 +13,7 @@ name = "graphman" path = "src/bin/manager.rs" [dependencies] -clap = { version = "3.2.23", features = ["derive", "env"] } +clap = { version = "3.2.25", features = ["derive", "env"] } env_logger = "0.10.0" git-testament = "0.2" graphql-parser = "0.4.0" diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 4bcef5a9d6e..a32268afb29 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -40,5 +40,5 @@ hex = "0.4.3" pretty_assertions = "1.3.0" [dev-dependencies] -clap = "3.2.23" +clap = "3.2.25" graphql-parser = "0.4.0" From d6656f287d036c23d2f5085a5e44fde42dbe92f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:08:35 +0200 Subject: [PATCH 0196/2104] build(deps): bump openssl from 0.10.50 to 0.10.52 (#4584) Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.50 to 0.10.52. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.50...openssl-v0.10.52) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- store/postgres/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f69cd100ff9..06063a62670 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2996,9 +2996,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.50" +version = "0.10.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" +checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3028,9 +3028,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.85" +version = "0.9.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" +checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" dependencies = [ "cc", "libc", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index a32268afb29..74a0b3ca38b 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -25,7 +25,7 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" -openssl = "0.10.50" +openssl = "0.10.52" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" From 10f4b2afc3a4792796fbde874ff291ac0a036d0b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:09:07 +0200 Subject: [PATCH 0197/2104] build(deps): bump atomic_refcell from 0.1.9 to 0.1.10 (#4566) Bumps [atomic_refcell](https://github.com/bholley/atomic_refcell) from 0.1.9 to 0.1.10. 
- [Release notes](https://github.com/bholley/atomic_refcell/releases) - [Commits](https://github.com/bholley/atomic_refcell/commits) --- updated-dependencies: - dependency-name: atomic_refcell dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- core/Cargo.toml | 2 +- graph/Cargo.toml | 2 +- runtime/wasm/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06063a62670..8a36a9c93de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,9 +146,9 @@ dependencies = [ [[package]] name = "atomic_refcell" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "857253367827bd9d0fd973f0ef15506a96e79e41b0ad7aa691203a4e3214f6c8" +checksum = "79d6dc922a2792b006573f60b2648076355daeae5ce9cb59507e5908c9625d31" [[package]] name = "atty" diff --git a/core/Cargo.toml b/core/Cargo.toml index 809b165d3c6..02f90833495 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = "0.1.9" +atomic_refcell = "0.1.10" async-stream = "0.3" bytes = "1.0" futures01 = { package = "futures", version = "0.1.31" } diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 48b50569d0a..4b47d4a5cdd 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true anyhow = "1.0" async-trait = "0.1.50" async-stream = "0.3" -atomic_refcell = "0.1.9" +atomic_refcell = "0.1.10" bigdecimal = { version = "0.1.0", features = ["serde"] } bytes = "1.0.1" cid = "0.10.1" diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index 74ed60080e9..c7787430290 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = 
"0.1.9" +atomic_refcell = "0.1.10" ethabi = "17.2" futures = "0.1.21" hex = "0.4.3" From 1406df7aa95ba1bda2faf2560512f09752312821 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:09:22 +0200 Subject: [PATCH 0198/2104] build(deps): bump prost-types from 0.11.8 to 0.11.9 (#4569) Bumps [prost-types](https://github.com/tokio-rs/prost) from 0.11.8 to 0.11.9. - [Release notes](https://github.com/tokio-rs/prost/releases) - [Commits](https://github.com/tokio-rs/prost/compare/v0.11.8...v0.11.9) --- updated-dependencies: - dependency-name: prost-types dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a36a9c93de..22a3d660d73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3434,9 +3434,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] diff --git a/Cargo.toml b/Cargo.toml index a79c476e4b4..34f001ca5e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] prost = "0.11.9" -prost-types = "0.11.8" +prost-types = "0.11.9" tonic = { version = "0.8.3", features = ["tls-roots", "gzip"] } tonic-build = { version = "0.8.4", features = ["prost"] } From 3a7cc4e1aadd29974a6fc88096478abea9e72c76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:09:39 +0200 Subject: [PATCH 0199/2104] build(deps): bump tokio-stream from 
0.1.12 to 0.1.14 (#4589) Bumps [tokio-stream](https://github.com/tokio-rs/tokio) from 0.1.12 to 0.1.14. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Changelog](https://github.com/tokio-rs/tokio/blob/tokio-0.1.14/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-stream-0.1.12...tokio-0.1.14) --- updated-dependencies: - dependency-name: tokio-stream dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22a3d660d73..2766cd77a95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4646,9 +4646,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 4b47d4a5cdd..24c00d9683a 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -45,7 +45,7 @@ slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" tokio = { version = "1.28.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } -tokio-stream = { version = "0.1.12", features = ["sync"] } +tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-retry = "0.3.0" url = "2.3.1" prometheus = "0.13.3" From 7ce9cb7c2e8b5acd85c2fea4e2f5d45300112194 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 11:09:58 +0200 Subject: [PATCH 0200/2104] build(deps): bump anyhow from 1.0.70 to 1.0.71 (#4585) Bumps 
[anyhow](https://github.com/dtolnay/anyhow) from 1.0.70 to 1.0.71. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.70...1.0.71) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- store/postgres/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2766cd77a95..11c9cb97fed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,9 +56,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arc-swap" diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 74a0b3ca38b..6c49072b3ef 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -32,7 +32,7 @@ serde = "1.0" uuid = { version = "1.3.2", features = ["v4"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } diesel_derives = "1.4.1" -anyhow = "1.0.70" +anyhow = "1.0.71" git-testament = "0.2.4" itertools = "0.10.5" pin-utils = "0.1" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f3cb162bbd0..d7930995d2a 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -26,7 +26,7 @@ tokio = { version = "1.28.0", features = ["rt", "macros", "process"] } uuid = { version = "1.3.2", features = ["v4"] } [dev-dependencies] -anyhow = "1.0.70" +anyhow = "1.0.71" bollard = "0.10" lazy_static = "1.4.0" tokio-stream = "0.1" From 0e83179fb6e0ba4361cef5d846fa171a70201924 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 19:56:46 +0200 Subject: [PATCH 0201/2104] build(deps): bump async-stream from 0.3.4 to 0.3.5 (#4529) Bumps [async-stream](https://github.com/tokio-rs/async-stream) from 0.3.4 to 0.3.5. - [Release notes](https://github.com/tokio-rs/async-stream/releases) - [Commits](https://github.com/tokio-rs/async-stream/compare/v0.3.4...v0.3.5) --- updated-dependencies: - dependency-name: async-stream dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- tests/Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11c9cb97fed..961e3ded326 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -113,9 +113,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -124,13 +124,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.12", ] [[package]] diff --git a/tests/Cargo.toml b/tests/Cargo.toml index d7930995d2a..854f898d9fc 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true [dependencies] anyhow = "1.0" assert-json-diff = "2.0.2" -async-stream = "0.3.4" +async-stream = "0.3.5" bollard = "0.10" futures = { 
version = "0.3", features = ["compat"] } graph = { path = "../graph" } From 0e9f517fd312a90acf6509f6a5e919646ca52bed Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Tue, 9 May 2023 17:50:23 +0100 Subject: [PATCH 0202/2104] fix: Escape control characters in log messages (#4607) --- graph/src/log/mod.rs | 48 +++++++++++++++++++++++++++++- node/src/manager/commands/query.rs | 6 +++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/graph/src/log/mod.rs b/graph/src/log/mod.rs index ffde346c2e5..717c0260aa3 100644 --- a/graph/src/log/mod.rs +++ b/graph/src/log/mod.rs @@ -106,7 +106,9 @@ where write!(decorator, " ")?; decorator.start_msg()?; - write!(decorator, "{}", record.msg())?; + // Escape control characters in the message, including newlines. + let msg = escape_control_chars(record.msg().to_string()); + write!(decorator, "{}", msg)?; // Collect key values from the record let mut serializer = KeyValueSerializer::new(); @@ -385,3 +387,47 @@ fn formatted_timestamp_local(io: &mut impl io::Write) -> io::Result<()> { chrono::Local::now().format(ENV_VARS.log_time_format.as_str()) ) } + +pub fn escape_control_chars(input: String) -> String { + let should_escape = |c: char| c.is_control() && c != '\t'; + + if !input.chars().any(should_escape) { + return input; + } + + let mut escaped = String::new(); + for c in input.chars() { + match c { + '\n' => escaped.push_str("\\n"), + c if should_escape(c) => { + let code = c as u32; + escaped.push_str(&format!("\\u{{{:04x}}}", code)); + } + _ => escaped.push(c), + } + } + escaped +} + +#[test] +fn test_escape_control_chars() { + let test_cases = vec![ + ( + "This is a test\nwith some\tcontrol characters\x1B[1;32m and others.", + "This is a test\\nwith some\tcontrol characters\\u{001b}[1;32m and others.", + ), + ( + "This string has no control characters.", + "This string has no control characters.", + ), + ( + "This string has a tab\tbut no other control characters.", + "This string has a tab\tbut no other control 
characters.", + ), + ]; + + for (input, expected) in test_cases { + let escaped = escape_control_chars(input.to_string()); + assert_eq!(escaped, expected); + } +} diff --git a/node/src/manager/commands/query.rs b/node/src/manager/commands/query.rs index 0e9a6515d11..ff3c5e7952f 100644 --- a/node/src/manager/commands/query.rs +++ b/node/src/manager/commands/query.rs @@ -5,6 +5,7 @@ use std::time::Duration; use std::{collections::HashMap, sync::Arc}; use graph::data::query::Trace; +use graph::log::escape_control_chars; use graph::prelude::r; use graph::{ data::query::QueryTarget, @@ -65,7 +66,10 @@ pub async fn run( if let Some(output) = output { let mut f = File::create(output)?; - let json = serde_json::to_string(&res)?; + + // Escape control characters in the query output, as a precaution against injecting control + // characters in a terminal. + let json = escape_control_chars(serde_json::to_string(&res)?); writeln!(f, "{}", json)?; } From 328357f7c09a220c7ccfc1ac181091c31ad7c52d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 May 2023 13:07:35 +0200 Subject: [PATCH 0203/2104] build(deps): bump quote from 1.0.26 to 1.0.27 (#4605) Bumps [quote](https://github.com/dtolnay/quote) from 1.0.26 to 1.0.27. - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.26...1.0.27) --- updated-dependencies: - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 961e3ded326..1f8b523a778 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3500,9 +3500,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] From 88c727325726edfda41d3dabe9c03b77b0f6781e Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Wed, 10 May 2023 13:08:50 +0200 Subject: [PATCH 0204/2104] scripts: delete release.sh (#4602) --- scripts/README.md | 19 -------- scripts/release.sh | 111 --------------------------------------------- 2 files changed, 130 deletions(-) delete mode 100644 scripts/README.md delete mode 100755 scripts/release.sh diff --git a/scripts/README.md b/scripts/README.md deleted file mode 100644 index c749a79d70f..00000000000 --- a/scripts/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Scripts - -## `release.sh` - -1. Checks that all workspace crates use the same version via `version.workspace = true`. -2. Updates the version in the root `Cargo.toml` as indicated by the user: `major`, `minor`, or `patch`. -3. Updates `Cargo.lock` via `cargo check --tests`. -4. Adds the changes in a `Release vX.Y.Z` commit. - -Upon failure, the script will print some kind of error message and stop before committing the changes. - -### Usage - -The only argument it accepts is the type of release you want to do. - -```bash -# E.g. you're on v0.28.0 and must relese v0.28.1. 
-$ ./scripts/release.sh patch -``` diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index d86e6113e21..00000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# TODO: Maybe we should revert all changes if the script fails halfway through? - -abort () { - local FAIL_MSG=$@ - echo "$FAIL_MSG" - exit 1 -} - -abort_failed_to_update () { - local FILE_NAME=$@ - abort "💀 Failed to update $FILE_NAME. Aborting." -} - -assert_all_cargo_tomls_inherit_version () { - ERROR=0 - # Get all files named Cargo.toml excluding the `integration-tests` folder and - # the root Cargo.toml. - CARGO_TOMLS=$( - find . -name Cargo.toml | \ - grep -v integration-tests | \ - grep -v '\./Cargo.toml' - ) - for CARGO_TOML in $CARGO_TOMLS - do - # Good files have a line that looks like `version.workspace = true`. Bad - # files don't. - VERSION_LINE=$(grep '^version' $CARGO_TOML) - if [[ $VERSION_LINE != "version.workspace = true" ]]; then - echo "⚠️ $CARGO_TOML does not inherit the crate version from the root workspace." - ERROR=1 - fi - done - - if [[ $ERROR == 1 ]]; then - echo "💀 All crates must inherit the workspace's crate version." - echo " " - abort " Aborting." - fi -} - -get_toml_version () { - echo $(grep '^version =' Cargo.toml | cut -d '"' -f2) -} - -main () { - CURRENT_VERSION=$(get_toml_version) - assert_all_cargo_tomls_inherit_version - - # Increment by CLI argument (major, minor, patch) - MAJOR=$(echo $CURRENT_VERSION | cut -d. -f1) - MINOR=$(echo $CURRENT_VERSION | cut -d. -f2) - PATCH=$(echo $CURRENT_VERSION | cut -d. -f3) - - case $1 in - "major") - let "++MAJOR" - MINOR=0 - PATCH=0 - ;; - "minor") - # Preincrement to avoid early exit with set -e: - # https://stackoverflow.com/questions/7247279/bash-set-e-and-i-0let-i-do-not-agree - let "++MINOR" - PATCH=0 - ;; - "patch") - let "++PATCH" - ;; - *) - abort "💀 Bad CLI usage! 
Version argument should be one of: major, minor or patch" - ;; - esac - - echo " - Current version: \"$CURRENT_VERSION\"" - NEW_VERSION="${MAJOR}.${MINOR}.${PATCH}" - echo " - New version: \"$NEW_VERSION\"" - - echo "⏳ Updating Cargo.toml..." - - # Works both on GNU and BSD sed (for macOS users) - # See: - # - https://unix.stackexchange.com/questions/401905/bsd-sed-vs-gnu-sed-and-i - # - https://stackoverflow.com/a/22084103/5148606 - sed -i.backup "s/^version = \"${CURRENT_VERSION}\"/version = \"${NEW_VERSION}\"/g" Cargo.toml - rm Cargo.toml.backup - - if [[ $(git diff Cargo.toml) ]]; then - echo "✅ Cargo.toml successfully updated." - else - abort_failed_to_update Cargo.toml - fi - - echo "⏳ Updating Cargo.lock..." - cargo check --tests - if [[ $(git diff Cargo.lock) ]]; then - echo "✅ Cargo.lock successfully updated." - else - abort_failed_to_update Cargo.lock - fi - - echo "⏳ Committing changes..." - git add Cargo.lock Cargo.toml - git commit -m "Release v${NEW_VERSION}" - - echo "🎉 Done!" 
-} - -main "$@" From f41514f64854b08a81ecb6107420acdb57cc227b Mon Sep 17 00:00:00 2001 From: Lorenzo Fontoura Date: Fri, 21 Apr 2023 12:53:13 +1000 Subject: [PATCH 0205/2104] docs: log startup host wait --- docker/start | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/start b/docker/start index bbeabd166a3..02d57748238 100755 --- a/docker/start +++ b/docker/start @@ -42,6 +42,7 @@ wait_for_ipfs() { then [ "$proto" = "https" ] && port=443 || port=80 fi + echo "Waiting for IPFS ($host:$port)" wait_for "$host:$port" -t 120 else echo "invalid IPFS URL: $1" @@ -67,6 +68,7 @@ run_graph_node() { postgres_url="postgresql://$postgres_user:$postgres_pass@$postgres_host:$postgres_port/$postgres_db?sslmode=prefer" wait_for_ipfs "$ipfs" + echo "Waiting for Postgres ($postgres_host:$postgres_port)" wait_for "$postgres_host:$postgres_port" -t 120 sleep 5 From dfeebaaf752b47fe9b18ca7671f7f23209781e29 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Thu, 11 May 2023 10:51:22 +0100 Subject: [PATCH 0206/2104] Cargo.toml: Update serde yaml (#4610) --- Cargo.lock | 28 ++++++++++------------------ core/Cargo.toml | 2 +- graph/Cargo.toml | 2 +- tests/Cargo.toml | 3 +-- 4 files changed, 13 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1f8b523a778..9dbd524de31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2636,12 +2636,6 @@ version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" -[[package]] -name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - [[package]] name = "linux-raw-sys" version = "0.3.1" @@ -4025,14 +4019,15 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" dependencies = [ "indexmap", + "itoa 1.0.1", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] @@ -5052,6 +5047,12 @@ dependencies = [ "void", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" + [[package]] name = "unsigned-varint" version = "0.7.1" @@ -5766,15 +5767,6 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "074914ea4eec286eb8d1fd745768504f420a1f7b7919185682a4a267bed7d2e7" -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" version = "0.5.1" diff --git a/core/Cargo.toml b/core/Cargo.toml index 02f90833495..5536e0f46a6 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -23,7 +23,7 @@ lru_time_cache = "0.11" semver = "1.0.16" serde = "1.0" serde_json = "1.0" -serde_yaml = "0.8" +serde_yaml = "0.9.21" # Switch to crates.io once tower 0.5 is released tower = { git = "https://github.com/tower-rs/tower.git", features = ["full"] } graph-runtime-wasm = { path = "../runtime/wasm" } diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 24c00d9683a..6a8b645d307 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -33,7 +33,7 @@ semver = { version = "1.0.16", features = ["serde"] } serde = { version = "1.0.126", features = ["rc"] } serde_derive = "1.0.125" serde_json = { version = "1.0", features = ["arbitrary_precision"] } -serde_yaml = "0.8" +serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } stable-hash_legacy = { version = 
"0.3.3", package = "stable-hash" } stable-hash = { version = "0.4.2" } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 854f898d9fc..b5f40a9e757 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -20,7 +20,7 @@ graph-server-index-node = { path = "../server/index-node" } graphql-parser = "0.4.0" hyper = "0.14" serde = "1.0" -serde_yaml = "0.8" +serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } tokio = { version = "1.28.0", features = ["rt", "macros", "process"] } uuid = { version = "1.3.2", features = ["v4"] } @@ -30,6 +30,5 @@ anyhow = "1.0.71" bollard = "0.10" lazy_static = "1.4.0" tokio-stream = "0.1" -serde_yaml = "0.8" cid = "0.10.1" graph-chain-near = { path = "../chain/near" } From 63a512d58e3fd815218e134349c97d2f70f9c37a Mon Sep 17 00:00:00 2001 From: Eduard Voiculescu Date: Thu, 11 May 2023 06:24:38 -0400 Subject: [PATCH 0207/2104] chain/substreams: Set id field downstream when making entity change Recent changes to `make_entity` require that entities have an `id` field. 
This fixes substreams to behave in that way --- chain/substreams/src/trigger.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index e2d92541a9f..0b46efb8c14 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -217,6 +217,8 @@ where logger, ); + data.insert(Word::from("id"), Value::from(&entity_id)); + let entity = state.entity_cache.make_entity(data)?; state.entity_cache.set(key, entity)?; } From 1ef49919b4498ef9e313334b548ca706bc4dbbae Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 11 May 2023 10:55:48 +0100 Subject: [PATCH 0208/2104] core, graph, graphql: Log hit rate for LfuCache --- core/src/subgraph/runner.rs | 5 ++++- graph/src/util/lfu_cache.rs | 33 ++++++++++++++++++++++++++++++ graphql/src/execution/execution.rs | 4 ++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index a8b7c28c24c..3159e89dce4 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -412,7 +412,10 @@ where "evicted_weight" => evict_stats.evicted_weight, "count" => evict_stats.new_count, "evicted_count" => evict_stats.evicted_count, - "stale_update" => evict_stats.stale_update); + "stale_update" => evict_stats.stale_update, + "hit_rate" => format!("{:.0}%", evict_stats.hit_rate_pct()), + "accesses" => evict_stats.accesses, + "evict_time_ms" => evict_stats.evict_time.as_millis()); // Check for offchain events and process them, including their entity modifications in the // set to be transacted. 
diff --git a/graph/src/util/lfu_cache.rs b/graph/src/util/lfu_cache.rs index eac04f93134..915e793ec59 100644 --- a/graph/src/util/lfu_cache.rs +++ b/graph/src/util/lfu_cache.rs @@ -72,8 +72,23 @@ pub struct EvictStats { pub stale_update: bool, /// How long eviction took pub evict_time: Duration, + /// The total number of cache accesses during this stale period + pub accesses: usize, + /// The total number of cache hits during this stale period + pub hits: usize, } +impl EvictStats { + /// The cache hit rate in percent. The underlying counters are reset at + /// the end of each stale period. + pub fn hit_rate_pct(&self) -> f64 { + if self.accesses > 0 { + self.hits as f64 / self.accesses as f64 * 100.0 + } else { + 100.0 + } + } +} /// Each entry in the cache has a frequency, which is incremented by 1 on access. Entries also have /// a weight, upon eviction first stale entries will be removed and then non-stale entries by order /// of least frequency until the max weight is respected. This cache only removes entries on calls @@ -85,6 +100,8 @@ pub struct LfuCache { total_weight: usize, stale_counter: u64, dead_weight: bool, + accesses: usize, + hits: usize, } impl Default for LfuCache { @@ -94,6 +111,8 @@ impl Default for LfuCache { total_weight: 0, stale_counter: 0, dead_weight: false, + accesses: 0, + hits: 0, } } } @@ -105,6 +124,8 @@ impl total_weight: 0, stale_counter: 0, dead_weight: ENV_VARS.mappings.entity_cache_dead_weight, + accesses: 0, + hits: 0, } } @@ -148,7 +169,9 @@ impl let key_entry = CacheEntry::cache_key(key); self.queue .change_priority_by(&key_entry, |(s, Reverse(f))| (s, Reverse(f + 1))); + self.accesses += 1; self.queue.get_mut(&key_entry).map(|x| { + self.hits += 1; x.0.will_stale = false; x.0 }) @@ -196,6 +219,8 @@ impl evicted_count: 0, stale_update: false, evict_time: Duration::from_millis(0), + accesses: 0, + hits: 0, }) } @@ -223,10 +248,16 @@ impl let start = Instant::now(); + let accesses = self.accesses; + let hits = self.hits; + 
self.stale_counter += 1; if self.stale_counter == stale_period { self.stale_counter = 0; + self.accesses = 0; + self.hits = 0; + // Entries marked `will_stale` were not accessed in this period. Properly mark them as // stale in their priorities. Also mark all entities as `will_stale` for the _next_ // period so that they will be marked stale next time unless they are updated or looked @@ -260,6 +291,8 @@ impl evicted_count: old_len - self.len(), stale_update: self.stale_counter == 0, evict_time: start.elapsed(), + accesses, + hits, }) } } diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index a7f49eea513..2e256cb76e6 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -160,6 +160,8 @@ fn log_lfu_evict_stats( evicted_count, stale_update, evict_time, + accesses, + hits, }) = evict_stats { { @@ -176,6 +178,8 @@ fn log_lfu_evict_stats( "weight" => new_weight, "weight_evicted" => evicted_weight, "stale_update" => stale_update, + "hit_rate" => format!("{:.0}%", hits as f64 / accesses as f64 * 100.0), + "accesses" => accesses, "evict_time_ms" => evict_time.as_millis() ) }); From 29e43c07e5080d10055393c557193b3f5e64f0c0 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 10 May 2023 09:27:52 +0530 Subject: [PATCH 0209/2104] chain,node: Add support to skip setting gas field for certain chains --- chain/ethereum/src/adapter.rs | 1 + chain/ethereum/src/env.rs | 12 +++++++ chain/ethereum/src/ethereum_adapter.rs | 18 ++++------ chain/ethereum/src/runtime/runtime_adapter.rs | 33 +++++++++++++++++-- node/src/main.rs | 1 + node/src/manager/commands/run.rs | 1 + store/postgres/src/chain_store.rs | 2 ++ 7 files changed, 54 insertions(+), 14 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 2319584a3e6..3f31a7eecf0 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -39,6 +39,7 @@ pub struct EthereumContractCall { pub block_ptr: 
BlockPtr, pub function: Function, pub args: Vec, + pub gas: Option } #[derive(Error, Debug)] diff --git a/chain/ethereum/src/env.rs b/chain/ethereum/src/env.rs index af7f6a97eb0..a5a3695c12c 100644 --- a/chain/ethereum/src/env.rs +++ b/chain/ethereum/src/env.rs @@ -83,6 +83,10 @@ pub struct EnvVars { /// The time to wait between polls when using polling block ingestor. /// The value is set in millis and the default is 1000. pub ingestor_polling_interval: Duration, + /// Set by the flag `ETHEREUM_CALL_GAS_SKIP`. The default value is empty. + /// This is a comma separated list of chains for which the gas field will not be set + /// when calling `eth_call`. + pub eth_call_skip_gas: Vec, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -124,6 +128,12 @@ impl From for EnvVars { target_triggers_per_block_range: x.target_triggers_per_block_range, genesis_block_number: x.genesis_block_number, ingestor_polling_interval: Duration::from_millis(x.ingestor_polling_interval), + eth_call_skip_gas: x + .eth_call_skip_gas + .split(',') + .filter(|s| !s.is_empty()) + .map(str::to_string) + .collect(), } } } @@ -171,4 +181,6 @@ struct Inner { genesis_block_number: u64, #[envconfig(from = "ETHEREUM_POLLING_INTERVAL", default = "1000")] ingestor_polling_interval: u64, + #[envconfig(from = "ETHEREUM_CALL_SKIP_GAS", default = "")] + eth_call_skip_gas: String, } diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 3b044900e39..14371961356 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -68,16 +68,6 @@ pub struct EthereumAdapter { call_only: bool, } -/// Gas limit for `eth_call`. The value of 50_000_000 is a protocol-wide parameter so this -/// should be changed only for debugging purposes and never on an indexer in the network. 
This -/// value was chosen because it is the Geth default -/// https://github.com/ethereum/go-ethereum/blob/e4b687cf462870538743b3218906940ae590e7fd/eth/ethconfig/config.go#L91. -/// It is not safe to set something higher because Geth will silently override the gas limit -/// with the default. This means that we do not support indexing against a Geth node with -/// `RPCGasCap` set below 50 million. -// See also f0af4ab0-6b7c-4b68-9141-5b79346a5f61. -const ETH_CALL_GAS: u32 = 50_000_000; - impl CheapClone for EthereumAdapter { fn cheap_clone(&self) -> Self { Self { @@ -425,6 +415,7 @@ impl EthereumAdapter { contract_address: Address, call_data: Bytes, block_ptr: BlockPtr, + gas: Option, ) -> impl Future + Send { let web3 = self.web3.clone(); let logger = Logger::new(&logger, o!("provider" => self.provider.clone())); @@ -447,11 +438,13 @@ impl EthereumAdapter { .run(move || { let call_data = call_data.clone(); let web3 = web3.cheap_clone(); - + // let get_gas = || { + // self. + // } async move { let req = CallRequest { to: Some(contract_address), - gas: Some(web3::types::U256::from(ETH_CALL_GAS)), + gas: gas.map(|val| web3::types::U256::from(val)), data: Some(call_data.clone()), from: None, gas_price: None, @@ -1226,6 +1219,7 @@ impl EthereumAdapterTrait for EthereumAdapter { call.address, Bytes(call_data.clone()), call.block_ptr.clone(), + call.gas, ) .map(move |result| { // Don't block handler execution on writing to the cache. 
diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index e7f083c82a4..1995c97322f 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -4,9 +4,11 @@ use crate::data_source::MappingABI; use crate::{ capabilities::NodeCapabilities, network::EthereumNetworkAdapters, Chain, DataSource, EthereumAdapter, EthereumAdapterTrait, EthereumContractCall, EthereumContractCallError, + ENV_VARS, }; use anyhow::{Context, Error}; use blockchain::HostFn; +use graph::blockchain::ChainIdentifier; use graph::runtime::gas::Gas; use graph::runtime::{AscIndexId, IndexForAscTypeId}; use graph::{ @@ -24,6 +26,16 @@ use graph_runtime_wasm::asc_abi::class::{AscEnumArray, EthereumValueKind}; use super::abi::{AscUnresolvedContractCall, AscUnresolvedContractCall_0_0_4}; +/// Gas limit for `eth_call`. The value of 50_000_000 is a protocol-wide parameter so this +/// should be changed only for debugging purposes and never on an indexer in the network. This +/// value was chosen because it is the Geth default +/// https://github.com/ethereum/go-ethereum/blob/e4b687cf462870538743b3218906940ae590e7fd/eth/ethconfig/config.go#L91. +/// It is not safe to set something higher because Geth will silently override the gas limit +/// with the default. This means that we do not support indexing against a Geth node with +/// `RPCGasCap` set below 50 million. +// See also f0af4ab0-6b7c-4b68-9141-5b79346a5f61. +const ETH_CALL_GAS: u32 = 50_000_000; + // When making an ethereum call, the maximum ethereum gas is ETH_CALL_GAS which is 50 million. One // unit of Ethereum gas is at least 100ns according to these benchmarks [1], so 1000 of our gas. In // the worst case an Ethereum call could therefore consume 50 billion of our gas. 
However the @@ -37,6 +49,7 @@ pub const ETHEREUM_CALL: Gas = Gas::new(5_000_000_000); pub struct RuntimeAdapter { pub eth_adapters: Arc, pub call_cache: Arc, + pub chain_identifier: Arc, } impl blockchain::RuntimeAdapter for RuntimeAdapter { @@ -45,6 +58,11 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { let call_cache = self.call_cache.cheap_clone(); let eth_adapters = self.eth_adapters.cheap_clone(); let archive = ds.mapping.requires_archive()?; + let eth_call_gas = ENV_VARS + .eth_call_skip_gas + .contains(&self.chain_identifier.net_version) + .then(|| None) + .unwrap_or(Some(ETH_CALL_GAS)); let ethereum_call = HostFn { name: "ethereum.call", @@ -54,8 +72,15 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { archive, traces: false, }))?; - ethereum_call(ð_adapter, call_cache.cheap_clone(), ctx, wasm_ptr, &abis) - .map(|ptr| ptr.wasm_ptr()) + ethereum_call( + ð_adapter, + call_cache.cheap_clone(), + ctx, + wasm_ptr, + &abis, + eth_call_gas, + ) + .map(|ptr| ptr.wasm_ptr()) }), }; @@ -70,6 +95,7 @@ fn ethereum_call( ctx: HostFnCtx<'_>, wasm_ptr: u32, abis: &[Arc], + eth_call_gas: Option, ) -> Result, HostExportError> { ctx.gas.consume_host_fn(ETHEREUM_CALL)?; @@ -89,6 +115,7 @@ fn ethereum_call( &ctx.block_ptr, call, abis, + eth_call_gas, )?; match result { Some(tokens) => Ok(asc_new(ctx.heap, tokens.as_slice(), &ctx.gas)?), @@ -104,6 +131,7 @@ fn eth_call( block_ptr: &BlockPtr, unresolved_call: UnresolvedContractCall, abis: &[Arc], + eth_call_gas: Option, ) -> Result>, HostExportError> { let start_time = Instant::now(); @@ -163,6 +191,7 @@ fn eth_call( block_ptr: block_ptr.cheap_clone(), function: function.clone(), args: unresolved_call.function_args.clone(), + gas: eth_call_gas, }; // Run Ethereum call in tokio runtime diff --git a/node/src/main.rs b/node/src/main.rs index aad0c9f26e6..cd42d414f9c 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -727,6 +727,7 @@ fn ethereum_networks_as_chains( let runtime_adapter = Arc::new(RuntimeAdapter { 
eth_adapters: Arc::new(eth_adapters.clone()), call_cache: chain_store.cheap_clone(), + chain_identifier: Arc::new(chain_store.chain_identifier.clone()), }); let chain = ethereum::Chain::new( diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index 856484d082f..ee630685485 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -151,6 +151,7 @@ pub async fn run( Arc::new(EthereumRuntimeAdapter { call_cache: chain_store.cheap_clone(), eth_adapters: Arc::new(eth_adapters2), + chain_identifier: Arc::new(chain_store.chain_identifier.clone()), }), graph::env::ENV_VARS.reorg_threshold, ethereum::ENV_VARS.ingestor_polling_interval, diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 882fd7e6b04..93ffd27bef9 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -1412,6 +1412,7 @@ pub struct ChainStore { pool: ConnectionPool, pub chain: String, pub(crate) storage: data::Storage, + pub chain_identifier: ChainIdentifier, genesis_block_ptr: BlockPtr, status: ChainStatus, chain_head_update_sender: ChainHeadUpdateSender, @@ -1444,6 +1445,7 @@ impl ChainStore { genesis_block_ptr: BlockPtr::new(net_identifier.genesis_block_hash.clone(), 0), status, chain_head_update_sender, + chain_identifier: net_identifier.clone(), recent_blocks_cache, } } From 29b716d6db8bd3f1758dc1a70f0ed5756cffcac6 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 10 May 2023 20:30:48 +0530 Subject: [PATCH 0210/2104] Refactor --- chain/ethereum/src/env.rs | 14 +++++++------- chain/ethereum/src/ethereum_adapter.rs | 4 +--- chain/ethereum/src/runtime/runtime_adapter.rs | 16 +++++++++++----- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/chain/ethereum/src/env.rs b/chain/ethereum/src/env.rs index a5a3695c12c..d85a8b6e0e7 100644 --- a/chain/ethereum/src/env.rs +++ b/chain/ethereum/src/env.rs @@ -83,10 +83,10 @@ pub struct EnvVars { /// The time to wait 
between polls when using polling block ingestor. /// The value is set in millis and the default is 1000. pub ingestor_polling_interval: Duration, - /// Set by the flag `ETHEREUM_CALL_GAS_SKIP`. The default value is empty. - /// This is a comma separated list of chains for which the gas field will not be set + /// Set by the flag `ETHEREUM_CALL_NO_GAS`. + /// This is a comma separated list of chain ids for which the gas field will not be set /// when calling `eth_call`. - pub eth_call_skip_gas: Vec, + pub eth_call_no_gas: Vec, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -128,8 +128,8 @@ impl From for EnvVars { target_triggers_per_block_range: x.target_triggers_per_block_range, genesis_block_number: x.genesis_block_number, ingestor_polling_interval: Duration::from_millis(x.ingestor_polling_interval), - eth_call_skip_gas: x - .eth_call_skip_gas + eth_call_no_gas: x + .eth_call_no_gas .split(',') .filter(|s| !s.is_empty()) .map(str::to_string) @@ -181,6 +181,6 @@ struct Inner { genesis_block_number: u64, #[envconfig(from = "ETHEREUM_POLLING_INTERVAL", default = "1000")] ingestor_polling_interval: u64, - #[envconfig(from = "ETHEREUM_CALL_SKIP_GAS", default = "")] - eth_call_skip_gas: String, + #[envconfig(from = "GRAPH_ETH_CALL_NO_GAS", default = "")] + eth_call_no_gas: String, } diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 14371961356..6083fa52b25 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -438,9 +438,7 @@ impl EthereumAdapter { .run(move || { let call_data = call_data.clone(); let web3 = web3.cheap_clone(); - // let get_gas = || { - // self. 
- // } + println!("====================== gas {:?}", gas); async move { let req = CallRequest { to: Some(contract_address), diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index 1995c97322f..b7e14017d2c 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -58,11 +58,17 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { let call_cache = self.call_cache.cheap_clone(); let eth_adapters = self.eth_adapters.cheap_clone(); let archive = ds.mapping.requires_archive()?; - let eth_call_gas = ENV_VARS - .eth_call_skip_gas - .contains(&self.chain_identifier.net_version) - .then(|| None) - .unwrap_or(Some(ETH_CALL_GAS)); + + // Check if the current network version is in the eth_call_no_gas list + let should_skip_gas = ENV_VARS + .eth_call_no_gas + .contains(&self.chain_identifier.net_version); + + let eth_call_gas = if should_skip_gas { + None + } else { + Some(ETH_CALL_GAS) + }; let ethereum_call = HostFn { name: "ethereum.call", From 2ebec0ae9b4bc2ff5d3768e11cbe53f09212d193 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 10 May 2023 22:41:57 +0530 Subject: [PATCH 0211/2104] change env name for skipping eth_call gas --- chain/ethereum/src/adapter.rs | 2 +- chain/ethereum/src/env.rs | 4 ++-- chain/ethereum/src/ethereum_adapter.rs | 1 - chain/ethereum/src/runtime/runtime_adapter.rs | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 3f31a7eecf0..18b3432b03b 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -39,7 +39,7 @@ pub struct EthereumContractCall { pub block_ptr: BlockPtr, pub function: Function, pub args: Vec, - pub gas: Option + pub gas: Option, } #[derive(Error, Debug)] diff --git a/chain/ethereum/src/env.rs b/chain/ethereum/src/env.rs index d85a8b6e0e7..b5f82f27353 100644 --- a/chain/ethereum/src/env.rs +++ 
b/chain/ethereum/src/env.rs @@ -83,7 +83,7 @@ pub struct EnvVars { /// The time to wait between polls when using polling block ingestor. /// The value is set in millis and the default is 1000. pub ingestor_polling_interval: Duration, - /// Set by the flag `ETHEREUM_CALL_NO_GAS`. + /// Set by the flag `GRAPH_ETH_CALL_NO_GAS`. /// This is a comma separated list of chain ids for which the gas field will not be set /// when calling `eth_call`. pub eth_call_no_gas: Vec, @@ -181,6 +181,6 @@ struct Inner { genesis_block_number: u64, #[envconfig(from = "ETHEREUM_POLLING_INTERVAL", default = "1000")] ingestor_polling_interval: u64, - #[envconfig(from = "GRAPH_ETH_CALL_NO_GAS", default = "")] + #[envconfig(from = "GRAPH_ETH_CALL_NO_GAS", default = "421613")] eth_call_no_gas: String, } diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 6083fa52b25..9d9ad7089e3 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -438,7 +438,6 @@ impl EthereumAdapter { .run(move || { let call_data = call_data.clone(); let web3 = web3.cheap_clone(); - println!("====================== gas {:?}", gas); async move { let req = CallRequest { to: Some(contract_address), diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index b7e14017d2c..289dc8b77e4 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -63,7 +63,7 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { let should_skip_gas = ENV_VARS .eth_call_no_gas .contains(&self.chain_identifier.net_version); - + let eth_call_gas = if should_skip_gas { None } else { From 5ba079fff124f621b6beeba9dc39b81920a2f85b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 11 May 2023 11:58:02 +0100 Subject: [PATCH 0212/2104] chain/substreams: Fix setting of id for realz --- chain/substreams/src/trigger.rs | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 0b46efb8c14..3d4b1cf7a62 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -217,7 +217,8 @@ where logger, ); - data.insert(Word::from("id"), Value::from(&entity_id)); + let id = state.entity_cache.schema.id_value(&key)?; + data.insert(Word::from("id"), id); let entity = state.entity_cache.make_entity(data)?; state.entity_cache.set(key, entity)?; From 05b2417deee56843b94e4d60e101d09f23faeada Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 16 May 2023 08:54:48 -0700 Subject: [PATCH 0213/2104] chain: Avoid a clone when decoding Bytes from substreams (#4614) --- chain/substreams/src/trigger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 3d4b1cf7a62..1df42a49bd2 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -276,7 +276,7 @@ fn decode_value(value: &crate::codec::value::Typed) -> Result base64::decode(new_value) - .map(|bs| Value::Bytes(Bytes::from(bs.as_ref()))) + .map(|bs| Value::Bytes(Bytes::from(bs))) .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), From b3e8ad1c1b2446c36b93a47b301bceca69f71dca Mon Sep 17 00:00:00 2001 From: Krishnanand V P <44740264+incrypto32@users.noreply.github.com> Date: Tue, 16 May 2023 22:07:10 +0530 Subject: [PATCH 0214/2104] node: Add start block flag to graphman rewind (#4400) * store, node : Add a method to truncate tables in deployment store, Layout, subgraph store and wire them in graphman rewind * Add graphman rewind block-number and block-hash flags to NEWS.md --- NEWS.md | 2 + node/src/bin/manager.rs | 22 +++++++++- node/src/manager/commands/rewind.rs | 59 ++++++++++++++++++++------ store/postgres/src/deployment_store.rs | 42 +++++++++++++++--- store/postgres/src/relational.rs 
| 7 +++ store/postgres/src/subgraph_store.rs | 15 +++++++ 6 files changed, 127 insertions(+), 20 deletions(-) diff --git a/NEWS.md b/NEWS.md index f825a7f16f0..7099bc40711 100644 --- a/NEWS.md +++ b/NEWS.md @@ -7,6 +7,8 @@ performing an initial pruning. To avoid ongoing pruning, use `graphman prune --once` ([docs](./docs/implementation/pruning.md)) - the materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. [#4461](https://github.com/graphprotocol/graph-node/pull/4461) +- `graphman rewind` has changed, block-number and block-hash are now flags instead of arguments +- `graphman rewind` now has an extra flag `--start-block` which will rewind to the startBlock set in manifest or to the genesis block if no startBlock is set ### Fixes - fulltext searches now support additional `where` filters making it possible to narrow the results of a fulltext search with other criteria [#1823](https://github.com/graphprotocol/graph-node/issues/1823) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index ba9ea30fe41..0e6b6cd26e8 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -164,6 +164,9 @@ pub enum Command { /// database #[clap(long, short)] force: bool, + /// Rewind to the start block of the subgraph + #[clap(long)] + start_block: bool, /// Sleep for this many seconds after pausing subgraphs #[clap( long, @@ -173,10 +176,23 @@ pub enum Command { )] sleep: Duration, /// The block hash of the target block - block_hash: String, + #[clap( + required_unless_present = "start-block", + conflicts_with = "start-block", + long, + short = 'H' + )] + block_hash: Option, /// The block number of the target block - block_number: i32, + #[clap( + required_unless_present = "start-block", + conflicts_with = "start-block", + long, + short = 'n' + )] + block_number: Option, /// The deployments to rewind (see 
`help info`) + #[clap(required = true, min_values = 1)] deployments: Vec, }, /// Deploy and run an arbitrary subgraph up to a certain block @@ -1081,6 +1097,7 @@ async fn main() -> anyhow::Result<()> { block_hash, block_number, deployments, + start_block, } => { let (store, primary) = ctx.store_and_primary(); commands::rewind::run( @@ -1091,6 +1108,7 @@ async fn main() -> anyhow::Result<()> { block_number, force, sleep, + start_block, ) .await } diff --git a/node/src/manager/commands/rewind.rs b/node/src/manager/commands/rewind.rs index b370fa34b0f..60e8a04d271 100644 --- a/node/src/manager/commands/rewind.rs +++ b/node/src/manager/commands/rewind.rs @@ -59,13 +59,19 @@ pub async fn run( primary: ConnectionPool, store: Arc, searches: Vec, - block_hash: String, - block_number: BlockNumber, + block_hash: Option, + block_number: Option, force: bool, sleep: Duration, + start_block: bool, ) -> Result<(), anyhow::Error> { const PAUSED: &str = "paused_"; + // Sanity check + if !start_block && (block_hash.is_none() || block_number.is_none()) { + bail!("--block-hash and --block-number must be specified when --start-block is not set"); + } + let subgraph_store = store.subgraph_store(); let block_store = store.block_store(); @@ -81,15 +87,21 @@ pub async fn run( return Ok(()); } - let block_ptr_to = block_ptr( - block_store, - &searches, - &deployments, - &block_hash, - block_number, - force, - ) - .await?; + let block_ptr_to = if start_block { + None + } else { + Some( + block_ptr( + block_store, + &searches, + &deployments, + block_hash.as_deref().unwrap_or_default(), + block_number.unwrap_or_default(), + force, + ) + .await?, + ) + }; println!("Pausing deployments"); let mut paused = false; @@ -116,8 +128,29 @@ pub async fn run( println!("\nRewinding deployments"); for deployment in &deployments { let loc = deployment.locator(); - subgraph_store.rewind(loc.hash.clone(), block_ptr_to.clone())?; - println!(" ... 
rewound {}", loc); + let block_store = store.block_store(); + let deployment_details = subgraph_store.load_deployment_by_id(loc.clone().into())?; + let block_ptr_to = block_ptr_to.clone(); + + let start_block = deployment_details.start_block.or_else(|| { + block_store + .chain_store(&deployment.chain) + .and_then(|chain_store| chain_store.genesis_block_ptr().ok()) + }); + + match (block_ptr_to, start_block) { + (Some(block_ptr), _) => { + subgraph_store.rewind(loc.hash.clone(), block_ptr)?; + println!(" ... rewound {}", loc); + } + (None, Some(start_block_ptr)) => { + subgraph_store.truncate(loc.hash.clone(), start_block_ptr)?; + println!(" ... truncated {}", loc); + } + (None, None) => { + println!(" ... Failed to find start block for {}", loc); + } + } } println!("Resuming deployments"); diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index c4c8f2d80eb..32f25153917 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1315,12 +1315,13 @@ impl DeploymentStore { Ok(()) } - fn rewind_with_conn( + fn rewind_or_truncate_with_conn( &self, conn: &PgConnection, site: Arc, block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, + truncate: bool, ) -> Result { let event = deployment::with_lock(conn, &site, || { conn.transaction(|| -> Result<_, StoreError> { @@ -1353,7 +1354,15 @@ impl DeploymentStore { // Revert the data let layout = self.layout(conn, site.clone())?; - let (event, count) = layout.revert_block(conn, block)?; + let event = if truncate { + let event = layout.truncate_tables(conn)?; + deployment::set_entity_count(conn, site.as_ref(), layout.count_query.as_str())?; + event + } else { + let (event, count) = layout.revert_block(conn, block)?; + deployment::update_entity_count(conn, site.as_ref(), count)?; + event + }; // Revert the meta data changes that correspond to this subgraph. 
// Only certain meta data changes need to be reverted, most @@ -1362,7 +1371,6 @@ impl DeploymentStore { // changes that might need to be reverted Layout::revert_metadata(conn, &site, block)?; - deployment::update_entity_count(conn, site.as_ref(), count)?; Ok(event) }) })?; @@ -1370,6 +1378,30 @@ impl DeploymentStore { Ok(event) } + pub(crate) fn truncate( + &self, + site: Arc, + block_ptr_to: BlockPtr, + ) -> Result { + let conn = self.get_conn()?; + + // Unwrap: If we are reverting then the block ptr is not `None`. + let block_ptr_from = Self::block_ptr_with_conn(&conn, site.cheap_clone())?.unwrap(); + + // Sanity check on block numbers + if block_ptr_from.number <= block_ptr_to.number { + constraint_violation!( + "truncate must go backwards, but would go from block {} to block {}", + block_ptr_from.number, + block_ptr_to.number + ); + } + + // When rewinding, we reset the firehose cursor. That way, on resume, Firehose will start + // from the block_ptr instead (with sanity check to ensure it's resume at the exact block). + self.rewind_or_truncate_with_conn(&conn, site, block_ptr_to, &FirehoseCursor::None, true) + } + pub(crate) fn rewind( &self, site: Arc, @@ -1391,7 +1423,7 @@ impl DeploymentStore { // When rewinding, we reset the firehose cursor. That way, on resume, Firehose will start // from the block_ptr instead (with sanity check to ensure it's resume at the exact block). 
- self.rewind_with_conn(&conn, site, block_ptr_to, &FirehoseCursor::None) + self.rewind_or_truncate_with_conn(&conn, site, block_ptr_to, &FirehoseCursor::None, false) } pub(crate) fn revert_block_operations( @@ -1409,7 +1441,7 @@ impl DeploymentStore { panic!("revert_block_operations must revert only backward, you are trying to revert forward going from subgraph block {} to new block {}", deployment_head, block_ptr_to); } - self.rewind_with_conn(&conn, site, block_ptr_to, firehose_cursor) + self.rewind_or_truncate_with_conn(&conn, site, block_ptr_to, firehose_cursor, false) } pub(crate) async fn deployment_state_from_id( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index f5a79061c8a..30fbb04e4f1 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -863,6 +863,13 @@ impl Layout { Ok(count) } + pub fn truncate_tables(&self, conn: &PgConnection) -> Result { + for table in self.tables.values() { + conn.execute(&format!("TRUNCATE TABLE {}", table.qualified_name))?; + } + Ok(StoreEvent::new(vec![])) + } + /// Revert the block with number `block` and all blocks with higher /// numbers. 
After this operation, only entity versions inserted or /// updated at blocks with numbers strictly lower than `block` will diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 0529b1c9954..21cfa6f58af 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -950,6 +950,12 @@ impl SubgraphStoreInner { self.send_store_event(&event) } + pub fn truncate(&self, id: DeploymentHash, block_ptr_to: BlockPtr) -> Result<(), StoreError> { + let (store, site) = self.store(&id)?; + let event = store.truncate(site, block_ptr_to)?; + self.send_store_event(&event) + } + pub(crate) async fn get_proof_of_indexing( &self, id: &DeploymentHash, @@ -1154,6 +1160,15 @@ impl SubgraphStoreInner { let src_store = self.for_site(site)?; src_store.load_deployment(site) } + + pub fn load_deployment_by_id( + &self, + id: DeploymentId, + ) -> Result { + let site = self.find_site(id)?; + let src_store = self.for_site(&site)?; + src_store.load_deployment(&site) + } } const STATE_ENS_NOT_CHECKED: u8 = 0; From 0f7d34ec7dd3d63f9f382a495bafa6d5a0033ed7 Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Wed, 17 May 2023 16:48:41 +0200 Subject: [PATCH 0215/2104] `v0.31.0-rc.0` release notes (#4593) * cargo: update workspace crates' version to v0.31.0 * news: v0.31.0 release notes * news: fix wording in release notes entry * news: add links to latest gas pricing PRs * NEWS.md: small change to wording --- Cargo.lock | 42 ++++++++++++++++----------------- Cargo.toml | 2 +- NEWS.md | 68 ++++++++++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 83 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dbd524de31..224374bceac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1537,7 +1537,7 @@ dependencies = [ [[package]] name = "graph" -version = "0.30.0" +version = "0.31.0" dependencies = [ "Inflector", "anyhow", @@ -1603,7 +1603,7 @@ dependencies = [ [[package]] name = "graph-chain-arweave" 
-version = "0.30.0" +version = "0.31.0" dependencies = [ "base64-url", "diesel", @@ -1619,7 +1619,7 @@ dependencies = [ [[package]] name = "graph-chain-common" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "heck 0.4.1", @@ -1629,7 +1629,7 @@ dependencies = [ [[package]] name = "graph-chain-cosmos" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "graph", @@ -1645,7 +1645,7 @@ dependencies = [ [[package]] name = "graph-chain-ethereum" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "base64 0.20.0", @@ -1671,7 +1671,7 @@ dependencies = [ [[package]] name = "graph-chain-near" -version = "0.30.0" +version = "0.31.0" dependencies = [ "base64 0.20.0", "diesel", @@ -1686,7 +1686,7 @@ dependencies = [ [[package]] name = "graph-chain-substreams" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "async-stream", @@ -1712,7 +1712,7 @@ dependencies = [ [[package]] name = "graph-core" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "async-stream", @@ -1744,7 +1744,7 @@ dependencies = [ [[package]] name = "graph-graphql" -version = "0.30.0" +version = "0.31.0" dependencies = [ "Inflector", "anyhow", @@ -1763,7 +1763,7 @@ dependencies = [ [[package]] name = "graph-node" -version = "0.30.0" +version = "0.31.0" dependencies = [ "clap", "diesel", @@ -1800,7 +1800,7 @@ dependencies = [ [[package]] name = "graph-runtime-derive" -version = "0.30.0" +version = "0.31.0" dependencies = [ "heck 0.4.1", "proc-macro2", @@ -1810,7 +1810,7 @@ dependencies = [ [[package]] name = "graph-runtime-test" -version = "0.30.0" +version = "0.31.0" dependencies = [ "graph", "graph-chain-ethereum", @@ -1825,7 +1825,7 @@ dependencies = [ [[package]] name = "graph-runtime-wasm" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "async-trait", @@ -1851,7 +1851,7 @@ dependencies = [ [[package]] name = "graph-server-http" -version = "0.30.0" +version = "0.31.0" dependencies = [ "futures 0.1.31", 
"graph", @@ -1865,7 +1865,7 @@ dependencies = [ [[package]] name = "graph-server-index-node" -version = "0.30.0" +version = "0.31.0" dependencies = [ "blake3 1.3.3", "either", @@ -1885,7 +1885,7 @@ dependencies = [ [[package]] name = "graph-server-json-rpc" -version = "0.30.0" +version = "0.31.0" dependencies = [ "graph", "jsonrpsee", @@ -1894,7 +1894,7 @@ dependencies = [ [[package]] name = "graph-server-metrics" -version = "0.30.0" +version = "0.31.0" dependencies = [ "graph", "hyper", @@ -1902,7 +1902,7 @@ dependencies = [ [[package]] name = "graph-server-websocket" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "futures 0.1.31", @@ -1918,7 +1918,7 @@ dependencies = [ [[package]] name = "graph-store-postgres" -version = "0.30.0" +version = "0.31.0" dependencies = [ "Inflector", "anyhow", @@ -1953,7 +1953,7 @@ dependencies = [ [[package]] name = "graph-tests" -version = "0.30.0" +version = "0.31.0" dependencies = [ "anyhow", "assert-json-diff", @@ -4408,7 +4408,7 @@ dependencies = [ [[package]] name = "test-store" -version = "0.30.0" +version = "0.31.0" dependencies = [ "diesel", "graph", diff --git a/Cargo.toml b/Cargo.toml index 34f001ca5e4..ea918860e35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ members = [ ] [workspace.package] -version = "0.30.0" +version = "0.31.0" edition = "2021" authors = ["The Graph core developers & contributors"] readme = "README.md" diff --git a/NEWS.md b/NEWS.md index 7099bc40711..176faf76aef 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,17 +2,71 @@ ## Unreleased -- the behavior for `graphman prune` has changed: running just `graphman - prune` will mark the subgraph for ongoing pruning in addition to - performing an initial pruning. 
To avoid ongoing pruning, use `graphman - prune --once` ([docs](./docs/implementation/pruning.md)) -- the materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. [#4461](https://github.com/graphprotocol/graph-node/pull/4461) - `graphman rewind` has changed, block-number and block-hash are now flags instead of arguments - `graphman rewind` now has an extra flag `--start-block` which will rewind to the startBlock set in manifest or to the genesis block if no startBlock is set + + + +## v0.31.0 + +### What's new +- **Fulltext searches can now be combined with `where` filtering**, further narrowing down search results. [#4442](https://github.com/graphprotocol/graph-node/pull/4442) +- Tweaked how RPC provider limiting rules are interpreted from configurations. In particular, node IDs that don't match any rules of a provider won't have access to said provider instead of having access to it for an unlimited number of subgraphs. Read the [docs](https://github.com/graphprotocol/graph-node/pull/4353/files) for more information. [#4353](https://github.com/graphprotocol/graph-node/pull/4353) +- Introduced WASM host function `store.get_in_block`, which is a much faster variant of `store.get` limited to entities created or updated in the current block. [#4540](https://github.com/graphprotocol/graph-node/pull/4540) +- The entity cache that `graph-node` keeps around is much more efficient, meaning more cache entries fit in the same amount of memory resulting in a performance increase under a wide range of workloads. [#4485](https://github.com/graphprotocol/graph-node/pull/4485) +- The `subgraph_deploy` JSON-RPC method now accepts a `history_blocks` parameter, which indexers can use to set default amounts of history to keep. 
[#4564](https://github.com/graphprotocol/graph-node/pull/4564) +- IPFS requests for polling file data sources are not throttled anymore (also known as concurrency or burst limiting), only rate-limited. [#4570](https://github.com/graphprotocol/graph-node/pull/4570) +- Exponential requests backoff when retrying failed subgraphs is now "jittered", smoothing out request spikes. [#4476](https://github.com/graphprotocol/graph-node/pull/4476) +- RPC provider responses that decrease the chain head block number (non-monotonic) are now ignored, increasing resiliency against inconsistent provider data. [#4354](https://github.com/graphprotocol/graph-node/pull/4354) +- It's now possible to have a Firehose-only chain with no RPC provider at all in the configuration. [#4508](https://github.com/graphprotocol/graph-node/pull/4508), [#4553](https://github.com/graphprotocol/graph-node/pull/4553) +- The materialized views in the `info` schema (`table_sizes`, `subgraph_sizes`, and `chain_sizes`) that provide information about the size of various database objects are now automatically refreshed every 6 hours. [#4461](https://github.com/graphprotocol/graph-node/pull/4461) +- Adapter selection now takes error rates into account, preferring adapters with lower error rates. [#4468](https://github.com/graphprotocol/graph-node/pull/4468) +- The substreams protocol has been updated to `sf.substreams.rpc.v2.Stream/Blocks`. [#4556](https://github.com/graphprotocol/graph-node/pull/4556) +- Removed support for `GRAPH_ETHEREUM_IS_FIREHOSE_PREFERRED`, `REVERSIBLE_ORDER_BY_OFF`, and `GRAPH_STORE_CONNECTION_TRY_ALWAYS` env. variables. [#4375](https://github.com/graphprotocol/graph-node/pull/4375), [#4436](https://github.com/graphprotocol/graph-node/pull/4436) + +### Bug fixes +- Fixed a bug that would cause subgraphs to fail with a `subgraph writer poisoned by previous error` message following certain database errors. 
[#4533](https://github.com/graphprotocol/graph-node/pull/4533) +- Fixed a bug that would cause subgraphs to fail with a `store error: no connection to the server` message when database connection e.g. gets killed. [#4435](https://github.com/graphprotocol/graph-node/pull/4435) +- The `subgraph_reassign` JSON-RPC method doesn't fail anymore when multiple deployment copies are found: only the active copy is reassigned, the others are ignored. [#4395](https://github.com/graphprotocol/graph-node/pull/4395) +- Fixed a bug that would cause `on_sync` handlers on copied deployments to fail with the message `Subgraph instance failed to run: deployment not found [...]`. [#4396](https://github.com/graphprotocol/graph-node/pull/4396) +- Fixed a bug that would cause the copying or grafting of a subgraph while pruning it to incorrectly set `earliest_block` in the destination deployment. [#4502](https://github.com/graphprotocol/graph-node/pull/4502) +- Handler timeouts would sometimes be reported as deterministic errors with the error message `Subgraph instance failed to run: Failed to call 'asc_type_id' with [...] wasm backtrace [...]`; this error is now nondeterministic and recoverable. [#4475](https://github.com/graphprotocol/graph-node/pull/4475) +- Fixed faulty exponential request backoff behavior after many minutes of failed requests, caused by an overflow. [#4421](https://github.com/graphprotocol/graph-node/pull/4421) +- `json.fromBytes` and all `BigInt` operations now require more gas, protecting against malicious subgraphs. [#4594](https://github.com/graphprotocol/graph-node/pull/4594), [#4595](https://github.com/graphprotocol/graph-node/pull/4595) +- Fixed faulty `startBlock` selection logic in substreams. [#4463](https://github.com/graphprotocol/graph-node/pull/4463) + +### Graphman +- The behavior for `graphman prune` has changed: running just `graphman prune` will mark the subgraph for ongoing pruning in addition to performing an initial pruning. 
To avoid ongoing pruning, use `graphman prune --once` ([docs](./docs/implementation/pruning.md)). [#4429](https://github.com/graphprotocol/graph-node/pull/4429) +- The env. var. `GRAPH_STORE_HISTORY_COPY_THRESHOLD` –which serves as a configuration setting for `graphman prune`– has been renamed to `GRAPH_STORE_HISTORY_REBUILD_THRESHOLD`. [#4505](https://github.com/graphprotocol/graph-node/pull/4505) +- You can now list all existing deployments via `graphman info --all`. [#4347](https://github.com/graphprotocol/graph-node/pull/4347) +- The command `graphman chain call-cache remove` now requires `--remove-entire-cache` as an explicit flag, protecting against accidental destructive command invocations. [#4397](https://github.com/graphprotocol/graph-node/pull/4397) +- `graphman copy create` accepts two new flags, `--activate` and `--replace`, which make moving of subgraphs across shards much easier. [#4374](https://github.com/graphprotocol/graph-node/pull/4374) +- The log level for `graphman` is now set via `GRAPHMAN_LOG` or command line instead of `GRAPH_LOG`. [#4462](https://github.com/graphprotocol/graph-node/pull/4462) +- `graphman reassign` now emits a warning when it suspects a typo in node IDs. [#4377](https://github.com/graphprotocol/graph-node/pull/4377) + +### Metrics and logging +- Subgraph syncing time metric `deployment_sync_secs` now stops updating once the subgraph has synced. [#4489](https://github.com/graphprotocol/graph-node/pull/4489) +- New `endpoint_request` metric to track error rates of different providers. [#4490](https://github.com/graphprotocol/graph-node/pull/4490), [#4504](https://github.com/graphprotocol/graph-node/pull/4504), [#4430](https://github.com/graphprotocol/graph-node/pull/4430) +- New metrics `chain_head_cache_num_blocks`, `chain_head_cache_oldest_block`, `chain_head_cache_latest_block`, `chain_head_cache_hits`, and `chain_head_cache_misses` to monitor the effectiveness of `graph-node`'s in-memory chain head caches. 
[#4440](https://github.com/graphprotocol/graph-node/pull/4440) +- The subgraph error message `store error: Failed to remove entities` is now more detailed and contains more useful information. [#4367](https://github.com/graphprotocol/graph-node/pull/4367) +- `eth_call` logs now include the provider string. [#4548](https://github.com/graphprotocol/graph-node/pull/4548) +- Tweaks and small changes to log messages when resolving data sources, mappings, and manifests. [#4399](https://github.com/graphprotocol/graph-node/pull/4399) +- `FirehoseBlockStream` and `FirehoseBlockIngestor` now log adapter names. [#4411](https://github.com/graphprotocol/graph-node/pull/4411) +- The `deployment_count` metric has been split into `deployment_running_count` and `deployment_count`. [#4401](https://github.com/graphprotocol/graph-node/pull/4401), [#4398](https://github.com/graphprotocol/graph-node/pull/4398) + + +**Full Changelog**: https://github.com/graphprotocol/graph-node/compare/v0.30.0...aa6677a38 ## v0.30.0 From d9550eb0439e3a7857ec6d43fecc6aade8a4a9da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 16:51:10 +0200 Subject: [PATCH 0216/2104] build(deps): bump tokio from 1.28.0 to 1.28.1 (#4617) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.0 to 1.28.1. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.28.0...tokio-1.28.1) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 224374bceac..aa757e690ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4534,9 +4534,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "bytes", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 6a8b645d307..ad1b3fd6c13 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -44,7 +44,7 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" -tokio = { version = "1.28.0", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.28.1", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-retry = "0.3.0" url = "2.3.1" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index b5f40a9e757..54912491cff 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,7 +22,7 @@ hyper = "0.14" serde = "1.0" serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } -tokio = { version = "1.28.0", features = ["rt", "macros", "process"] } +tokio = { version = "1.28.1", features = ["rt", "macros", "process"] } uuid = { version = "1.3.2", features = ["v4"] } [dev-dependencies] From bc4103a2c99be29fd1ae1a663b8e7e46d99e7f04 Mon Sep 17 00:00:00 2001 From: Filippo Neysofu Costa Date: Wed, 17 May 2023 17:09:15 +0200 Subject: [PATCH 
0217/2104] core, graph: de-`Arc` `IpfsClient` (#4603) * graph: make reqwest::Client CheapClone-able * core,graph: de-Arc IpfsClient and reqwest::Client * graph: add comments for reqwest::Client: CheapClone --- core/src/link_resolver.rs | 8 ++++---- graph/src/cheap_clone.rs | 2 ++ graph/src/ipfs_client.rs | 8 +++++--- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/core/src/link_resolver.rs b/core/src/link_resolver.rs index cebc39d5d6a..364dbd76f10 100644 --- a/core/src/link_resolver.rs +++ b/core/src/link_resolver.rs @@ -49,13 +49,13 @@ fn retry_policy( /// case multiple clients respond in a timely manner. In addition, we may make /// good use of the stat returned. async fn select_fastest_client_with_stat( - clients: Arc>>, + clients: Arc>, logger: Logger, api: StatApi, path: String, timeout: Duration, do_retry: bool, -) -> Result<(u64, Arc), Error> { +) -> Result<(u64, IpfsClient), Error> { let mut err: Option = None; let mut stats: FuturesUnordered<_> = clients @@ -108,7 +108,7 @@ fn restrict_file_size(path: &str, size: u64, max_file_bytes: usize) -> Result<() #[derive(Clone)] pub struct LinkResolver { - clients: Arc>>, + clients: Arc>, cache: Arc>>>, timeout: Duration, retry: bool, @@ -118,7 +118,7 @@ pub struct LinkResolver { impl LinkResolver { pub fn new(clients: Vec, env_vars: Arc) -> Self { Self { - clients: Arc::new(clients.into_iter().map(Arc::new).collect()), + clients: Arc::new(clients.into_iter().collect()), cache: Arc::new(Mutex::new(LruCache::with_capacity( env_vars.mappings.max_ipfs_cache_size as usize, ))), diff --git a/graph/src/cheap_clone.rs b/graph/src/cheap_clone.rs index 7deff5cd681..ed375b00079 100644 --- a/graph/src/cheap_clone.rs +++ b/graph/src/cheap_clone.rs @@ -28,6 +28,8 @@ impl CheapClone for Box {} impl CheapClone for std::pin::Pin {} impl CheapClone for Option {} impl CheapClone for Logger {} +// reqwest::Client uses Arc internally, so it is CheapClone. 
+impl CheapClone for reqwest::Client {} // Pool is implemented as a newtype over Arc, // So it is CheapClone. diff --git a/graph/src/ipfs_client.rs b/graph/src/ipfs_client.rs index 3fe4f578386..6d921f22837 100644 --- a/graph/src/ipfs_client.rs +++ b/graph/src/ipfs_client.rs @@ -110,7 +110,9 @@ pub struct AddResponse { #[derive(Clone)] pub struct IpfsClient { base: Arc, - client: Arc, + // reqwest::Client doesn't need to be `Arc` because it has one internally + // already. + client: reqwest::Client, } impl CheapClone for IpfsClient { @@ -125,14 +127,14 @@ impl CheapClone for IpfsClient { impl IpfsClient { pub fn new(base: &str) -> Result { Ok(IpfsClient { - client: Arc::new(reqwest::Client::new()), + client: reqwest::Client::new(), base: Arc::new(Uri::from_str(base)?), }) } pub fn localhost() -> Self { IpfsClient { - client: Arc::new(reqwest::Client::new()), + client: reqwest::Client::new(), base: Arc::new(Uri::from_str("http://localhost:5001").unwrap()), } } From 77c8d34a36dd4f62c851ba372efca3899eb290a2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 17 May 2023 09:02:33 -0700 Subject: [PATCH 0218/2104] store: Do not create GIN indexes for numeric arrays --- store/postgres/src/relational/ddl.rs | 37 ++++++++++++++-------- store/postgres/src/relational/ddl_tests.rs | 4 +-- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index 5b20be43d29..17391c53adc 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -197,22 +197,33 @@ impl Table { } fn create_attribute_indexes(&self, out: &mut String) -> fmt::Result { - // Create indexes. Skip columns whose type is an array of enum, - // since there is no good way to index them with Postgres 9.6. - // Once we move to Postgres 11, we can enable that - // (tracked in graph-node issue #1330) - for (i, column) in self + // Create indexes. 
+ + // Skip columns whose type is an array of enum, since there is no + // good way to index them with Postgres 9.6. Once we move to + // Postgres 11, we can enable that (tracked in graph-node issue + // #1330) + let not_enum_list = |col: &&Column| !(col.is_list() && col.is_enum()); + + // We create a unique index on `id` in `create_table` + // and don't need an explicit attribute index + let not_immutable_pk = |col: &&Column| !(self.immutable && col.is_primary_key()); + + // GIN indexes on numeric types are not very useful, but expensive + // to build + let not_numeric_list = |col: &&Column| { + !(col.is_list() + && [ColumnType::BigDecimal, ColumnType::BigInt, ColumnType::Int] + .contains(&col.column_type)) + }; + let columns = self .columns .iter() - .filter(|col| !(col.is_list() && col.is_enum())) - .enumerate() - { - if self.immutable && column.is_primary_key() { - // We create a unique index on `id` in `create_table` - // and don't need an explicit attribute index - continue; - } + .filter(not_enum_list) + .filter(not_immutable_pk) + .filter(not_numeric_list); + for (i, column) in columns.enumerate() { let (method, index_expr) = if column.is_reference() && !column.is_list() { // For foreign keys, index the key together with the block range // since we almost always also have a block_range clause in diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index 487de02ae56..6ea63bae9c9 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -378,9 +378,9 @@ create table "sgd0815"."song" ( create index brin_song on "sgd0815"."song" using brin(block$, vid); -create index attr_2_1_song_title +create index attr_2_0_song_title on "sgd0815"."song" using btree(left("title", 256)); -create index attr_2_2_song_written_by +create index attr_2_1_song_written_by on "sgd0815"."song" using btree("written_by", block$); create table "sgd0815"."song_stat" ( From 
91896e8b6679e6e538b9446c8ef3f2a3a8905197 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 17 May 2023 15:46:27 -0700 Subject: [PATCH 0219/2104] store: Use minmax_multi_ops for brin indexes when available --- store/postgres/src/catalog.rs | 47 ++++++++++++++++++++-- store/postgres/src/relational.rs | 3 +- store/postgres/src/relational/ddl.rs | 16 ++++---- store/postgres/src/relational/ddl_tests.rs | 28 ++++++------- store/postgres/src/relational/prune.rs | 6 ++- 5 files changed, 73 insertions(+), 27 deletions(-) diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index 1cd4e51e165..db5ba421134 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -182,6 +182,10 @@ pub struct Catalog { /// Set of tables which have an explicit causality region column. pub(crate) entities_with_causality_region: BTreeSet, + + /// Whether the database supports `int4_minmax_multi_ops` etc. + /// See the [Postgres docs](https://www.postgresql.org/docs/15/brin-builtin-opclasses.html) + has_minmax_multi_ops: bool, } impl Catalog { @@ -194,21 +198,27 @@ impl Catalog { ) -> Result { let text_columns = get_text_columns(conn, &site.namespace)?; let use_poi = supports_proof_of_indexing(conn, &site.namespace)?; + let has_minmax_multi_ops = has_minmax_multi_ops(conn)?; + Ok(Catalog { site, text_columns, use_poi, use_bytea_prefix, entities_with_causality_region: entities_with_causality_region.into_iter().collect(), + has_minmax_multi_ops, }) } /// Return a new catalog suitable for creating a new subgraph pub fn for_creation( + conn: &PgConnection, site: Arc, entities_with_causality_region: BTreeSet, - ) -> Self { - Catalog { + ) -> Result { + let has_minmax_multi_ops = has_minmax_multi_ops(conn)?; + + Ok(Catalog { site, text_columns: HashMap::default(), // DDL generation creates a POI table @@ -217,7 +227,8 @@ impl Catalog { // see: attr-bytea-prefix use_bytea_prefix: true, entities_with_causality_region, - } + has_minmax_multi_ops, + }) } 
/// Make a catalog as if the given `schema` did not exist in the database @@ -233,6 +244,7 @@ impl Catalog { use_poi: false, use_bytea_prefix: true, entities_with_causality_region, + has_minmax_multi_ops: false, }) } @@ -244,6 +256,19 @@ impl Catalog { .map(|cols| cols.contains(column.as_str())) .unwrap_or(false) } + + /// The operator classes to use for BRIN indexes. The first entry if the + /// operator class for `int4`, the second is for `int8` + pub fn minmax_ops(&self) -> (&str, &str) { + const MINMAX_OPS: (&str, &str) = ("int4_minmax_ops", "int8_minmax_ops"); + const MINMAX_MULTI_OPS: (&str, &str) = ("int4_minmax_multi_ops", "int8_minmax_multi_ops"); + + if self.has_minmax_multi_ops { + MINMAX_MULTI_OPS + } else { + MINMAX_OPS + } + } } fn get_text_columns( @@ -859,3 +884,19 @@ pub(crate) fn needs_autoanalyze( Ok(tables) } + +/// Check whether the database for `conn` supports the `minmax_multi_ops` +/// introduced in Postgres 14 +fn has_minmax_multi_ops(conn: &PgConnection) -> Result { + const QUERY: &str = "select count(*) = 2 as has_ops \ + from pg_opclass \ + where opcname in('int8_minmax_multi_ops', 'int4_minmax_multi_ops')"; + + #[derive(Queryable, QueryableByName)] + struct Ops { + #[sql_type = "Bool"] + has_ops: bool, + } + + Ok(sql_query(QUERY).get_result::(conn)?.has_ops) +} diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 30fbb04e4f1..3ec11dd0d5f 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -436,7 +436,8 @@ impl Layout { schema: &InputSchema, entities_with_causality_region: BTreeSet, ) -> Result { - let catalog = Catalog::for_creation(site.cheap_clone(), entities_with_causality_region); + let catalog = + Catalog::for_creation(conn, site.cheap_clone(), entities_with_causality_region)?; let layout = Self::new(site, schema, catalog)?; let sql = layout .as_ddl() diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index 
17391c53adc..8c6a2ceab90 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -11,7 +11,7 @@ use crate::relational::{ VID_COLUMN, }; -use super::{Column, Layout, SqlName, Table}; +use super::{Catalog, Column, Layout, SqlName, Table}; // In debug builds (for testing etc.) unconditionally create exclusion constraints, in release // builds for production, skip them @@ -38,7 +38,7 @@ impl Layout { tables.sort_by_key(|table| table.position); // Output 'create table' statements for all tables for table in tables { - table.as_ddl(&mut out)?; + table.as_ddl(&self.catalog, &mut out)?; } Ok(out) @@ -142,13 +142,15 @@ impl Table { } } - fn create_time_travel_indexes(&self, out: &mut String) -> fmt::Result { + fn create_time_travel_indexes(&self, catalog: &Catalog, out: &mut String) -> fmt::Result { + let (int4, int8) = catalog.minmax_ops(); + if self.immutable { write!( out, "create index brin_{table_name}\n \ on {qname}\n \ - using brin({block}, vid);\n", + using brin({block} {int4}, vid {int8});\n", table_name = self.name, qname = self.qualified_name, block = BLOCK_COLUMN @@ -177,7 +179,7 @@ impl Table { // entities are stored. 
write!(out,"create index brin_{table_name}\n \ on {qname}\n \ - using brin(lower(block_range), coalesce(upper(block_range), {block_max}), vid);\n", + using brin(lower(block_range) {int4}, coalesce(upper(block_range), {block_max}) {int4}, vid {int8});\n", table_name = self.name, qname = self.qualified_name, block_max = BLOCK_NUMBER_MAX)?; @@ -286,9 +288,9 @@ impl Table { /// /// See the unit tests at the end of this file for the actual DDL that /// gets generated - pub(crate) fn as_ddl(&self, out: &mut String) -> fmt::Result { + pub(crate) fn as_ddl(&self, catalog: &Catalog, out: &mut String) -> fmt::Result { self.create_table(out)?; - self.create_time_travel_indexes(out)?; + self.create_time_travel_indexes(catalog, out)?; self.create_attribute_indexes(out) } diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index 6ea63bae9c9..ea4bdf57390 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -223,7 +223,7 @@ create type sgd0815."size" add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_thing on "sgd0815"."thing" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index thing_block_range_closed on "sgd0815"."thing"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -250,7 +250,7 @@ create index attr_0_1_thing_big_thing add constraint scalar_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_scalar on "sgd0815"."scalar" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index 
scalar_block_range_closed on "sgd0815"."scalar"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -283,7 +283,7 @@ create index attr_1_7_scalar_color add constraint file_thing_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_file_thing on "sgd0815"."file_thing" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index file_thing_block_range_closed on "sgd0815"."file_thing"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -331,7 +331,7 @@ alter table "sgd0815"."musician" add constraint musician_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_musician on "sgd0815"."musician" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index musician_block_range_closed on "sgd0815"."musician"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -355,7 +355,7 @@ alter table "sgd0815"."band" add constraint band_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_band on "sgd0815"."band" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index band_block_range_closed on "sgd0815"."band"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -377,7 +377,7 @@ create table "sgd0815"."song" ( ); create index brin_song on "sgd0815"."song" - using brin(block$, 
vid); + using brin(block$ int4_minmax_ops, vid int8_minmax_ops); create index attr_2_0_song_title on "sgd0815"."song" using btree(left("title", 256)); create index attr_2_1_song_written_by @@ -393,7 +393,7 @@ alter table "sgd0815"."song_stat" add constraint song_stat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_song_stat on "sgd0815"."song_stat" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index song_stat_block_range_closed on "sgd0815"."song_stat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -435,7 +435,7 @@ alter table "sgd0815"."animal" add constraint animal_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_animal on "sgd0815"."animal" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index animal_block_range_closed on "sgd0815"."animal"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -453,7 +453,7 @@ alter table "sgd0815"."forest" add constraint forest_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_forest on "sgd0815"."forest" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index forest_block_range_closed on "sgd0815"."forest"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -471,7 +471,7 @@ alter table "sgd0815"."habitat" add constraint 
habitat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_habitat on "sgd0815"."habitat" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index habitat_block_range_closed on "sgd0815"."habitat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -527,7 +527,7 @@ alter table "sgd0815"."animal" add constraint animal_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_animal on "sgd0815"."animal" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index animal_block_range_closed on "sgd0815"."animal"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -552,7 +552,7 @@ alter table "sgd0815"."forest" create index brin_forest on "sgd0815"."forest" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index forest_block_range_closed on "sgd0815"."forest"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -570,7 +570,7 @@ alter table "sgd0815"."habitat" add constraint habitat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_habitat on "sgd0815"."habitat" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index habitat_block_range_closed on 
"sgd0815"."habitat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; @@ -606,7 +606,7 @@ alter table "sgd0815"."thing" add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_thing on "sgd0815"."thing" - using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); + using brin(lower(block_range) int4_minmax_ops, coalesce(upper(block_range), 2147483647) int4_minmax_ops, vid int8_minmax_ops); create index thing_block_range_closed on "sgd0815"."thing"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 2a848cc0c2f..c068bb4a25a 100644 --- a/store/postgres/src/relational/prune.rs +++ b/store/postgres/src/relational/prune.rs @@ -23,7 +23,7 @@ use crate::{ relational::{Table, VID_COLUMN}, }; -use super::{Layout, Namespace}; +use super::{Catalog, Layout, Namespace}; // Additions to `Table` that are useful for pruning impl Table { @@ -84,6 +84,7 @@ impl TablePair { src: Arc
, src_nsp: Namespace, dst_nsp: Namespace, + catalog: &Catalog, ) -> Result { let dst = src.new_like(&dst_nsp, &src.name); @@ -91,7 +92,7 @@ impl TablePair { if catalog::table_exists(conn, dst_nsp.as_str(), &dst.name)? { writeln!(query, "truncate table {};", dst.qualified_name)?; } else { - dst.as_ddl(&mut query)?; + dst.as_ddl(catalog, &mut query)?; } conn.batch_execute(&query)?; @@ -423,6 +424,7 @@ impl Layout { table.cheap_clone(), self.site.namespace.clone(), dst_nsp.clone(), + &self.catalog, )?; // Copy final entities. This can happen in parallel to indexing as // that part of the table will not change From c9beddd17c4f855d983db5d84b788116339cc67f Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Thu, 18 May 2023 17:58:22 +0100 Subject: [PATCH 0220/2104] perf: Avoid cloning cached entities (#4624) This should be a measurable performance improvement. And the lifetime adjustments surprisingly didn't cause much trouble. The only case where a clone is still required is if the entity needs to be mutated by applying pending updates, so we use a `Cow` to handle that. The entity sorting step is also faster now, because it no longer requires cloning the entity keys, rather we just sort the interned values. 
--- graph/src/components/store/entity_cache.rs | 48 +++++---- graph/src/components/store/mod.rs | 14 ++- graph/src/data/store/mod.rs | 6 ++ runtime/test/src/test.rs | 3 +- runtime/wasm/src/host_exports.rs | 21 ++-- runtime/wasm/src/module/mod.rs | 102 +++++++++++++------ runtime/wasm/src/to_from/external.rs | 12 +++ runtime/wasm/src/to_from/mod.rs | 13 +++ store/test-store/tests/graph/entity_cache.rs | 10 +- 9 files changed, 151 insertions(+), 78 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index adfa1a495fd..9dee1b84756 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -1,8 +1,10 @@ use anyhow::anyhow; +use std::borrow::Cow; use std::collections::HashMap; use std::fmt::{self, Debug}; use std::sync::Arc; +use crate::cheap_clone::CheapClone; use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; use crate::data::store::IntoEntityIterator; use crate::prelude::ENV_VARS; @@ -114,26 +116,39 @@ impl EntityCache { self.handler_updates.clear(); } - pub fn get(&mut self, key: &EntityKey, scope: GetScope) -> Result, StoreError> { + pub fn get( + &mut self, + key: &EntityKey, + scope: GetScope, + ) -> Result>, StoreError> { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. - let mut entity = match scope { - GetScope::Store => self.current.get_entity(&*self.store, key)?, + let mut entity: Option> = match scope { + GetScope::Store => { + if !self.current.contains_key(key) { + let entity = self.store.get(key)?; + self.current.insert(key.clone(), entity); + } + // Unwrap: we just inserted the entity + self.current.get(key).unwrap().as_ref().map(Cow::Borrowed) + } GetScope::InBlock => None, }; // Always test the cache consistency in debug mode. 
The test only // makes sense when we were actually asked to read from the store debug_assert!(match scope { - GetScope::Store => entity == self.store.get(key).unwrap(), + GetScope::Store => entity == self.store.get(key).unwrap().map(Cow::Owned), GetScope::InBlock => true, }); if let Some(op) = self.updates.get(key).cloned() { - entity = op.apply_to(entity).map_err(|e| key.unknown_attribute(e))?; + op.apply_to(&mut entity) + .map_err(|e| key.unknown_attribute(e))?; } if let Some(op) = self.handler_updates.get(key).cloned() { - entity = op.apply_to(entity).map_err(|e| key.unknown_attribute(e))?; + op.apply_to(&mut entity) + .map_err(|e| key.unknown_attribute(e))?; } Ok(entity) } @@ -178,6 +193,7 @@ impl EntityCache { // lookup in the database and check again with an entity that merges // the existing entity with the changes if !is_valid { + let schema = self.schema.cheap_clone(); let entity = self.get(&key, GetScope::Store)?.ok_or_else(|| { anyhow!( "Failed to read entity {}[{}] back from cache", @@ -185,7 +201,7 @@ impl EntityCache { key.entity_id ) })?; - entity.validate(&self.schema, &key)?; + entity.validate(&schema, &key)?; } Ok(()) @@ -315,21 +331,3 @@ impl EntityCache { }) } } - -impl LfuCache> { - // Helper for cached lookup of an entity. 
- fn get_entity( - &mut self, - store: &(impl s::ReadStore + ?Sized), - key: &EntityKey, - ) -> Result, s::QueryExecutionError> { - match self.get(key) { - None => { - let entity = store.get(key)?; - self.insert(key.clone(), entity.clone()); - Ok(entity) - } - Some(data) => Ok(data.clone()), - } - } -} diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 4146be3d6d3..0a198a0dc37 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -14,7 +14,7 @@ use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; use graphql_parser::schema as s; use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; +use std::borrow::{Borrow, Cow}; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::fmt::Display; @@ -1015,16 +1015,14 @@ enum EntityOp { } impl EntityOp { - fn apply_to(self, entity: Option) -> Result, InternError> { + fn apply_to(self, entity: &mut Option>) -> Result<(), InternError> { use EntityOp::*; match (self, entity) { - (Remove, _) => Ok(None), - (Overwrite(new), _) | (Update(new), None) => Ok(Some(new)), - (Update(updates), Some(mut entity)) => { - entity.merge_remove_null_fields(updates)?; - Ok(Some(entity)) - } + (Remove, e @ _) => *e = None, + (Overwrite(new), e @ _) | (Update(new), e @ None) => *e = Some(Cow::Owned(new)), + (Update(updates), Some(entity)) => entity.to_mut().merge_remove_null_fields(updates)?, } + Ok(()) } fn accumulate(&mut self, next: EntityOp) { diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 6d7bc5f8043..3147f82fc7a 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -691,6 +691,12 @@ impl Entity { v } + pub fn sorted_ref(&self) -> Vec<(&str, &Value)> { + let mut v: Vec<_> = self.0.iter().collect(); + v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + v + } + fn check_id(&self) -> Result<(), Error> { match self.get("id") { None => Err(anyhow!( diff --git 
a/runtime/test/src/test.rs b/runtime/test/src/test.rs index c37e7c9e8d0..6e7098c2c52 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -15,6 +15,7 @@ use graph_runtime_wasm::{ }; use semver::Version; +use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::str::FromStr; use test_store::{LOGGER, STORE}; @@ -1305,7 +1306,7 @@ async fn test_store_set_id() { &mut self, entity_type: &str, id: &str, - ) -> Result, anyhow::Error> { + ) -> Result>, anyhow::Error> { let user_id = String::from(id); self.host_exports.store_get( &mut self.ctx.state, diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 0da17e2a61f..5032462043f 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -1,3 +1,4 @@ +use std::borrow::Cow; use std::collections::HashMap; use std::ops::Deref; use std::str::FromStr; @@ -251,14 +252,14 @@ impl HostExports { Ok(()) } - pub(crate) fn store_get( + pub(crate) fn store_get<'a>( &self, - state: &mut BlockState, + state: &'a mut BlockState, entity_type: String, entity_id: String, gas: &GasCounter, scope: GetScope, - ) -> Result, anyhow::Error> { + ) -> Result>, anyhow::Error> { let store_key = EntityKey { entity_type: EntityType::new(entity_type), entity_id: entity_id.into(), @@ -267,7 +268,11 @@ impl HostExports { self.check_entity_type_access(&store_key.entity_type)?; let result = state.entity_cache.get(&store_key, scope)?; - gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; + + gas.consume_host_fn(gas::STORE_GET.with_args( + complexity::Linear, + (&store_key, result.as_ref().map(|e| e.as_ref())), + ))?; Ok(result) } @@ -891,7 +896,7 @@ fn bytes_to_string(logger: &Logger, bytes: Vec) -> String { /// Expose some host functions for testing only #[cfg(debug_assertions)] pub mod test_support { - use std::{collections::HashMap, sync::Arc}; + use std::{borrow::Cow, collections::HashMap, sync::Arc}; use graph::{ 
blockchain::Blockchain, @@ -934,13 +939,13 @@ pub mod test_support { ) } - pub fn store_get( + pub fn store_get<'a>( &self, - state: &mut BlockState, + state: &'a mut BlockState, entity_type: String, entity_id: String, gas: &GasCounter, - ) -> Result, anyhow::Error> { + ) -> Result>, anyhow::Error> { self.0 .store_get(state, entity_type, entity_id, gas, GetScope::Store) } diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index b9712e3035a..8f053536c4a 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -322,18 +322,6 @@ pub struct ExperimentalFeatures { } pub struct WasmInstanceContext { - // In the future there may be multiple memories, but currently there is only one memory per - // module. And at least AS calls it "memory". There is no uninitialized memory in Wasm, memory - // is zeroed when initialized or grown. - memory: Memory, - - // Function exported by the wasm module that will allocate the request number of bytes and - // return a pointer to the first byte of allocated space. - memory_allocate: wasmtime::TypedFunc, - - // Function wrapper for `idof` from AssemblyScript - id_of_type: Option>, - pub ctx: MappingContext, pub valid_module: Arc, pub host_metrics: Arc, @@ -342,12 +330,6 @@ pub struct WasmInstanceContext { // Used by ipfs.map. pub(crate) timeout_stopwatch: Arc>, - // First free byte in the current arena. Set on the first call to `raw_new`. - arena_start_ptr: i32, - - // Number of free bytes starting from `arena_start_ptr`. - arena_free_size: i32, - // A trap ocurred due to a possible reorg detection. 
pub possible_reorg: bool, @@ -355,6 +337,30 @@ pub struct WasmInstanceContext { pub deterministic_host_trap: bool, pub(crate) experimental_features: ExperimentalFeatures, + + asc_heap: AscHeapCtx, +} + +struct AscHeapCtx { + // Function wrapper for `idof` from AssemblyScript + id_of_type: Option>, + + // Function exported by the wasm module that will allocate the request number of bytes and + // return a pointer to the first byte of allocated space. + memory_allocate: wasmtime::TypedFunc, + + api_version: semver::Version, + + // In the future there may be multiple memories, but currently there is only one memory per + // module. And at least AS calls it "memory". There is no uninitialized memory in Wasm, memory + // is zeroed when initialized or grown. + memory: Memory, + + // First free byte in the current arena. Set on the first call to `raw_new`. + arena_start_ptr: i32, + + // Number of free bytes starting from `arena_start_ptr`. + arena_free_size: i32, } impl WasmInstance { @@ -713,7 +719,35 @@ fn host_export_error_from_trap(trap: Trap, context: String) -> HostExportError { } } +// This impl is a convenience that delegates to `self.asc_heap`. 
impl AscHeap for WasmInstanceContext { + fn raw_new(&mut self, bytes: &[u8], gas: &GasCounter) -> Result { + self.asc_heap.raw_new(bytes, gas) + } + + fn read<'a>( + &self, + offset: u32, + buffer: &'a mut [MaybeUninit], + gas: &GasCounter, + ) -> Result<&'a mut [u8], DeterministicHostError> { + self.asc_heap.read(offset, buffer, gas) + } + + fn read_u32(&self, offset: u32, gas: &GasCounter) -> Result { + self.asc_heap.read_u32(offset, gas) + } + + fn api_version(&self) -> Version { + self.asc_heap.api_version() + } + + fn asc_type_id(&mut self, type_id_index: IndexForAscTypeId) -> Result { + self.asc_heap.asc_type_id(type_id_index) + } +} + +impl AscHeap for AscHeapCtx { fn raw_new(&mut self, bytes: &[u8], gas: &GasCounter) -> Result { // The cost of writing to wasm memory from the host is the same as of writing from wasm // using load instructions. @@ -736,7 +770,7 @@ impl AscHeap for WasmInstanceContext { self.arena_start_ptr = self.memory_allocate.call(arena_size).unwrap(); self.arena_free_size = arena_size; - match &self.ctx.host_exports.api_version { + match &self.api_version { version if *version <= Version::new(0, 0, 4) => {} _ => { // This arithmetic is done because when you call AssemblyScripts's `__alloc` @@ -806,7 +840,7 @@ impl AscHeap for WasmInstanceContext { } fn api_version(&self) -> Version { - self.ctx.host_exports.api_version.clone() + self.api_version.clone() } fn asc_type_id(&mut self, type_id_index: IndexForAscTypeId) -> Result { @@ -861,16 +895,19 @@ impl WasmInstanceContext { }; Ok(WasmInstanceContext { - memory_allocate, - id_of_type, - memory, + asc_heap: AscHeapCtx { + memory_allocate, + memory, + arena_start_ptr: 0, + arena_free_size: 0, + api_version: ctx.host_exports.api_version.clone(), + id_of_type, + }, ctx, valid_module, host_metrics, timeout, timeout_stopwatch, - arena_free_size: 0, - arena_start_ptr: 0, possible_reorg: false, deterministic_host_trap: false, experimental_features, @@ -917,16 +954,19 @@ impl WasmInstanceContext { 
}; Ok(WasmInstanceContext { - id_of_type, - memory_allocate, - memory, + asc_heap: AscHeapCtx { + memory_allocate, + memory, + arena_start_ptr: 0, + arena_free_size: 0, + api_version: ctx.host_exports.api_version.clone(), + id_of_type, + }, ctx, valid_module, host_metrics, timeout, timeout_stopwatch, - arena_free_size: 0, - arena_start_ptr: 0, possible_reorg: false, deterministic_host_trap: false, experimental_features, @@ -968,7 +1008,7 @@ impl WasmInstanceContext { .host_metrics .stopwatch .start_section("store_get_asc_new"); - asc_new(self, &entity.sorted(), gas)? + asc_new(&mut self.asc_heap, &entity.sorted_ref(), gas)? } None => match &self.ctx.debug_fork { Some(fork) => { diff --git a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index 4e36823bc77..d8eca138743 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -340,6 +340,18 @@ impl ToAscObj for Vec<(Word, store::Value)> { } } +impl ToAscObj for Vec<(&str, &store::Value)> { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + Ok(AscTypedMap { + entries: asc_new(heap, self.as_slice(), gas)?, + }) + } +} + impl ToAscObj>> for Vec> { fn to_asc_obj( &self, diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index fb919a3979d..0f53af8f8d1 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -65,6 +65,19 @@ impl ToAscObj for str { } } +impl ToAscObj for &str { + fn to_asc_obj( + &self, + heap: &mut H, + _gas: &GasCounter, + ) -> Result { + Ok(AscString::new( + &self.encode_utf16().collect::>(), + heap.api_version(), + )?) 
+ } +} + impl ToAscObj for String { fn to_asc_obj( &self, diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 974224b1142..1f0211dda45 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -717,16 +717,16 @@ fn scoped_get() { // For the new entity, we can retrieve it with either scope let act5 = cache.get(&key5, GetScope::InBlock).unwrap(); - assert_eq!(Some(&wallet5), act5.as_ref()); + assert_eq!(Some(&wallet5), act5.as_ref().map(|e| e.as_ref())); let act5 = cache.get(&key5, GetScope::Store).unwrap(); - assert_eq!(Some(&wallet5), act5.as_ref()); + assert_eq!(Some(&wallet5), act5.as_ref().map(|e| e.as_ref())); // For an entity in the store, we can not get it `InBlock` but with // `Store` let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); assert_eq!(None, act1); let act1 = cache.get(&key1, GetScope::Store).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref()); + assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); // Even after reading from the store, the entity is not visible with // `InBlock` let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); @@ -736,9 +736,9 @@ fn scoped_get() { wallet1.set("balance", 70).unwrap(); cache.set(key1.clone(), wallet1.clone()).unwrap(); let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref()); + assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); let act1 = cache.get(&key1, GetScope::Store).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref()); + assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); }) } From a25233b58331c580a074c4f40081de5ca180da2f Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 17 May 2023 19:49:48 +0530 Subject: [PATCH 0221/2104] runtime: catch panic while handling trigger, fixes oneshot cancelled errors --- runtime/wasm/src/mapping.rs | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 
insertions(+), 9 deletions(-) diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index a006030343a..0b9db750506 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -9,8 +9,9 @@ use graph::data_source::{MappingTrigger, TriggerWithHandler}; use graph::prelude::*; use graph::runtime::gas::Gas; use std::collections::BTreeMap; +use std::panic::AssertUnwindSafe; use std::sync::Arc; -use std::thread; +use std::{panic, thread}; /// Spawn a wasm module in its own thread. pub fn spawn_module( @@ -52,14 +53,32 @@ where result_sender, } = request; - let result = instantiate_module_and_handle_trigger( - valid_module.cheap_clone(), - ctx, - trigger, - host_metrics.cheap_clone(), - timeout, - experimental_features, - ); + let result = panic::catch_unwind(AssertUnwindSafe(|| { + instantiate_module_and_handle_trigger( + valid_module.cheap_clone(), + ctx, + trigger, + host_metrics.cheap_clone(), + timeout, + experimental_features, + ) + })); + + let result = match result { + Ok(result) => result, + Err(panic_info) => { + let err_msg = if let Some(payload) = panic_info + .downcast_ref::() + .map(String::as_str) + .or(panic_info.downcast_ref::<&str>().copied()) + { + anyhow!("Subgraph panicked with message: {}", payload) + } else { + anyhow!("Subgraph panicked with an unknown payload.") + }; + Err(MappingError::Unknown(err_msg)) + } + }; result_sender .send(result) From eb9c3552756ccdae9c3c6a7d9d4b1dc86a25f512 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 6 May 2023 09:28:10 +0200 Subject: [PATCH 0222/2104] graph: Make debug output for Entity, EntityKey/Type more readable --- graph/src/components/store/mod.rs | 18 ++++++++++++++++-- graph/src/data/store/mod.rs | 12 +++++++++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 0a198a0dc37..8bfa55e796f 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ 
-34,7 +34,7 @@ use crate::{constraint_violation, prelude::*}; /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. }` -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityType(Word); impl EntityType { @@ -109,6 +109,11 @@ impl ToSql for EntityType { } } +impl std::fmt::Debug for EntityType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "EntityType({})", self.0) + } +} #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityFilterDerivative(bool); @@ -124,7 +129,7 @@ impl EntityFilterDerivative { /// Key by which an individual entity in the store can be accessed. Stores /// only the entity type and id. The deployment must be known from context. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityKey { /// Name of the entity type. pub entity_type: EntityType, @@ -146,6 +151,15 @@ impl EntityKey { } } +impl std::fmt::Debug for EntityKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "EntityKey({}[{}], cr={})", + self.entity_type, self.entity_id, self.causality_region + ) + } +} #[derive(Debug, Clone)] pub struct LoadRelatedRequest { /// Name of the entity type. diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 3147f82fc7a..8862595adb6 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -602,7 +602,7 @@ lazy_static! { } /// An entity is represented as a map of attribute names to values. 
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)] +#[derive(Clone, PartialEq, Eq, Serialize)] pub struct Entity(Object); pub trait IntoEntityIterator: IntoIterator {} @@ -929,6 +929,16 @@ impl GasSizeOf for Entity { } } +impl std::fmt::Debug for Entity { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut ds = f.debug_struct("Entity"); + for (k, v) in &self.0 { + ds.field(k, v); + } + ds.finish() + } +} + #[test] fn value_bytes() { let graphql_value = r::Value::String("0x8f494c66afc1d3f8ac1b45df21f02a46".to_owned()); From 0f7cd9d44e9052291947dd0b3705758f4035d53b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 9 May 2023 09:56:04 +0200 Subject: [PATCH 0223/2104] graph: Implement StoreError.clone() --- graph/src/components/store/err.rs | 39 +++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index b07f0c64f3b..4a689463c85 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -77,6 +77,45 @@ macro_rules! constraint_violation { }} } +/// We can't derive `Clone` because some variants use non-cloneable data. 
+/// For those cases, produce an `Unknown` error with some details about the +/// original error +impl Clone for StoreError { + fn clone(&self) -> Self { + match self { + Self::Unknown(arg0) => Self::Unknown(anyhow!("{}", arg0)), + Self::ConflictingId(arg0, arg1, arg2) => { + Self::ConflictingId(arg0.clone(), arg1.clone(), arg2.clone()) + } + Self::UnknownField(arg0) => Self::UnknownField(arg0.clone()), + Self::UnknownTable(arg0) => Self::UnknownTable(arg0.clone()), + Self::UnknownAttribute(arg0, arg1) => { + Self::UnknownAttribute(arg0.clone(), arg1.clone()) + } + Self::MalformedDirective(arg0) => Self::MalformedDirective(arg0.clone()), + Self::QueryExecutionError(arg0) => Self::QueryExecutionError(arg0.clone()), + Self::InvalidIdentifier(arg0) => Self::InvalidIdentifier(arg0.clone()), + Self::DuplicateBlockProcessing(arg0, arg1) => { + Self::DuplicateBlockProcessing(arg0.clone(), arg1.clone()) + } + Self::ConstraintViolation(arg0) => Self::ConstraintViolation(arg0.clone()), + Self::DeploymentNotFound(arg0) => Self::DeploymentNotFound(arg0.clone()), + Self::UnknownShard(arg0) => Self::UnknownShard(arg0.clone()), + Self::FulltextSearchNonDeterministic => Self::FulltextSearchNonDeterministic, + Self::Canceled => Self::Canceled, + Self::DatabaseUnavailable => Self::DatabaseUnavailable, + Self::DatabaseDisabled => Self::DatabaseDisabled, + Self::ForkFailure(arg0) => Self::ForkFailure(arg0.clone()), + Self::Poisoned => Self::Poisoned, + Self::WriterPanic(arg0) => Self::Unknown(anyhow!("writer panic: {}", arg0)), + Self::UnsupportedDeploymentSchemaVersion(arg0) => { + Self::UnsupportedDeploymentSchemaVersion(arg0.clone()) + } + Self::PruneFailure(arg0) => Self::PruneFailure(arg0.clone()), + } + } +} + impl From for StoreError { fn from(e: DieselError) -> Self { // When the error is caused by a closed connection, treat the error From ad1c6ead6214906247d32504e542bcb64b8fe74f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 21 Apr 2023 21:40:24 -0700 Subject: 
[PATCH 0224/2104] store: Do not mutate entities for InsertQuery --- graph/src/data/store/mod.rs | 2 + store/postgres/src/relational_queries.rs | 79 +++++++++++++++++------- 2 files changed, 58 insertions(+), 23 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 8862595adb6..89e12584dd7 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -188,6 +188,8 @@ pub enum Value { BigInt(scalar::BigInt), } +pub const NULL: Value = Value::Null; + impl stable_hash_legacy::StableHash for Value { fn stable_hash( &self, diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 3cb86ecec29..bf486bd0da9 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -13,6 +13,7 @@ use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; use graph::components::store::{DerivedEntityQuery, EntityKey}; +use graph::data::store::NULL; use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -1747,10 +1748,51 @@ impl<'a> LoadQuery for FindDerivedQuery<'a> { impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} +#[derive(Debug)] +struct FulltextValues<'a>(HashMap>); + +impl<'a> FulltextValues<'a> { + fn new(table: &'a Table, entities: &'a [(&'a EntityKey, Cow<'a, Entity>)]) -> Self { + let mut map = HashMap::new(); + for column in table.columns.iter().filter(|column| column.is_fulltext()) { + for (_, entity) in entities { + let mut fulltext = Vec::new(); + if let Some(fields) = column.fulltext_fields.as_ref() { + let fulltext_field_values = fields + .iter() + .filter_map(|field| entity.get(field)) + .cloned() + .collect::>(); + if !fulltext_field_values.is_empty() { + fulltext.push((column.field.as_str(), Value::List(fulltext_field_values))); + } + } + if !fulltext.is_empty() { + map.insert(entity.id(), fulltext); + } + } + } + Self(map) + } + + fn 
get(&self, entity_id: &Word, field: &str) -> &Value { + self.0 + .get(entity_id) + .and_then(|values| { + values + .iter() + .find(|(key, _)| field == *key) + .map(|(_, value)| value) + }) + .unwrap_or(&NULL) + } +} + #[derive(Debug)] pub struct InsertQuery<'a> { table: &'a Table, entities: &'a [(&'a EntityKey, Cow<'a, Entity>)], + fulltext_values: FulltextValues<'a>, unique_columns: Vec<&'a Column>, br_column: BlockRangeColumn<'a>, } @@ -1761,19 +1803,8 @@ impl<'a> InsertQuery<'a> { entities: &'a mut [(&'a EntityKey, Cow)], block: BlockNumber, ) -> Result, StoreError> { - for (entity_key, entity) in entities.iter_mut() { - let mut fulltext = Vec::new(); + for (entity_key, entity) in entities.iter() { for column in table.columns.iter() { - if let Some(fields) = column.fulltext_fields.as_ref() { - let fulltext_field_values = fields - .iter() - .filter_map(|field| entity.get(field)) - .cloned() - .collect::>(); - if !fulltext_field_values.is_empty() { - fulltext.push((&column.field, Value::List(fulltext_field_values))); - } - } if !column.is_nullable() && !entity.contains_key(&column.field) { return Err(StoreError::QueryExecutionError(format!( "can not insert entity {}[{}] since value for non-nullable attribute {} is missing. 
\ @@ -1783,16 +1814,16 @@ impl<'a> InsertQuery<'a> { ))); } } - if !fulltext.is_empty() { - entity.to_mut().merge_iter(fulltext)?; - } } - let unique_columns = InsertQuery::unique_columns(table, entities); + + let fulltext_values = FulltextValues::new(table, entities); + let unique_columns = InsertQuery::unique_columns(table, entities, &fulltext_values); let br_column = BlockRangeColumn::new(table, "", block); Ok(InsertQuery { table, entities, + fulltext_values, unique_columns, br_column, }) @@ -1802,11 +1833,14 @@ impl<'a> InsertQuery<'a> { fn unique_columns( table: &'a Table, entities: &'a [(&'a EntityKey, Cow<'a, Entity>)], + fulltext_values: &FulltextValues<'a>, ) -> Vec<&'a Column> { let mut hashmap = HashMap::new(); for (_key, entity) in entities.iter() { for column in &table.columns { - if entity.get(&column.field).is_some() { + if entity.get(&column.field).is_some() + || !fulltext_values.get(&entity.id(), &column.field).is_null() + { hashmap.entry(column.name.as_str()).or_insert(column); } } @@ -1872,13 +1906,12 @@ impl<'a> QueryFragment for InsertQuery<'a> { while let Some((key, entity)) = iter.next() { out.push_sql("("); for column in &self.unique_columns { - // If the column name is not within this entity's fields, we will issue the - // null value in its place - if let Some(value) = entity.get(&column.field) { - QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; + let value = if column.is_fulltext() { + self.fulltext_values.get(&key.entity_id, &column.field) } else { - out.push_sql("null"); - } + entity.get(&column.field).unwrap_or(&NULL) + }; + QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); } self.br_column.literal_range_current(&mut out)?; From 6c3b4013ab1a79af35e63f8ae590f5736a0d8e6d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 21 Apr 2023 21:45:44 -0700 Subject: [PATCH 0225/2104] store: Improve InsertQuery::unique_columns - Columns are now returned in the order in which they are 
defined, not a random order - In the common case where a column is present in all entities, we don't have to iterate over all entities --- store/postgres/src/relational_queries.rs | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index bf486bd0da9..ad7a0f04f18 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1835,17 +1835,19 @@ impl<'a> InsertQuery<'a> { entities: &'a [(&'a EntityKey, Cow<'a, Entity>)], fulltext_values: &FulltextValues<'a>, ) -> Vec<&'a Column> { - let mut hashmap = HashMap::new(); - for (_key, entity) in entities.iter() { - for column in &table.columns { - if entity.get(&column.field).is_some() - || !fulltext_values.get(&entity.id(), &column.field).is_null() - { - hashmap.entry(column.name.as_str()).or_insert(column); - } - } - } - hashmap.into_values().collect() + table + .columns + .iter() + .filter(|column| { + entities.iter().any(|(_, entity)| { + if column.is_fulltext() { + !fulltext_values.get(&entity.id(), &column.field).is_null() + } else { + entity.get(&column.field).is_some() + } + }) + }) + .collect() } /// Return the maximum number of entities that can be inserted with one From 1cf11f5f17e6271b7ab33da739e56f598d0a1132 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 21 Apr 2023 16:36:57 -0700 Subject: [PATCH 0226/2104] store: Remove mutability from entities in various places --- store/postgres/src/deployment_store.rs | 29 ++++++---------- store/postgres/src/relational.rs | 12 +++---- store/postgres/src/relational_queries.rs | 11 +++--- store/test-store/tests/postgres/relational.rs | 34 +++++++------------ .../tests/postgres/relational_bytes.rs | 15 +++----- 5 files changed, 38 insertions(+), 63 deletions(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 32f25153917..5d054c823ef 100644 --- 
a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -23,7 +23,6 @@ use graph::tokio::task::JoinHandle; use itertools::Itertools; use lru_time_cache::LruCache; use rand::{seq::SliceRandom, thread_rng}; -use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::convert::Into; use std::iter::FromIterator; @@ -337,13 +336,13 @@ impl DeploymentStore { inserts .entry(key.entity_type.clone()) .or_insert_with(Vec::new) - .push((key, Cow::from(data))); + .push((key, data)); } Overwrite { key, data } => { overwrites .entry(key.entity_type.clone()) .or_insert_with(Vec::new) - .push((key, Cow::from(data))); + .push((key, data)); } Remove { key } => { removals @@ -356,28 +355,22 @@ impl DeploymentStore { // Apply modification groups. // Inserts: - for (entity_type, mut entities) in inserts.into_iter() { + for (entity_type, entities) in inserts.into_iter() { count += - self.insert_entities(&entity_type, &mut entities, conn, layout, ptr, stopwatch)? - as i32 + self.insert_entities(&entity_type, &entities, conn, layout, ptr, stopwatch)? as i32 } // Overwrites: - for (entity_type, mut entities) in overwrites.into_iter() { + for (entity_type, entities) in overwrites.into_iter() { // we do not update the count since the number of entities remains the same - self.overwrite_entities(&entity_type, &mut entities, conn, layout, ptr, stopwatch)?; + self.overwrite_entities(&entity_type, &entities, conn, layout, ptr, stopwatch)?; } // Removals for (entity_type, entity_keys) in removals.into_iter() { - count -= self.remove_entities( - &entity_type, - entity_keys.as_slice(), - conn, - layout, - ptr, - stopwatch, - )? as i32; + count -= + self.remove_entities(&entity_type, &entity_keys, conn, layout, ptr, stopwatch)? 
+ as i32; } Ok(count) } @@ -385,7 +378,7 @@ impl DeploymentStore { fn insert_entities<'a>( &'a self, entity_type: &'a EntityType, - data: &'a mut [(&'a EntityKey, Cow<'a, Entity>)], + data: &'a [(&'a EntityKey, &'a Entity)], conn: &PgConnection, layout: &'a Layout, ptr: &BlockPtr, @@ -405,7 +398,7 @@ impl DeploymentStore { fn overwrite_entities<'a>( &'a self, entity_type: &'a EntityType, - data: &'a mut [(&'a EntityKey, Cow<'a, Entity>)], + data: &'a [(&'a EntityKey, &'a Entity)], conn: &PgConnection, layout: &'a Layout, ptr: &BlockPtr, diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 3ec11dd0d5f..6111ff6ac8b 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -34,7 +34,7 @@ use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE use graph::slog::warn; use inflector::Inflector; use lazy_static::lazy_static; -use std::borrow::{Borrow, Cow}; +use std::borrow::Borrow; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::{From, TryFrom}; use std::fmt::{self, Write}; @@ -653,7 +653,7 @@ impl Layout { &'a self, conn: &PgConnection, entity_type: &'a EntityType, - entities: &'a mut [(&'a EntityKey, Cow<'a, Entity>)], + entities: &'a [(&'a EntityKey, &'a Entity)], block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { @@ -664,7 +664,7 @@ impl Layout { // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in entities.chunks_mut(chunk_size) { + for chunk in entities.chunks(chunk_size) { count += InsertQuery::new(table, chunk, block)? .get_results(conn) .map(|ids| ids.len())? 
@@ -801,14 +801,14 @@ impl Layout { &'a self, conn: &PgConnection, entity_type: &'a EntityType, - entities: &'a mut [(&'a EntityKey, Cow<'a, Entity>)], + entities: &'a [(&'a EntityKey, &'a Entity)], block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(entity_type)?; if table.immutable { let ids = entities - .iter_mut() + .iter() .map(|(key, _)| key.entity_id.as_str()) .collect::>() .join(", "); @@ -834,7 +834,7 @@ impl Layout { // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in entities.chunks_mut(chunk_size) { + for chunk in entities.chunks(chunk_size) { count += InsertQuery::new(table, chunk, block)?.execute(conn)?; } Ok(count) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index ad7a0f04f18..033aaf2fa84 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -27,7 +27,6 @@ use graph::{ data::store::scalar, }; use itertools::Itertools; -use std::borrow::Cow; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::TryFrom; use std::fmt::{self, Display}; @@ -1752,7 +1751,7 @@ impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} struct FulltextValues<'a>(HashMap>); impl<'a> FulltextValues<'a> { - fn new(table: &'a Table, entities: &'a [(&'a EntityKey, Cow<'a, Entity>)]) -> Self { + fn new(table: &'a Table, entities: &'a [(&'a EntityKey, &'a Entity)]) -> Self { let mut map = HashMap::new(); for column in table.columns.iter().filter(|column| column.is_fulltext()) { for (_, entity) in entities { @@ -1791,7 +1790,7 @@ impl<'a> FulltextValues<'a> { #[derive(Debug)] pub struct InsertQuery<'a> { table: &'a Table, - entities: &'a [(&'a EntityKey, Cow<'a, Entity>)], + entities: &'a [(&'a EntityKey, &'a Entity)], fulltext_values: FulltextValues<'a>, unique_columns: 
Vec<&'a Column>, br_column: BlockRangeColumn<'a>, @@ -1800,10 +1799,10 @@ pub struct InsertQuery<'a> { impl<'a> InsertQuery<'a> { pub fn new( table: &'a Table, - entities: &'a mut [(&'a EntityKey, Cow)], + entities: &'a [(&'a EntityKey, &'a Entity)], block: BlockNumber, ) -> Result, StoreError> { - for (entity_key, entity) in entities.iter() { + for (entity_key, entity) in entities { for column in table.columns.iter() { if !column.is_nullable() && !entity.contains_key(&column.field) { return Err(StoreError::QueryExecutionError(format!( @@ -1832,7 +1831,7 @@ impl<'a> InsertQuery<'a> { /// Build the column name list using the subset of all keys among present entities. fn unique_columns( table: &'a Table, - entities: &'a [(&'a EntityKey, Cow<'a, Entity>)], + entities: &'a [(&'a EntityKey, &'a Entity)], fulltext_values: &FulltextValues<'a>, ) -> Vec<&'a Column> { table diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 0b01c8f0112..d14ac53e028 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -15,7 +15,6 @@ use graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; use hex_literal::hex; use lazy_static::lazy_static; -use std::borrow::Cow; use std::collections::BTreeSet; use std::panic; use std::str::FromStr; @@ -224,9 +223,9 @@ fn insert_entity_at( (key, entity) }) .collect::>(); - let mut entities_with_keys: Vec<_> = entities_with_keys_owned + let entities_with_keys: Vec<_> = entities_with_keys_owned .iter() - .map(|(key, entity)| (key, Cow::from(entity))) + .map(|(key, entity)| (key, entity)) .collect(); let entity_type = EntityType::from(entity_type); let errmsg = format!( @@ -237,7 +236,7 @@ fn insert_entity_at( .insert( conn, &entity_type, - &mut entities_with_keys, + &entities_with_keys, block, &MOCK_STOPWATCH, ) @@ -263,9 +262,9 @@ fn update_entity_at( (key, entity) }) .collect(); - let 
mut entities_with_keys: Vec<_> = entities_with_keys_owned + let entities_with_keys: Vec<_> = entities_with_keys_owned .iter() - .map(|(key, entity)| (key, Cow::from(entity))) + .map(|(key, entity)| (key, entity)) .collect(); let entity_type = EntityType::from(entity_type); @@ -278,7 +277,7 @@ fn update_entity_at( .update( conn, &entity_type, - &mut entities_with_keys, + &entities_with_keys, block, &MOCK_STOPWATCH, ) @@ -587,9 +586,9 @@ fn update() { let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); - let mut entities = vec![(&key, Cow::from(&entity))]; + let entities = vec![(&key, &entity)]; layout - .update(conn, &entity_type, &mut entities, 0, &MOCK_STOPWATCH) + .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout @@ -641,13 +640,10 @@ fn update_many() { .collect(); let entities_vec = vec![one, two, three]; - let mut entities: Vec<(&EntityKey, Cow<'_, Entity>)> = keys - .iter() - .zip(entities_vec.iter().map(Cow::Borrowed)) - .collect(); + let entities: Vec<(&EntityKey, &Entity)> = keys.iter().zip(entities_vec.iter()).collect(); layout - .update(conn, &entity_type, &mut entities, 0, &MOCK_STOPWATCH) + .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) .expect("Failed to update"); // check updates took effect @@ -713,15 +709,9 @@ fn serialize_bigdecimal() { let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); - let mut entities = vec![(&key, Cow::Borrowed(&entity))]; + let entities = vec![(&key, &entity)]; layout - .update( - conn, - &entity_type, - entities.as_mut_slice(), - 0, - &MOCK_STOPWATCH, - ) + .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 49bd3bc45ae..ba0376aff68 100644 --- 
a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -10,7 +10,6 @@ use graph::prelude::{EntityQuery, MetricsRegistry}; use graph::schema::InputSchema; use hex_literal::hex; use lazy_static::lazy_static; -use std::borrow::Cow; use std::collections::BTreeSet; use std::str::FromStr; use std::{collections::BTreeMap, sync::Arc}; @@ -83,16 +82,10 @@ fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity let key = EntityKey::data(entity_type.to_owned(), entity.id()); let entity_type = EntityType::from(entity_type); - let mut entities = vec![(&key, Cow::from(&entity))]; + let entities = vec![(&key, &entity)]; let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); layout - .insert( - conn, - &entity_type, - entities.as_mut_slice(), - 0, - &MOCK_STOPWATCH, - ) + .insert(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) .expect(&errmsg); } @@ -303,9 +296,9 @@ fn update() { let entity_id = entity.id(); let entity_type = key.entity_type.clone(); - let mut entities = vec![(&key, Cow::from(&entity))]; + let entities = vec![(&key, &entity)]; layout - .update(conn, &entity_type, &mut entities, 1, &MOCK_STOPWATCH) + .update(conn, &entity_type, &entities, 1, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout From 379851cb34e744a782883fdf42d8c100ab609e76 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 18 Apr 2023 18:00:22 -0700 Subject: [PATCH 0227/2104] all: Set manifest_idx_and_name when creating WritableStore The mapping never changes, so it can be set once for the WritableStore, and doesn't need to be passed in for every transact_block_operations --- core/src/subgraph/inputs.rs | 5 ---- core/src/subgraph/instance_manager.rs | 17 +++++++------ core/src/subgraph/runner.rs | 1 - graph/src/components/store/traits.rs | 5 +++- runtime/test/src/common.rs | 7 +++++- runtime/test/src/test.rs | 5 +++- store/postgres/src/subgraph_store.rs | 10 
+++++++- store/postgres/src/writable.rs | 25 ++++++++++--------- store/test-store/src/store.rs | 17 +++++++------ store/test-store/tests/graph/entity_cache.rs | 3 +-- store/test-store/tests/postgres/graft.rs | 18 ++++++++++---- store/test-store/tests/postgres/store.rs | 4 +-- store/test-store/tests/postgres/subgraph.rs | 26 +++++++++++--------- store/test-store/tests/postgres/writable.rs | 2 +- tests/tests/runner_tests.rs | 4 +-- 15 files changed, 87 insertions(+), 62 deletions(-) diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index 060c698fc19..e0c8a655b73 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -26,9 +26,6 @@ pub struct IndexingInputs { pub poi_version: ProofOfIndexingVersion, pub network: String, - // Correspondence between data source or template position in the manifest and name. - pub manifest_idx_and_name: Vec<(u32, String)>, - /// Whether to instrument trigger processing and log additional, /// possibly expensive and noisy, information pub instrument: bool, @@ -50,7 +47,6 @@ impl IndexingInputs { static_filters, poi_version, network, - manifest_idx_and_name, instrument, } = self; IndexingInputs { @@ -67,7 +63,6 @@ impl IndexingInputs { static_filters: *static_filters, poi_version: *poi_version, network: network.clone(), - manifest_idx_and_name: manifest_idx_and_name.clone(), instrument: *instrument, } } diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index af3880f980b..4c62113b789 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -199,12 +199,6 @@ impl SubgraphInstanceManager { let subgraph_store = self.subgraph_store.cheap_clone(); let registry = self.metrics_registry.cheap_clone(); - let store = self - .subgraph_store - .cheap_clone() - .writable(logger.clone(), deployment.id) - .await?; - let raw_yaml = serde_yaml::to_string(&manifest).unwrap(); let manifest = 
UnresolvedSubgraphManifest::parse(deployment.hash.cheap_clone(), manifest)?; @@ -256,7 +250,15 @@ impl SubgraphInstanceManager { ); } - let manifest_idx_and_name: Vec<(u32, String)> = manifest.template_idx_and_name().collect(); + let store = self + .subgraph_store + .cheap_clone() + .writable( + logger.clone(), + deployment.id, + Arc::new(manifest.template_idx_and_name().collect()), + ) + .await?; // Start the subgraph deployment before reading dynamic data // sources; if the subgraph is a graft or a copy, starting it will @@ -425,7 +427,6 @@ impl SubgraphInstanceManager { templates, unified_api_version, static_filters, - manifest_idx_and_name, poi_version, network, instrument, diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 3159e89dce4..34601dbdc75 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -477,7 +477,6 @@ where &self.metrics.host.stopwatch, persisted_data_sources, deterministic_errors, - self.inputs.manifest_idx_and_name.clone(), processed_data_sources, ) .await diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index a48cc7b9f5f..01875b9bfa2 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -139,10 +139,14 @@ pub trait SubgraphStore: Send + Sync + 'static { /// assumptions about the in-memory state of writing has been made; in /// particular, no assumptions about whether previous writes have /// actually been committed or not. + /// + /// The `manifest_idx_and_name` lists the correspondence between data + /// source or template position in the manifest and name. 
async fn writable( self: Arc, logger: Logger, deployment: DeploymentId, + manifest_idx_and_name: Arc>, ) -> Result, StoreError>; /// Initiate a graceful shutdown of the writable that a previous call to @@ -292,7 +296,6 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { stopwatch: &StopwatchMetrics, data_sources: Vec, deterministic_errors: Vec, - manifest_idx_and_name: Vec<(u32, String)>, offchain_to_remove: Vec, ) -> Result<(), StoreError>; diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index 16d95a95a1d..dde5b24e1bd 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -109,7 +109,12 @@ pub fn mock_context( api_version, )), state: BlockState::new( - futures03::executor::block_on(store.writable(LOGGER.clone(), deployment.id)).unwrap(), + futures03::executor::block_on(store.writable( + LOGGER.clone(), + deployment.id, + Arc::new(Vec::new()), + )) + .unwrap(), Default::default(), ), proof_of_indexing: None, diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 6e7098c2c52..ee92d3055e3 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1009,7 +1009,10 @@ async fn test_entity_store(api_version: Version) { load_and_set_user_name(&mut module, "steve", "Steve-O"); // We need to empty the cache for the next test - let writable = store.writable(LOGGER.clone(), deployment.id).await.unwrap(); + let writable = store + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .await + .unwrap(); let cache = std::mem::replace( &mut module.instance_ctx_mut().ctx.state.entity_cache, EntityCache::new(Arc::new(writable.clone())), diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 21cfa6f58af..0ce48b4ac88 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1347,6 +1347,7 @@ impl SubgraphStoreTrait for SubgraphStore { self: Arc, logger: Logger, deployment: 
graph::components::store::DeploymentId, + manifest_idx_and_name: Arc>, ) -> Result, StoreError> { let deployment = deployment.into(); // We cache writables to make sure calls to this method are @@ -1370,7 +1371,14 @@ impl SubgraphStoreTrait for SubgraphStore { .unwrap()?; // Propagate panics, there shouldn't be any. let writable = Arc::new( - WritableStore::new(self.as_ref().clone(), logger, site, self.registry.clone()).await?, + WritableStore::new( + self.as_ref().clone(), + logger, + site, + manifest_idx_and_name, + self.registry.clone(), + ) + .await?, ); self.writables .lock() diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 767d74a19a1..732985b30ea 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -71,6 +71,7 @@ struct SyncStore { writable: Arc, site: Arc, input_schema: Arc, + manifest_idx_and_name: Arc>, } impl SyncStore { @@ -78,6 +79,7 @@ impl SyncStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, + manifest_idx_and_name: Arc>, ) -> Result { let store = WritableSubgraphStore(subgraph_store.clone()); let writable = subgraph_store.for_site(site.as_ref())?.clone(); @@ -88,6 +90,7 @@ impl SyncStore { writable, site, input_schema, + manifest_idx_and_name, }) } @@ -211,7 +214,6 @@ impl SyncStore { stopwatch: &StopwatchMetrics, data_sources: &[StoredDynamicDataSource], deterministic_errors: &[SubgraphError], - manifest_idx_and_name: &[(u32, String)], processed_data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { retry::forever(&self.logger, "transact_block_operations", move || { @@ -224,7 +226,7 @@ impl SyncStore { stopwatch, data_sources, deterministic_errors, - manifest_idx_and_name, + &self.manifest_idx_and_name, processed_data_sources, )?; @@ -439,7 +441,6 @@ enum Request { mods: Vec, data_sources: Vec, deterministic_errors: Vec, - manifest_idx_and_name: Vec<(u32, String)>, processed_data_sources: Vec, }, RevertTo { @@ -490,7 +491,6 @@ impl Request { mods, 
data_sources, deterministic_errors, - manifest_idx_and_name, processed_data_sources, } => store .transact_block_operations( @@ -500,7 +500,6 @@ impl Request { stopwatch, data_sources, deterministic_errors, - manifest_idx_and_name, processed_data_sources, ) .map(|()| ExecResult::Continue), @@ -983,7 +982,6 @@ impl Writer { stopwatch: &StopwatchMetrics, data_sources: Vec, deterministic_errors: Vec, - manifest_idx_and_name: Vec<(u32, String)>, processed_data_sources: Vec, ) -> Result<(), StoreError> { match self { @@ -994,7 +992,6 @@ impl Writer { stopwatch, &data_sources, &deterministic_errors, - &manifest_idx_and_name, &processed_data_sources, ), Writer::Async { queue, .. } => { @@ -1007,7 +1004,6 @@ impl Writer { mods, data_sources, deterministic_errors, - manifest_idx_and_name, processed_data_sources, }; queue.push(req).await @@ -1121,9 +1117,15 @@ impl WritableStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, + manifest_idx_and_name: Arc>, registry: Arc, ) -> Result { - let store = Arc::new(SyncStore::new(subgraph_store, logger.clone(), site)?); + let store = Arc::new(SyncStore::new( + subgraph_store, + logger.clone(), + site, + manifest_idx_and_name, + )?); let block_ptr = Mutex::new(store.block_ptr().await?); let block_cursor = Mutex::new(store.block_cursor().await?); let writer = Writer::new( @@ -1254,7 +1256,6 @@ impl WritableStoreTrait for WritableStore { stopwatch: &StopwatchMetrics, data_sources: Vec, deterministic_errors: Vec, - manifest_idx_and_name: Vec<(u32, String)>, processed_data_sources: Vec, ) -> Result<(), StoreError> { self.writer @@ -1265,7 +1266,6 @@ impl WritableStoreTrait for WritableStore { stopwatch, data_sources, deterministic_errors, - manifest_idx_and_name, processed_data_sources, ) .await?; @@ -1336,8 +1336,9 @@ impl WritableStoreTrait for WritableStore { } } let store = Arc::new(self.store.store.0.clone()); + let manifest_idx_and_name = self.store.manifest_idx_and_name.cheap_clone(); store - .writable(logger, 
self.store.site.id.into()) + .writable(logger, self.store.site.id.into(), manifest_idx_and_name) .await .map(|store| Some(store)) } else { diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index a136c1fae5c..1a1737f9eb1 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -183,7 +183,7 @@ pub async fn create_subgraph( SUBGRAPH_STORE .cheap_clone() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await? .start_subgraph_deployment(&LOGGER) .await?; @@ -222,7 +222,7 @@ pub async fn transact_errors( ); store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await? .transact_block_operations( block_ptr_to, @@ -232,7 +232,6 @@ pub async fn transact_errors( Vec::new(), errs, Vec::new(), - Vec::new(), ) .await?; flush(deployment).await @@ -283,8 +282,11 @@ pub async fn transact_entities_and_dynamic_data_sources( ops: Vec, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result<(), StoreError> { - let store = - futures03::executor::block_on(store.cheap_clone().writable(LOGGER.clone(), deployment.id))?; + let store = futures03::executor::block_on(store.cheap_clone().writable( + LOGGER.clone(), + deployment.id, + Arc::new(manifest_idx_and_name), + ))?; let mut entity_cache = EntityCache::new(Arc::new(store.clone())); entity_cache.append(ops); let mods = entity_cache @@ -306,7 +308,6 @@ pub async fn transact_entities_and_dynamic_data_sources( &stopwatch_metrics, data_sources, Vec::new(), - manifest_idx_and_name, Vec::new(), ) .await @@ -316,7 +317,7 @@ pub async fn transact_entities_and_dynamic_data_sources( pub async fn revert_block(store: &Arc, deployment: &DeploymentLocator, ptr: &BlockPtr) { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable") 
.revert_block_operations(ptr.clone(), FirehoseCursor::None) @@ -371,7 +372,7 @@ pub async fn insert_entities( pub async fn flush(deployment: &DeploymentLocator) -> Result<(), StoreError> { let writable = SUBGRAPH_STORE .cheap_clone() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("we can get a writable"); writable.flush().await diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 1f0211dda45..12a05e1caba 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -129,7 +129,6 @@ impl WritableStore for MockStore { _: &StopwatchMetrics, _: Vec, _: Vec, - _: Vec<(u32, String)>, _: Vec, ) -> Result<(), StoreError> { unimplemented!() @@ -384,7 +383,7 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("we can get a writable store"); diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 92c8b9afdd7..c6ae1fcb7e1 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -121,7 +121,7 @@ where store .cheap_clone() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .unwrap() .flush() @@ -326,7 +326,9 @@ async fn check_graft( .await .unwrap(); - let writable = store.writable(LOGGER.clone(), deployment.id).await?; + let writable = store + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .await?; writable .revert_block_operations(BLOCKS[1].clone(), FirehoseCursor::None) .await @@ -438,7 +440,7 @@ fn copy() { store .cheap_clone() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await? 
.start_subgraph_deployment(&LOGGER) .await?; @@ -467,7 +469,10 @@ fn on_sync() { on_sync, )?; - let writable = store.cheap_clone().writable(LOGGER.clone(), dst.id).await?; + let writable = store + .cheap_clone() + .writable(LOGGER.clone(), dst.id, Arc::new(Vec::new())) + .await?; writable.start_subgraph_deployment(&LOGGER).await?; writable.deployment_synced()?; @@ -513,7 +518,10 @@ fn on_sync() { OnSync::Replace, )?; - let writable = store.cheap_clone().writable(LOGGER.clone(), dst.id).await?; + let writable = store + .cheap_clone() + .writable(LOGGER.clone(), dst.id, Arc::new(Vec::new())) + .await?; // Perform the copy writable.start_subgraph_deployment(&LOGGER).await?; diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 8f30ca6e229..d6231bf58f8 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -141,7 +141,7 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("we can get a writable store"); @@ -1530,7 +1530,6 @@ fn handle_large_string_with_index() { Vec::new(), Vec::new(), Vec::new(), - Vec::new(), ) .await .expect("Failed to insert large text"); @@ -1624,7 +1623,6 @@ fn handle_large_bytea_with_index() { Vec::new(), Vec::new(), Vec::new(), - Vec::new(), ) .await .expect("Failed to insert large text"); diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index e0e58d8ea72..30b5e032d21 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -55,7 +55,7 @@ fn get_version_info(store: &Store, subgraph_name: &str) -> VersionInfo { async fn latest_block(store: &Store, deployment_id: DeploymentId) -> BlockPtr { store .subgraph_store() - .writable(LOGGER.clone(), deployment_id) + 
.writable(LOGGER.clone(), deployment_id, Arc::new(Vec::new())) .await .expect("can get writable") .block_ptr() @@ -176,10 +176,14 @@ fn create_subgraph() { } fn deployment_synced(store: &Arc, deployment: &DeploymentLocator) { - futures03::executor::block_on(store.cheap_clone().writable(LOGGER.clone(), deployment.id)) - .expect("can get writable") - .deployment_synced() - .unwrap(); + futures03::executor::block_on(store.cheap_clone().writable( + LOGGER.clone(), + deployment.id, + Arc::new(Vec::new()), + )) + .expect("can get writable") + .deployment_synced() + .unwrap(); } // Test VersionSwitchingMode::Instant @@ -416,7 +420,7 @@ fn status() { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable") .fail_subgraph(error) @@ -576,7 +580,7 @@ fn fatal_vs_non_fatal() { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable") .fail_subgraph(error()) @@ -670,7 +674,7 @@ fn fail_unfail_deterministic_error() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable"); @@ -762,7 +766,7 @@ fn fail_unfail_deterministic_error_noop() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable"); @@ -889,7 +893,7 @@ fn fail_unfail_non_deterministic_error() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("can get writable"); @@ -989,7 +993,7 @@ fn fail_unfail_non_deterministic_error_noop() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), 
deployment.id, Arc::new(Vec::new())) .await .expect("can get writable"); diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index caf402f9e82..9965bb47cd9 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -87,7 +87,7 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id) + .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("we can get a writable store"); diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index 09656faf8f3..568863a6fa6 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -196,7 +196,7 @@ async fn file_data_sources() { let store = ctx.store.cheap_clone(); let writable = store - .writable(ctx.logger.clone(), ctx.deployment.id) + .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) .await .unwrap(); let datasources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); @@ -218,7 +218,7 @@ async fn file_data_sources() { let writable = ctx .store .clone() - .writable(ctx.logger.clone(), ctx.deployment.id) + .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) .await .unwrap(); let data_sources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); From c5fde6028dfed37e78e80ae3488f9b34a52c4987 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 20 Apr 2023 16:28:41 -0700 Subject: [PATCH 0228/2104] graph, store: Introduce a data structure to capture a write The contents of graph::components::store::write are somewhat provisional, mostly to plumb using a `Batch` through the rest of the code. A later commit will change this quite a bit. 
--- graph/src/components/store/mod.rs | 21 +- graph/src/components/store/write.rs | 289 ++++++++++++++++++ store/postgres/src/deployment_store.rs | 117 +++---- store/postgres/src/dynds/mod.rs | 12 +- store/postgres/src/dynds/private.rs | 93 +++--- store/postgres/src/dynds/shared.rs | 88 +++--- store/postgres/src/relational.rs | 42 +-- store/postgres/src/relational_queries.rs | 43 +-- store/postgres/src/writable.rs | 206 +++++-------- store/test-store/tests/postgres/relational.rs | 56 ++-- .../tests/postgres/relational_bytes.rs | 47 ++- 11 files changed, 633 insertions(+), 381 deletions(-) create mode 100644 graph/src/components/store/write.rs diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 8bfa55e796f..1a6fd3e88fa 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1,6 +1,7 @@ mod entity_cache; mod err; mod traits; +pub mod write; pub use entity_cache::{EntityCache, GetScope, ModificationsAndCache}; @@ -9,6 +10,7 @@ pub use err::StoreError; use itertools::Itertools; use strum_macros::Display; pub use traits::*; +pub use write::Batch; use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; @@ -703,10 +705,14 @@ pub struct StoreEvent { impl StoreEvent { pub fn new(changes: Vec) -> StoreEvent { + let changes = changes.into_iter().collect(); + StoreEvent::from_set(changes) + } + + fn from_set(changes: HashSet) -> StoreEvent { static NEXT_TAG: AtomicUsize = AtomicUsize::new(0); let tag = NEXT_TAG.fetch_add(1, Ordering::Relaxed); - let changes = changes.into_iter().collect(); StoreEvent { tag, changes } } @@ -728,6 +734,19 @@ impl StoreEvent { StoreEvent::new(changes) } + pub fn from_types(deployment: &DeploymentHash, entity_types: HashSet) -> Self { + let changes = + HashSet::from_iter( + entity_types + .into_iter() + .map(|entity_type| EntityChange::Data { + subgraph_id: deployment.clone(), + entity_type, + }), + ); + Self::from_set(changes) + } + /// Extend `ev1` with `ev2`. 
If `ev1` is `None`, just set it to `ev2` fn accumulate(logger: &Logger, ev1: &mut Option, ev2: StoreEvent) { if let Some(e) = ev1 { diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs new file mode 100644 index 00000000000..b4f5e1d537a --- /dev/null +++ b/graph/src/components/store/write.rs @@ -0,0 +1,289 @@ +//! Data structures and helpers for writing subgraph changes to the store +use std::collections::HashSet; + +use crate::{ + blockchain::{block_stream::FirehoseCursor, BlockPtr}, + cheap_clone::CheapClone, + components::subgraph::Entity, + data::subgraph::schema::SubgraphError, + prelude::DeploymentHash, +}; + +use super::{ + BlockNumber, EntityKey, EntityModification, EntityType, StoreEvent, StoredDynamicDataSource, +}; + +/// The data for a write operation; a write operation is either an insert of +/// a new entity or the overwriting of an existing entity. +#[derive(Debug)] +pub struct EntityWrite { + pub key: EntityKey, + pub data: Entity, + pub block: BlockNumber, +} + +impl EntityWrite { + pub fn new(key: EntityKey, data: Entity, block: BlockNumber) -> Self { + Self { key, data, block } + } +} + +/// A list of entity changes grouped by the entity type +pub struct RowGroup { + pub entity_type: EntityType, + pub rows: Vec, +} + +impl RowGroup { + pub fn new(entity_type: EntityType) -> Self { + Self { + entity_type, + rows: Vec::new(), + } + } + + pub fn push(&mut self, row: R) { + self.rows.push(row) + } + + fn row_count(&self) -> usize { + self.rows.len() + } +} + +/// A list of entity changes with one group per entity type +pub struct RowGroups { + pub groups: Vec>, +} + +impl RowGroups { + fn new() -> Self { + Self { groups: Vec::new() } + } + + fn group(&self, entity_type: &EntityType) -> Option<&RowGroup> { + self.groups + .iter() + .find(|group| &group.entity_type == entity_type) + } + + /// Return a mutable reference to an existing group. 
+ fn group_mut(&mut self, entity_type: &EntityType) -> Option<&mut RowGroup> { + self.groups + .iter_mut() + .find(|group| &group.entity_type == entity_type) + } + + /// Return a mutable reference to an existing group, or create a new one + /// if there isn't one yet and return a reference to that + fn group_entry(&mut self, entity_type: &EntityType) -> &mut RowGroup { + let pos = self + .groups + .iter() + .position(|group| &group.entity_type == entity_type); + match pos { + Some(pos) => &mut self.groups[pos], + None => { + self.groups.push(RowGroup::new(entity_type.clone())); + // unwrap: we just pushed an entry + self.groups.last_mut().unwrap() + } + } + } + + fn entity_count(&self) -> usize { + self.groups.iter().map(|group| group.row_count()).sum() + } +} + +/// Data sources data grouped by block +pub struct DataSources { + pub entries: Vec<(BlockPtr, Vec)>, +} + +impl DataSources { + fn new(ptr: BlockPtr, entries: Vec) -> Self { + let entries = if entries.is_empty() { + Vec::new() + } else { + vec![(ptr, entries)] + }; + DataSources { entries } + } + + pub fn is_empty(&self) -> bool { + self.entries.iter().all(|(_, dss)| dss.is_empty()) + } +} + +/// Indicate to code that looks up entities from the in-memory batch whether +/// the entity in question will be written or removed at the block of the +/// lookup +pub enum EntityOp<'a> { + /// There is a new version of the entity that will be written + Write(&'a Entity), + /// The entity has been removed + Remove, +} + +/// A write batch. This data structure encapsulates all the things that need +/// to be changed to persist the output of mappings up to a certain block. +/// For now, a batch will only contain changes for a single block, but will +/// eventually contain data for multiple blocks. 
+pub struct Batch { + /// The last block for which this batch contains changes + pub block_ptr: BlockPtr, + /// The firehose cursor corresponding to `block_ptr` + pub firehose_cursor: FirehoseCursor, + /// New entities that need to be inserted + pub inserts: RowGroups, + /// Existing entities that need to be modified + pub overwrites: RowGroups, + /// Existing entities that need to be removed + pub removes: RowGroups, + /// New data sources + pub data_sources: DataSources, + pub deterministic_errors: Vec, + pub offchain_to_remove: DataSources, +} + +impl Batch { + pub fn new( + block_ptr: BlockPtr, + firehose_cursor: FirehoseCursor, + mods: Vec, + data_sources: Vec, + deterministic_errors: Vec, + offchain_to_remove: Vec, + ) -> Self { + let block = block_ptr.number; + + let mut inserts = RowGroups::new(); + let mut overwrites = RowGroups::new(); + let mut removes = RowGroups::new(); + + for m in mods { + match m { + EntityModification::Insert { key, data } => { + let row = EntityWrite::new(key, data, block); + inserts.group_entry(&row.key.entity_type).push(row); + } + EntityModification::Overwrite { key, data } => { + let row = EntityWrite::new(key, data, block); + overwrites.group_entry(&row.key.entity_type).push(row); + } + EntityModification::Remove { key } => { + removes.group_entry(&key.entity_type).push(key) + } + } + } + + let data_sources = DataSources::new(block_ptr.cheap_clone(), data_sources); + let offchain_to_remove = DataSources::new(block_ptr.cheap_clone(), offchain_to_remove); + Self { + block_ptr, + firehose_cursor, + inserts, + overwrites, + removes, + data_sources, + deterministic_errors, + offchain_to_remove, + } + } + + pub fn entity_count(&self) -> usize { + self.inserts.entity_count() + self.overwrites.entity_count() + self.removes.entity_count() + } + + /// Find out whether the latest operation for the entity with type + /// `entity_type` and `id` is going to write that entity, i.e., insert + /// or overwrite it, or if it is going to remove 
it. If no change will + /// be made to the entity, return `None` + pub fn last_op(&self, entity_type: &EntityType, id: &str) -> Option> { + // Check if we are inserting or overwriting the entity + if let Some((_, entity)) = self + .writes(entity_type) + .find(|(_, entity)| entity.id() == id) + { + return Some(EntityOp::Write(entity)); + } + self.removes(entity_type) + .find(|key| key.entity_id.as_str() == id) + .map(|_| EntityOp::Remove) + } + + /// Iterate over all entities that have a pending write + pub fn writes(&self, entity_type: &EntityType) -> impl Iterator { + self.inserts + .group(entity_type) + .into_iter() + .map(|ew| &ew.rows) + .flatten() + .chain( + self.overwrites + .group(entity_type) + .into_iter() + .map(|ew| &ew.rows) + .flatten(), + ) + .map(|ew| (&ew.key, &ew.data)) + } + + /// Iterate over all entities that have a pending write, allowing for + /// mutation of the entity + pub fn writes_mut( + &mut self, + entity_type: &EntityType, + ) -> impl Iterator { + self.inserts + .group_mut(entity_type) + .into_iter() + .map(|rg| &mut rg.rows) + .flatten() + .chain( + self.overwrites + .group_mut(entity_type) + .into_iter() + .map(|rg| &mut rg.rows) + .flatten(), + ) + .map(|ew| (&ew.key, &mut ew.data)) + } + + /// Iterate over all entity deletions/removals + pub fn removes(&self, entity_type: &EntityType) -> impl Iterator { + self.removes + .group(entity_type) + .into_iter() + .map(|rg| &rg.rows) + .flatten() + } + + pub fn new_data_sources(&self) -> impl Iterator { + self.data_sources + .entries + .iter() + .map(|(_, ds)| ds) + .flatten() + .filter(|ds| { + !self + .offchain_to_remove + .entries + .iter() + .any(|(_, entries)| entries.contains(ds)) + }) + } + + /// Generate a store event for all the changes that this batch makes + pub fn store_event(&self, deployment: &DeploymentHash) -> StoreEvent { + let entity_types = HashSet::from_iter( + self.inserts + .groups + .iter() + .chain(self.overwrites.groups.iter()) + .map(|group| 
group.entity_type.clone()), + ); + StoreEvent::from_types(deployment, entity_types) + } +} diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 5d054c823ef..3453d51c7a0 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -5,8 +5,9 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; +use graph::components::store::write::{EntityWrite, RowGroup, RowGroups}; use graph::components::store::{ - DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, + Batch, DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; @@ -38,8 +39,8 @@ use graph::constraint_violation; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_DIGEST, POI_OBJECT}; use graph::prelude::{ anyhow, debug, info, o, warn, web3, AttributeNames, BlockNumber, BlockPtr, CheapClone, - DeploymentHash, DeploymentState, Entity, EntityModification, EntityQuery, Error, Logger, - QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, ENV_VARS, + DeploymentHash, DeploymentState, Entity, EntityQuery, Error, Logger, QueryExecutionError, + StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, Value, ENV_VARS, }; use graph::schema::{ApiSchema, InputSchema}; use web3::types::Address; @@ -319,113 +320,81 @@ impl DeploymentStore { &self, conn: &PgConnection, layout: &Layout, - mods: &[EntityModification], + inserts: &RowGroups, + overwrites: &RowGroups, + removes: &RowGroups, ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { - use EntityModification::*; let mut count = 0; - // Group `Insert`s and `Overwrite`s by key, and accumulate `Remove`s. 
- let mut inserts = HashMap::new(); - let mut overwrites = HashMap::new(); - let mut removals = HashMap::new(); - for modification in mods.iter() { - match modification { - Insert { key, data } => { - inserts - .entry(key.entity_type.clone()) - .or_insert_with(Vec::new) - .push((key, data)); - } - Overwrite { key, data } => { - overwrites - .entry(key.entity_type.clone()) - .or_insert_with(Vec::new) - .push((key, data)); - } - Remove { key } => { - removals - .entry(key.entity_type.clone()) - .or_insert_with(Vec::new) - .push(key.entity_id.as_str()); - } - } - } - // Apply modification groups. // Inserts: - for (entity_type, entities) in inserts.into_iter() { - count += - self.insert_entities(&entity_type, &entities, conn, layout, ptr, stopwatch)? as i32 + for group in &inserts.groups { + count += self.insert_entities(group, conn, layout, ptr, stopwatch)? as i32 } // Overwrites: - for (entity_type, entities) in overwrites.into_iter() { + for group in &overwrites.groups { // we do not update the count since the number of entities remains the same - self.overwrite_entities(&entity_type, &entities, conn, layout, ptr, stopwatch)?; + self.overwrite_entities(group, conn, layout, ptr, stopwatch)?; } // Removals - for (entity_type, entity_keys) in removals.into_iter() { - count -= - self.remove_entities(&entity_type, &entity_keys, conn, layout, ptr, stopwatch)? - as i32; + for group in &removes.groups { + count -= self.remove_entities(group, conn, layout, ptr, stopwatch)? as i32; } Ok(count) } fn insert_entities<'a>( &'a self, - entity_type: &'a EntityType, - data: &'a [(&'a EntityKey, &'a Entity)], + group: &'a RowGroup, conn: &PgConnection, layout: &'a Layout, ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let section = stopwatch.start_section("check_interface_entity_uniqueness"); - for (key, _) in data.iter() { + for row in group.rows.iter() { // WARNING: This will potentially execute 2 queries for each entity key. 
- self.check_interface_entity_uniqueness(conn, layout, key)?; + self.check_interface_entity_uniqueness(conn, layout, &row.key)?; } section.end(); let _section = stopwatch.start_section("apply_entity_modifications_insert"); - layout.insert(conn, entity_type, data, block_number(ptr), stopwatch) + layout.insert(conn, group, block_number(ptr), stopwatch) } fn overwrite_entities<'a>( &'a self, - entity_type: &'a EntityType, - data: &'a [(&'a EntityKey, &'a Entity)], + group: &'a RowGroup, conn: &PgConnection, layout: &'a Layout, ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let section = stopwatch.start_section("check_interface_entity_uniqueness"); - for (key, _) in data.iter() { + for row in group.rows.iter() { // WARNING: This will potentially execute 2 queries for each entity key. - self.check_interface_entity_uniqueness(conn, layout, key)?; + self.check_interface_entity_uniqueness(conn, layout, &row.key)?; } section.end(); let _section = stopwatch.start_section("apply_entity_modifications_update"); - layout.update(conn, entity_type, data, block_number(ptr), stopwatch) + layout.update(conn, group, block_number(ptr), stopwatch) } fn remove_entities( &self, - entity_type: &EntityType, - entity_keys: &[&str], + group: &RowGroup, conn: &PgConnection, layout: &Layout, ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let _section = stopwatch.start_section("apply_entity_modifications_delete"); - layout.delete(conn, entity_type, entity_keys, block_number(ptr), stopwatch) + layout.delete(conn, group, block_number(ptr), stopwatch) } /// Execute a closure with a connection to the database. 
@@ -1148,14 +1117,9 @@ impl DeploymentStore { self: &Arc, logger: &Logger, site: Arc, - block_ptr_to: &BlockPtr, - firehose_cursor: &FirehoseCursor, - mods: &[EntityModification], + batch: &Batch, stopwatch: &StopwatchMetrics, - data_sources: &[StoredDynamicDataSource], - deterministic_errors: &[SubgraphError], manifest_idx_and_name: &[(u32, String)], - processed_data_sources: &[StoredDynamicDataSource], ) -> Result { let conn = { let _section = stopwatch.start_section("transact_blocks_get_conn"); @@ -1166,7 +1130,7 @@ impl DeploymentStore { // wait with sending it until we have done all our other work // so that we do not hold a lock on the notification queue // for longer than we have to - let event: StoreEvent = StoreEvent::from_mods(&site.deployment, mods); + let event: StoreEvent = batch.store_event(&site.deployment); let (layout, earliest_block) = deployment::with_lock(&conn, &site, || { conn.transaction(|| -> Result<_, StoreError> { @@ -1177,8 +1141,10 @@ impl DeploymentStore { let count = self.apply_entity_modifications( &conn, layout.as_ref(), - mods, - block_ptr_to, + &batch.inserts, + &batch.overwrites, + &batch.removes, + &batch.block_ptr, stopwatch, )?; section.end(); @@ -1186,30 +1152,35 @@ impl DeploymentStore { dynds::insert( &conn, &site, - data_sources, - block_ptr_to, + &batch.data_sources, + &batch.block_ptr, manifest_idx_and_name, )?; - dynds::update_offchain_status(&conn, &site, processed_data_sources)?; + dynds::update_offchain_status(&conn, &site, &batch.offchain_to_remove)?; - if !deterministic_errors.is_empty() { + if !batch.deterministic_errors.is_empty() { deployment::insert_subgraph_errors( &conn, &site.deployment, - deterministic_errors, - block_ptr_to.block_number(), + &batch.deterministic_errors, + batch.block_ptr.number, )?; } - let earliest_block = - deployment::transact_block(&conn, &site, block_ptr_to, firehose_cursor, count)?; + let earliest_block = deployment::transact_block( + &conn, + &site, + &batch.block_ptr, + 
&batch.firehose_cursor, + count, + )?; Ok((layout, earliest_block)) }) })?; - if block_ptr_to.number as f64 + if batch.block_ptr.number as f64 > earliest_block as f64 + layout.history_blocks as f64 * ENV_VARS.store.history_slack_factor { @@ -1222,7 +1193,7 @@ impl DeploymentStore { site, layout.history_blocks, earliest_block, - block_ptr_to.number, + batch.block_ptr.number, )?; } diff --git a/store/postgres/src/dynds/mod.rs b/store/postgres/src/dynds/mod.rs index 5f6dd273964..5731aed07f1 100644 --- a/store/postgres/src/dynds/mod.rs +++ b/store/postgres/src/dynds/mod.rs @@ -7,7 +7,7 @@ use crate::primary::Site; use diesel::PgConnection; use graph::{ blockchain::BlockPtr, - components::store::StoredDynamicDataSource, + components::store::{write, StoredDynamicDataSource}, constraint_violation, data_source::CausalityRegion, prelude::{BlockNumber, StoreError}, @@ -28,16 +28,12 @@ pub fn load( pub(crate) fn insert( conn: &PgConnection, site: &Site, - data_sources: &[StoredDynamicDataSource], + data_sources: &write::DataSources, block_ptr: &BlockPtr, manifest_idx_and_name: &[(u32, String)], ) -> Result { match site.schema_version.private_data_sources() { - true => DataSourcesTable::new(site.namespace.clone()).insert( - conn, - data_sources, - block_ptr.number, - ), + true => DataSourcesTable::new(site.namespace.clone()).insert(conn, data_sources), false => shared::insert( conn, &site.deployment, @@ -62,7 +58,7 @@ pub(crate) fn revert( pub(crate) fn update_offchain_status( conn: &PgConnection, site: &Site, - data_sources: &[StoredDynamicDataSource], + data_sources: &write::DataSources, ) -> Result<(), StoreError> { if data_sources.is_empty() { return Ok(()); diff --git a/store/postgres/src/dynds/private.rs b/store/postgres/src/dynds/private.rs index 98568dc8c5f..08fc9f87272 100644 --- a/store/postgres/src/dynds/private.rs +++ b/store/postgres/src/dynds/private.rs @@ -10,7 +10,7 @@ use diesel::{ use graph::{ anyhow::Context, - components::store::StoredDynamicDataSource, + 
components::store::{write, StoredDynamicDataSource}, constraint_violation, data_source::CausalityRegion, prelude::{serde_json, BlockNumber, StoreError}, @@ -144,48 +144,49 @@ impl DataSourcesTable { pub(crate) fn insert( &self, conn: &PgConnection, - data_sources: &[StoredDynamicDataSource], - block: BlockNumber, + data_sources: &write::DataSources, ) -> Result { let mut inserted_total = 0; - for ds in data_sources { - let StoredDynamicDataSource { - manifest_idx, - param, - context, - creation_block, - done_at, - causality_region, - } = ds; - - if creation_block != &Some(block) { - return Err(constraint_violation!( - "mismatching creation blocks `{:?}` and `{}`", + for (block_ptr, dss) in &data_sources.entries { + let block = block_ptr.number; + for ds in dss { + let StoredDynamicDataSource { + manifest_idx, + param, + context, creation_block, - block - )); - } + done_at, + causality_region, + } = ds; - // Offchain data sources have a unique causality region assigned from a sequence in the - // database, while onchain data sources always have causality region 0. - let query = format!( + if creation_block != &Some(block) { + return Err(constraint_violation!( + "mismatching creation blocks `{:?}` and `{}`", + creation_block, + block + )); + } + + // Offchain data sources have a unique causality region assigned from a sequence in the + // database, while onchain data sources always have causality region 0. 
+ let query = format!( "insert into {}(block_range, manifest_idx, param, context, causality_region, done_at) \ values (int4range($1, null), $2, $3, $4, $5, $6)", self.qname ); - let query = sql_query(query) - .bind::, _>(creation_block) - .bind::(*manifest_idx as i32) - .bind::, _>(param.as_ref().map(|p| &**p)) - .bind::, _>(context) - .bind::(causality_region) - .bind::, _>(done_at); + let query = sql_query(query) + .bind::, _>(creation_block) + .bind::(*manifest_idx as i32) + .bind::, _>(param.as_ref().map(|p| &**p)) + .bind::, _>(context) + .bind::(causality_region) + .bind::, _>(done_at); - inserted_total += query.execute(conn)?; + inserted_total += query.execute(conn)?; + } } - Ok(inserted_total) } @@ -294,25 +295,27 @@ impl DataSourcesTable { pub(super) fn update_offchain_status( &self, conn: &PgConnection, - data_sources: &[StoredDynamicDataSource], + data_sources: &write::DataSources, ) -> Result<(), StoreError> { - for ds in data_sources { - let query = format!( - "update {} set done_at = $1 where causality_region = $2", - self.qname - ); - - let count = sql_query(query) - .bind::, _>(ds.done_at) - .bind::(ds.causality_region) - .execute(conn)?; - - if count > 1 { - return Err(constraint_violation!( + for (_, dss) in &data_sources.entries { + for ds in dss { + let query = format!( + "update {} set done_at = $1 where causality_region = $2", + self.qname + ); + + let count = sql_query(query) + .bind::, _>(ds.done_at) + .bind::(ds.causality_region) + .execute(conn)?; + + if count > 1 { + return Err(constraint_violation!( "expected to remove at most one offchain data source but would remove {}, causality region: {}", count, ds.causality_region )); + } } } diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index 45f3108806a..c3e5a77919e 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -10,7 +10,7 @@ use diesel::{ use diesel::{insert_into, pg::PgConnection}; use graph::{ - 
components::store::StoredDynamicDataSource, + components::store::{write, StoredDynamicDataSource}, constraint_violation, data_source::CausalityRegion, prelude::{ @@ -103,7 +103,7 @@ pub(super) fn load( pub(super) fn insert( conn: &PgConnection, deployment: &DeploymentHash, - data_sources: &[StoredDynamicDataSource], + data_sources: &write::DataSources, block_ptr: &BlockPtr, manifest_idx_and_name: &[(u32, String)], ) -> Result { @@ -115,50 +115,56 @@ pub(super) fn insert( } let dds: Vec<_> = data_sources + .entries .iter() - .map(|ds| { - let StoredDynamicDataSource { - manifest_idx: _, - param, - context, - creation_block: _, - done_at: _, - causality_region, - } = ds; - - if causality_region != &CausalityRegion::ONCHAIN { - return Err(constraint_violation!( - "using shared data source schema with file data sources" - )); - } - - let address = match param { - Some(param) => param, - None => { + .map(|(_, dds)| { + dds.iter().map(|ds| { + let StoredDynamicDataSource { + manifest_idx: _, + param, + context, + creation_block: _, + done_at: _, + causality_region, + } = ds; + + if causality_region != &CausalityRegion::ONCHAIN { return Err(constraint_violation!( - "dynamic data sources must have an addres", + "using shared data source schema with file data sources" )); } - }; - let name = manifest_idx_and_name - .iter() - .find(|(idx, _)| *idx == ds.manifest_idx) - .ok_or_else(|| constraint_violation!("manifest idx {} not found", ds.manifest_idx))? 
- .1 - .clone(); - Ok(( - decds::deployment.eq(deployment.as_str()), - decds::name.eq(name), - decds::context.eq(context - .as_ref() - .map(|ctx| serde_json::to_string(ctx).unwrap())), - decds::address.eq(&**address), - decds::abi.eq(""), - decds::start_block.eq(0), - decds::ethereum_block_number.eq(sql(&format!("{}::numeric", block_ptr.number))), - decds::ethereum_block_hash.eq(block_ptr.hash_slice()), - )) + + let address = match param { + Some(param) => param, + None => { + return Err(constraint_violation!( + "dynamic data sources must have an addres", + )); + } + }; + let name = manifest_idx_and_name + .iter() + .find(|(idx, _)| *idx == ds.manifest_idx) + .ok_or_else(|| { + constraint_violation!("manifest idx {} not found", ds.manifest_idx) + })? + .1 + .clone(); + Ok(( + decds::deployment.eq(deployment.as_str()), + decds::name.eq(name), + decds::context.eq(context + .as_ref() + .map(|ctx| serde_json::to_string(ctx).unwrap())), + decds::address.eq(&**address), + decds::abi.eq(""), + decds::start_block.eq(0), + decds::ethereum_block_number.eq(sql(&format!("{}::numeric", block_ptr.number))), + decds::ethereum_block_hash.eq(block_ptr.hash_slice()), + )) + }) }) + .flatten() .collect::>()?; insert_into(decds::table) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 6111ff6ac8b..d0220af1826 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -24,6 +24,7 @@ use diesel::types::{FromSql, ToSql}; use diesel::{connection::SimpleConnection, Connection}; use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; use graph::cheap_clone::CheapClone; +use graph::components::store::write::{EntityWrite, RowGroup}; use graph::constraint_violation; use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; @@ -33,6 +34,7 @@ use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME}; use 
graph::slog::warn; use inflector::Inflector; +use itertools::Itertools; use lazy_static::lazy_static; use std::borrow::Borrow; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; @@ -652,19 +654,18 @@ impl Layout { pub fn insert<'a>( &'a self, conn: &PgConnection, - entity_type: &'a EntityType, - entities: &'a [(&'a EntityKey, &'a Entity)], + group: &'a RowGroup, block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { - let table = self.table_for_entity(entity_type)?; + let table = self.table_for_entity(&group.entity_type)?; let _section = stopwatch.start_section("insert_modification_insert_query"); let mut count = 0; // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in entities.chunks(chunk_size) { + for chunk in group.rows.chunks(chunk_size) { count += InsertQuery::new(table, chunk, block)? .get_results(conn) .map(|ids| ids.len())? @@ -800,28 +801,29 @@ impl Layout { pub fn update<'a>( &'a self, conn: &PgConnection, - entity_type: &'a EntityType, - entities: &'a [(&'a EntityKey, &'a Entity)], + group: &'a RowGroup, block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { - let table = self.table_for_entity(entity_type)?; + let table = self.table_for_entity(&group.entity_type)?; if table.immutable { - let ids = entities + let ids = group + .rows .iter() - .map(|(key, _)| key.entity_id.as_str()) + .map(|row| row.key.entity_id.as_str()) .collect::>() .join(", "); return Err(constraint_violation!( "entities of type `{}` can not be updated since they are immutable. 
Entity ids are [{}]", - entity_type, + group.entity_type, ids )); } - let entity_keys: Vec<&str> = entities + let entity_keys: Vec<&str> = group + .rows .iter() - .map(|(key, _)| key.entity_id.as_str()) + .map(|row| row.key.entity_id.as_str()) .collect(); let section = stopwatch.start_section("update_modification_clamp_range_query"); @@ -834,7 +836,7 @@ impl Layout { // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in entities.chunks(chunk_size) { + for chunk in group.rows.chunks(chunk_size) { count += InsertQuery::new(table, chunk, block)?.execute(conn)?; } Ok(count) @@ -843,22 +845,26 @@ impl Layout { pub fn delete( &self, conn: &PgConnection, - entity_type: &EntityType, - entity_ids: &[&str], + group: &RowGroup, block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { - let table = self.table_for_entity(entity_type)?; + let table = self.table_for_entity(&group.entity_type)?; if table.immutable { return Err(constraint_violation!( "entities of type `{}` can not be deleted since they are immutable. Entity ids are [{}]", - entity_type, entity_ids.join(", ") + table.object, group.rows.iter().map(|key| &key.entity_id).join(", ") )); } let _section = stopwatch.start_section("delete_modification_clamp_range_query"); let mut count = 0; - for chunk in entity_ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { + let ids: Vec<_> = group + .rows + .iter() + .map(|key| key.entity_id.as_str()) + .collect(); + for chunk in ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { count += ClampRangeQuery::new(table, chunk, block)?.execute(conn)? 
} Ok(count) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 033aaf2fa84..cfbf30df2a7 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -12,6 +12,7 @@ use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; use diesel::Connection; +use graph::components::store::write::EntityWrite; use graph::components::store::{DerivedEntityQuery, EntityKey}; use graph::data::store::NULL; use graph::data::value::{Object, Word}; @@ -1751,15 +1752,15 @@ impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} struct FulltextValues<'a>(HashMap>); impl<'a> FulltextValues<'a> { - fn new(table: &'a Table, entities: &'a [(&'a EntityKey, &'a Entity)]) -> Self { + fn new(table: &'a Table, rows: &'a [EntityWrite]) -> Self { let mut map = HashMap::new(); for column in table.columns.iter().filter(|column| column.is_fulltext()) { - for (_, entity) in entities { + for row in rows { let mut fulltext = Vec::new(); if let Some(fields) = column.fulltext_fields.as_ref() { let fulltext_field_values = fields .iter() - .filter_map(|field| entity.get(field)) + .filter_map(|field| row.data.get(field)) .cloned() .collect::>(); if !fulltext_field_values.is_empty() { @@ -1767,7 +1768,7 @@ impl<'a> FulltextValues<'a> { } } if !fulltext.is_empty() { - map.insert(entity.id(), fulltext); + map.insert(row.data.id(), fulltext); } } } @@ -1790,7 +1791,7 @@ impl<'a> FulltextValues<'a> { #[derive(Debug)] pub struct InsertQuery<'a> { table: &'a Table, - entities: &'a [(&'a EntityKey, &'a Entity)], + rows: &'a [EntityWrite], fulltext_values: FulltextValues<'a>, unique_columns: Vec<&'a Column>, br_column: BlockRangeColumn<'a>, @@ -1799,29 +1800,29 @@ pub struct InsertQuery<'a> { impl<'a> InsertQuery<'a> { pub fn new( table: &'a Table, - entities: &'a [(&'a EntityKey, &'a Entity)], + rows: &'a [EntityWrite], block: BlockNumber, ) -> Result, 
StoreError> { - for (entity_key, entity) in entities { + for row in rows { for column in table.columns.iter() { - if !column.is_nullable() && !entity.contains_key(&column.field) { + if !column.is_nullable() && !row.data.contains_key(&column.field) { return Err(StoreError::QueryExecutionError(format!( "can not insert entity {}[{}] since value for non-nullable attribute {} is missing. \ To fix this, mark the attribute as nullable in the GraphQL schema or change the \ mapping code to always set this attribute.", - entity_key.entity_type, entity_key.entity_id, column.field + table.object, row.data.id(), column.field ))); } } } - let fulltext_values = FulltextValues::new(table, entities); - let unique_columns = InsertQuery::unique_columns(table, entities, &fulltext_values); + let fulltext_values = FulltextValues::new(table, rows); + let unique_columns = InsertQuery::unique_columns(table, rows, &fulltext_values); let br_column = BlockRangeColumn::new(table, "", block); Ok(InsertQuery { table, - entities, + rows, fulltext_values, unique_columns, br_column, @@ -1831,18 +1832,18 @@ impl<'a> InsertQuery<'a> { /// Build the column name list using the subset of all keys among present entities. fn unique_columns( table: &'a Table, - entities: &'a [(&'a EntityKey, &'a Entity)], + rows: &'a [EntityWrite], fulltext_values: &FulltextValues<'a>, ) -> Vec<&'a Column> { table .columns .iter() .filter(|column| { - entities.iter().any(|(_, entity)| { + rows.iter().any(|row| { if column.is_fulltext() { - !fulltext_values.get(&entity.id(), &column.field).is_null() + !fulltext_values.get(&row.data.id(), &column.field).is_null() } else { - entity.get(&column.field).is_some() + row.data.get(&column.field).is_some() } }) }) @@ -1903,14 +1904,14 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(") values\n"); // Use a `Peekable` iterator to help us decide how to finalize each line. 
- let mut iter = self.entities.iter().peekable(); - while let Some((key, entity)) = iter.next() { + let mut iter = self.rows.iter().peekable(); + while let Some(row) = iter.next() { out.push_sql("("); for column in &self.unique_columns { let value = if column.is_fulltext() { - self.fulltext_values.get(&key.entity_id, &column.field) + self.fulltext_values.get(&row.key.entity_id, &column.field) } else { - entity.get(&column.field).unwrap_or(&NULL) + row.data.get(&column.field).unwrap_or(&NULL) }; QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); @@ -1918,7 +1919,7 @@ impl<'a> QueryFragment for InsertQuery<'a> { self.br_column.literal_range_current(&mut out)?; if self.table.has_causality_region { out.push_sql(", "); - out.push_bind_param::(&key.causality_region)?; + out.push_bind_param::(&row.key.causality_region)?; }; out.push_sql(")"); diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 732985b30ea..d9b4a1918c0 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -4,7 +4,9 @@ use std::sync::Mutex; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::{DeploymentCursorTracker, DerivedEntityQuery, EntityKey, ReadStore}; +use graph::components::store::{ + Batch, DeploymentCursorTracker, DerivedEntityQuery, EntityKey, ReadStore, +}; use graph::constraint_violation; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; @@ -18,7 +20,7 @@ use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ cheap_clone::CheapClone, - components::store::{self, EntityType, WritableStore as WritableStoreTrait}, + components::store::{self, write::EntityOp, EntityType, WritableStore as WritableStoreTrait}, data::subgraph::schema::SubgraphError, prelude::{ BlockPtr, DeploymentHash, EntityModification, Error, Logger, StopwatchMetrics, StoreError, @@ -208,26 
+210,16 @@ impl SyncStore { fn transact_block_operations( &self, - block_ptr_to: &BlockPtr, - firehose_cursor: &FirehoseCursor, - mods: &[EntityModification], + batch: &Batch, stopwatch: &StopwatchMetrics, - data_sources: &[StoredDynamicDataSource], - deterministic_errors: &[SubgraphError], - processed_data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { retry::forever(&self.logger, "transact_block_operations", move || { let event = self.writable.transact_block_operations( &self.logger, self.site.clone(), - block_ptr_to, - firehose_cursor, - mods, + batch, stopwatch, - data_sources, - deterministic_errors, &self.manifest_idx_and_name, - processed_data_sources, )?; let _section = stopwatch.start_section("send_store_event"); @@ -402,8 +394,8 @@ impl BlockTracker { fn update(&mut self, req: &Request) { match req { - Request::Write { block_ptr, .. } => { - self.block = self.block.min(block_ptr.number - 1); + Request::Write { batch, .. } => { + self.block = self.block.min(batch.block_ptr.number - 1); } Request::RevertTo { block_ptr, .. } => { // `block_ptr` is the block pointer we are reverting _to_, @@ -435,13 +427,7 @@ enum Request { Write { store: Arc, stopwatch: StopwatchMetrics, - /// The block at which we are writing the changes - block_ptr: BlockPtr, - firehose_cursor: FirehoseCursor, - mods: Vec, - data_sources: Vec, - deterministic_errors: Vec, - processed_data_sources: Vec, + batch: Batch, }, RevertTo { store: Arc, @@ -455,17 +441,12 @@ enum Request { impl std::fmt::Debug for Request { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::Write { - block_ptr, - mods, - store, - .. - } => write!( + Self::Write { batch, store, .. } => write!( f, "write[{}, {:p}, {} entities]", - block_ptr.number, + batch.block_ptr.number, store.as_ref(), - mods.len() + batch.entity_count() ), Self::RevertTo { block_ptr, store, .. 
@@ -484,24 +465,11 @@ impl Request { fn execute(&self) -> Result { match self { Request::Write { + batch, store, stopwatch, - block_ptr: block_ptr_to, - firehose_cursor, - mods, - data_sources, - deterministic_errors, - processed_data_sources, } => store - .transact_block_operations( - block_ptr_to, - firehose_cursor, - mods, - stopwatch, - data_sources, - deterministic_errors, - processed_data_sources, - ) + .transact_block_operations(batch, stopwatch) .map(|()| ExecResult::Continue), Request::RevertTo { store, @@ -725,6 +693,15 @@ impl Queue { Remove, } + impl<'a> From> for Op { + fn from(value: EntityOp) -> Self { + match value { + EntityOp::Write(entity) => Self::Write(entity.clone()), + EntityOp::Remove => Self::Remove, + } + } + } + // Going from newest to oldest entry in the queue as `find_map` does // ensures that we see reverts before we see the corresponding write // request. We ignore any write request that writes blocks that have @@ -736,19 +713,11 @@ impl Queue { let op = self.queue.find_map(|req| { tracker.update(req.as_ref()); match req.as_ref() { - Request::Write { - block_ptr, mods, .. - } => { - if tracker.visible(block_ptr) { - mods.iter() - .find(|emod| emod.entity_ref() == key) - .map(|emod| match emod { - EntityModification::Insert { data, .. } - | EntityModification::Overwrite { data, .. } => { - Op::Write(data.clone()) - } - EntityModification::Remove { .. } => Op::Remove, - }) + Request::Write { batch, .. } => { + if tracker.visible(&batch.block_ptr) { + batch + .last_op(&key.entity_type, &key.entity_id) + .map(Op::from) } else { None } @@ -778,15 +747,18 @@ impl Queue { |mut map: BTreeMap>, req| { tracker.update(req.as_ref()); match req.as_ref() { - Request::Write { - block_ptr, mods, .. - } => { - if tracker.visible(block_ptr) { - for emod in mods { - let key = emod.entity_ref(); - // The key must be removed to avoid overwriting it with a stale value. 
- if let Some(key) = keys.take(key) { - map.insert(key, emod.entity().cloned()); + Request::Write { batch, .. } => { + if tracker.visible(&batch.block_ptr) { + // See if we have changes for any of the keys. + for key in &keys { + match batch.last_op(&key.entity_type, &key.entity_id) { + Some(EntityOp::Write(entity)) => { + map.insert(key.clone(), Some(entity.clone())); + } + Some(EntityOp::Remove) => { + map.insert(key.clone(), None); + } + None => { /* nothing to do */ } } } } @@ -797,7 +769,8 @@ impl Queue { }, ); - // Whatever remains in `keys` needs to be gotten from the store + // Look entities for the remaining keys up in the store + keys.retain(|key| !entities_in_queue.contains_key(key)); let mut map = self.store.get_many(keys, tracker.query_block())?; // Extend the store results with the entities from the queue. @@ -823,32 +796,21 @@ impl Queue { |mut map: BTreeMap>, req| { tracker.update(req.as_ref()); match req.as_ref() { - Request::Write { - block_ptr, mods, .. - } => { - if tracker.visible(block_ptr) { - for emod in mods { - let key = emod.entity_ref(); - // we select just the entities that match the query - if derived_query.entity_type == key.entity_type { - if let Some(entity) = emod.entity().cloned() { - if let Some(related_id) = - entity.get(derived_query.entity_field.as_str()) - { - // we check only the field agains the value - if related_id.to_string() - == derived_query.value.to_string() - { - map.insert(key.clone(), Some(entity)); - } - } - } else { - // if the entity was deleted, we add here with no checks - // just for removing from the query - map.insert(key.clone(), emod.entity().cloned()); + Request::Write { batch, .. 
} => { + if tracker.visible(&batch.block_ptr) { + for (key, entity) in batch.writes(&derived_query.entity_type) { + if let Some(related_id) = + entity.get(derived_query.entity_field.as_str()) + { + // we check only the field against the value + if related_id.as_str() == Some(&derived_query.value) { + map.insert(key.clone(), Some(entity.clone())); } } } + for key in batch.removes(&derived_query.entity_type) { + map.insert(key.clone(), None); + } } } Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } @@ -892,15 +854,9 @@ impl Queue { let mut queue_dds = self.queue.fold(Vec::new(), |mut dds, req| { tracker.update(req.as_ref()); match req.as_ref() { - Request::Write { - block_ptr, - data_sources, - processed_data_sources, - .. - } => { - if tracker.visible(block_ptr) { - dds.extend(data_sources.clone()); - dds.retain(|dds| !processed_data_sources.contains(dds)); + Request::Write { batch, .. } => { + if tracker.visible(&batch.block_ptr) { + dds.extend(batch.new_data_sources().cloned()); } } Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } @@ -974,37 +930,15 @@ impl Writer { } } - async fn write( - &self, - block_ptr_to: BlockPtr, - firehose_cursor: FirehoseCursor, - mods: Vec, - stopwatch: &StopwatchMetrics, - data_sources: Vec, - deterministic_errors: Vec, - processed_data_sources: Vec, - ) -> Result<(), StoreError> { + async fn write(&self, batch: Batch, stopwatch: &StopwatchMetrics) -> Result<(), StoreError> { match self { - Writer::Sync(store) => store.transact_block_operations( - &block_ptr_to, - &firehose_cursor, - &mods, - stopwatch, - &data_sources, - &deterministic_errors, - &processed_data_sources, - ), + Writer::Sync(store) => store.transact_block_operations(&batch, stopwatch), Writer::Async { queue, .. 
} => { self.check_queue_running()?; let req = Request::Write { store: queue.store.cheap_clone(), stopwatch: queue.stopwatch.cheap_clone(), - block_ptr: block_ptr_to, - firehose_cursor, - mods, - data_sources, - deterministic_errors, - processed_data_sources, + batch, }; queue.push(req).await } @@ -1258,17 +1192,15 @@ impl WritableStoreTrait for WritableStore { deterministic_errors: Vec, processed_data_sources: Vec, ) -> Result<(), StoreError> { - self.writer - .write( - block_ptr_to.clone(), - firehose_cursor.clone(), - mods, - stopwatch, - data_sources, - deterministic_errors, - processed_data_sources, - ) - .await?; + let batch = Batch::new( + block_ptr_to.clone(), + firehose_cursor.clone(), + mods, + data_sources, + deterministic_errors, + processed_data_sources, + ); + self.writer.write(batch, stopwatch).await?; *self.block_ptr.lock().unwrap() = Some(block_ptr_to); *self.block_cursor.lock().unwrap() = firehose_cursor; diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index d14ac53e028..3a6fc11d3d1 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -3,6 +3,7 @@ use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; +use graph::data::value::Word; use graph::entity; use graph::prelude::{ o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, @@ -33,6 +34,8 @@ use graph_store_postgres::{ use test_store::*; +use crate::postgres::relational_bytes::{row_group, row_group_key}; + const THINGS_GQL: &str = r#" type _Schema_ @fulltext( name: "userSearch" @@ -232,14 +235,9 @@ fn insert_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); + let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); let inserted = layout - .insert( - conn, - &entity_type, - &entities_with_keys, - 
block, - &MOCK_STOPWATCH, - ) + .insert(conn, &group, block, &MOCK_STOPWATCH) .expect(&errmsg); assert_eq!(inserted, entities_with_keys_owned.len()); } @@ -272,15 +270,9 @@ fn update_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); - + let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); let updated = layout - .update( - conn, - &entity_type, - &entities_with_keys, - block, - &MOCK_STOPWATCH, - ) + .update(conn, &group, block, &MOCK_STOPWATCH) .expect(&errmsg); assert_eq!(updated, entities_with_keys_owned.len()); } @@ -586,9 +578,10 @@ fn update() { let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); - let entities = vec![(&key, &entity)]; + let entities = vec![(key, entity.clone())]; + let group = row_group(&entity_type, 0, entities); layout - .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) + .update(conn, &group, 0, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout @@ -640,10 +633,10 @@ fn update_many() { .collect(); let entities_vec = vec![one, two, three]; - let entities: Vec<(&EntityKey, &Entity)> = keys.iter().zip(entities_vec.iter()).collect(); - + let entities: Vec<_> = keys.into_iter().zip(entities_vec.into_iter()).collect(); + let group = row_group(&entity_type, 0, entities); layout - .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) + .update(conn, &group, 0, &MOCK_STOPWATCH) .expect("Failed to update"); // check updates took effect @@ -709,9 +702,10 @@ fn serialize_bigdecimal() { let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); - let entities = vec![(&key, &entity)]; + let entities = vec![(key, entity.clone())]; + let group = row_group(&entity_type, 0, entities); layout - .update(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) + .update(conn, &group, 0, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout @@ -754,9 +748,10 @@ fn 
delete() { // Delete where nothing is getting deleted let key = EntityKey::data("Scalar".to_owned(), "no such entity".to_owned()); let entity_type = EntityType::from("Scalar"); - let mut entity_keys = vec![key.entity_id.as_str()]; + let mut entity_keys = vec![key]; + let group = row_group_key(&entity_type, 1, entity_keys.clone()); let count = layout - .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(0, count); assert_eq!(2, count_scalar_entities(conn, layout)); @@ -764,11 +759,12 @@ fn delete() { // Delete entity two entity_keys .get_mut(0) - .map(|key| *key = "two") + .map(|key| key.entity_id = Word::from("two")) .expect("Failed to update key"); + let group = row_group_key(&entity_type, 1, entity_keys); let count = layout - .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(1, count); assert_eq!(1, count_scalar_entities(conn, layout)); @@ -790,9 +786,13 @@ fn insert_many_and_delete_many() { // Delete entities with ids equal to "two" and "three" let entity_type = EntityType::from("Scalar"); - let entity_keys = vec!["two", "three"]; + let entity_keys: Vec<_> = vec!["two", "three"] + .into_iter() + .map(|key| EntityKey::data(entity_type.as_str(), key)) + .collect(); + let group = row_group_key(&entity_type, 1, entity_keys); let num_removed = layout - .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(2, num_removed); assert_eq!(1, count_scalar_entities(conn, layout)); diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index ba0376aff68..7eddc8c0e72 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -1,12 +1,13 @@ //! 
Test relational schemas that use `Bytes` to store ids use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; +use graph::components::store::write::{EntityWrite, RowGroup}; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; -use graph::prelude::{EntityQuery, MetricsRegistry}; +use graph::prelude::{BlockNumber, EntityQuery, MetricsRegistry}; use graph::schema::InputSchema; use hex_literal::hex; use lazy_static::lazy_static; @@ -78,14 +79,39 @@ fn remove_test_data(conn: &PgConnection) { .expect("Failed to drop test schema"); } +pub fn row_group( + entity_type: &EntityType, + block: BlockNumber, + data: impl IntoIterator, +) -> RowGroup { + let mut group = RowGroup::new(entity_type.clone()); + for (key, entity) in data { + group.push(EntityWrite::new(key, entity, block)); + } + group +} + +pub fn row_group_key( + entity_type: &EntityType, + _block: BlockNumber, + data: impl IntoIterator, +) -> RowGroup { + let mut group = RowGroup::new(entity_type.clone()); + for key in data { + group.push(key); + } + group +} + fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity: Entity) { let key = EntityKey::data(entity_type.to_owned(), entity.id()); let entity_type = EntityType::from(entity_type); - let entities = vec![(&key, &entity)]; + let entities = vec![(key.clone(), entity)]; + let group = row_group(&entity_type, 0, entities); let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); layout - .insert(conn, &entity_type, &entities, 0, &MOCK_STOPWATCH) + .insert(conn, &group, 0, &MOCK_STOPWATCH) .expect(&errmsg); } @@ -296,9 +322,10 @@ fn update() { let entity_id = entity.id(); let entity_type = key.entity_type.clone(); - let entities = vec![(&key, &entity)]; + let entities = vec![(key, entity.clone())]; + let group = row_group(&entity_type, 1, entities); layout - .update(conn, &entity_type, 
&entities, 1, &MOCK_STOPWATCH) + .update(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout @@ -327,19 +354,21 @@ fn delete() { // Delete where nothing is getting deleted let key = EntityKey::data("Thing".to_owned(), "ffff".to_owned()); let entity_type = key.entity_type.clone(); - let mut entity_keys = vec![key.entity_id.as_str()]; + let mut entity_keys = vec![key.clone()]; + let group = row_group_key(&entity_type, 1, entity_keys.clone()); let count = layout - .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(0, count); // Delete entity two entity_keys .get_mut(0) - .map(|key| *key = TWO_ID) + .map(|key| key.entity_id = Word::from(TWO_ID)) .expect("Failed to update entity types"); + let group = row_group_key(&entity_type, 1, entity_keys); let count = layout - .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &group, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(1, count); }); From 3803ae73f9bb2bc64d3e45abb9be7cce1c093e28 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 25 Apr 2023 16:25:30 -0700 Subject: [PATCH 0229/2104] store: Allow using a different block for each entity on insert --- store/postgres/src/block_range.rs | 12 ------- store/postgres/src/deployment_store.rs | 5 ++- store/postgres/src/relational.rs | 5 ++- store/postgres/src/relational_queries.rs | 32 ++++++++++++------- store/test-store/tests/postgres/relational.rs | 4 +-- .../tests/postgres/relational_bytes.rs | 4 +-- 6 files changed, 27 insertions(+), 35 deletions(-) diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index a8d85e4c21c..98eeea144af 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -235,18 +235,6 @@ impl<'a> BlockRangeColumn<'a> { } } - /// Output the literal value of the block range `[block,..)`, mostly for - /// generating an insert 
statement containing the block range column - pub fn literal_range_current(&self, out: &mut AstPass) -> QueryResult<()> { - match self { - BlockRangeColumn::Mutable { block, .. } => { - let block_range: BlockRange = (*block..).into(); - out.push_bind_param::, _>(&block_range) - } - BlockRangeColumn::Immutable { block, .. } => out.push_bind_param::(block), - } - } - /// Output an expression that matches rows that are the latest version /// of their entity pub fn latest(&self, out: &mut AstPass) { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 3453d51c7a0..2b9577f8f9c 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -331,7 +331,7 @@ impl DeploymentStore { // Apply modification groups. // Inserts: for group in &inserts.groups { - count += self.insert_entities(group, conn, layout, ptr, stopwatch)? as i32 + count += self.insert_entities(group, conn, layout, stopwatch)? as i32 } // Overwrites: @@ -352,7 +352,6 @@ impl DeploymentStore { group: &'a RowGroup, conn: &PgConnection, layout: &'a Layout, - ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let section = stopwatch.start_section("check_interface_entity_uniqueness"); @@ -363,7 +362,7 @@ impl DeploymentStore { section.end(); let _section = stopwatch.start_section("apply_entity_modifications_insert"); - layout.insert(conn, group, block_number(ptr), stopwatch) + layout.insert(conn, group, stopwatch) } fn overwrite_entities<'a>( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index d0220af1826..b456d4eede1 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -655,7 +655,6 @@ impl Layout { &'a self, conn: &PgConnection, group: &'a RowGroup, - block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; @@ -666,7 +665,7 @@ impl Layout { // not exceed the maximum number of bindings 
allowed in queries let chunk_size = InsertQuery::chunk_size(table); for chunk in group.rows.chunks(chunk_size) { - count += InsertQuery::new(table, chunk, block)? + count += InsertQuery::new(table, chunk)? .get_results(conn) .map(|ids| ids.len())? } @@ -837,7 +836,7 @@ impl Layout { // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); for chunk in group.rows.chunks(chunk_size) { - count += InsertQuery::new(table, chunk, block)?.execute(conn)?; + count += InsertQuery::new(table, chunk)?.execute(conn)?; } Ok(count) } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index cfbf30df2a7..4f3ef87e73b 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -9,7 +9,7 @@ use diesel::pg::{Pg, PgConnection}; use diesel::query_builder::{AstPass, QueryFragment, QueryId}; use diesel::query_dsl::{LoadQuery, RunQueryDsl}; use diesel::result::{Error as DieselError, QueryResult}; -use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Text}; +use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Range, Text}; use diesel::Connection; use graph::components::store::write::EntityWrite; @@ -34,6 +34,7 @@ use std::fmt::{self, Display}; use std::iter::FromIterator; use std::str::FromStr; +use crate::block_range::BlockRange; use crate::relational::{ Column, ColumnType, IdType, Layout, SqlName, Table, BYTE_ARRAY_PREFIX_SIZE, PRIMARY_KEY_COLUMN, STRING_PREFIX_SIZE, @@ -1794,15 +1795,10 @@ pub struct InsertQuery<'a> { rows: &'a [EntityWrite], fulltext_values: FulltextValues<'a>, unique_columns: Vec<&'a Column>, - br_column: BlockRangeColumn<'a>, } impl<'a> InsertQuery<'a> { - pub fn new( - table: &'a Table, - rows: &'a [EntityWrite], - block: BlockNumber, - ) -> Result, StoreError> { + pub fn new(table: &'a Table, rows: &'a [EntityWrite]) -> Result, StoreError> { for row in rows { for column in 
table.columns.iter() { if !column.is_nullable() && !row.data.contains_key(&column.field) { @@ -1818,14 +1814,12 @@ impl<'a> InsertQuery<'a> { let fulltext_values = FulltextValues::new(table, rows); let unique_columns = InsertQuery::unique_columns(table, rows, &fulltext_values); - let br_column = BlockRangeColumn::new(table, "", block); Ok(InsertQuery { table, rows, fulltext_values, unique_columns, - br_column, }) } @@ -1871,6 +1865,21 @@ impl<'a> InsertQuery<'a> { } POSTGRES_MAX_PARAMETERS / count } + + /// Output the literal value of the block range `[block,..)`, mostly for + /// generating an insert statement containing the block range column + pub fn literal_range_current( + table: &Table, + block: BlockNumber, + out: &mut AstPass, + ) -> QueryResult<()> { + if table.immutable { + out.push_bind_param::(&block) + } else { + let block_range: BlockRange = (block..).into(); + out.push_bind_param::, _>(&block_range) + } + } } impl<'a> QueryFragment for InsertQuery<'a> { @@ -1895,7 +1904,8 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_identifier(column.name.as_str())?; out.push_sql(", "); } - self.br_column.name(&mut out); + out.push_sql(self.table.block_column().as_str()); + if self.table.has_causality_region { out.push_sql(", "); out.push_sql(CAUSALITY_REGION_COLUMN); @@ -1916,7 +1926,7 @@ impl<'a> QueryFragment for InsertQuery<'a> { QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); } - self.br_column.literal_range_current(&mut out)?; + Self::literal_range_current(&self.table, row.block, &mut out)?; if self.table.has_causality_region { out.push_sql(", "); out.push_bind_param::(&row.key.causality_region)?; diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 3a6fc11d3d1..b143a9db789 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -236,9 +236,7 @@ fn insert_entity_at( entity_type, 
entities_with_keys ); let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); - let inserted = layout - .insert(conn, &group, block, &MOCK_STOPWATCH) - .expect(&errmsg); + let inserted = layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); assert_eq!(inserted, entities_with_keys_owned.len()); } diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 7eddc8c0e72..f1715f63437 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -110,9 +110,7 @@ fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity let entities = vec![(key.clone(), entity)]; let group = row_group(&entity_type, 0, entities); let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); - layout - .insert(conn, &group, 0, &MOCK_STOPWATCH) - .expect(&errmsg); + layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); } fn insert_thing(conn: &PgConnection, layout: &Layout, id: &str, name: &str) { From 2f899afa6cfadff805186c8705d0ea95f68f94f9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 25 Apr 2023 17:35:52 -0700 Subject: [PATCH 0230/2104] graph, store: Allow using multiple blocks for updating entities We group all pending updates into runs by block. 
--- graph/src/components/store/write.rs | 117 +++++++++++++++++- store/postgres/src/deployment_store.rs | 5 +- store/postgres/src/relational.rs | 15 ++- store/test-store/tests/postgres/relational.rs | 10 +- .../tests/postgres/relational_bytes.rs | 2 +- 5 files changed, 130 insertions(+), 19 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index b4f5e1d537a..d81576106fb 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -13,9 +13,15 @@ use super::{ BlockNumber, EntityKey, EntityModification, EntityType, StoreEvent, StoredDynamicDataSource, }; +/// Trait for something that has a block number associated with it +pub trait BlockTagged { + fn block(&self) -> BlockNumber; +} + +#[derive(Debug)] /// The data for a write operation; a write operation is either an insert of /// a new entity or the overwriting of an existing entity. -#[derive(Debug)] +/// A helper for objects that are tagged with a block number pub struct EntityWrite { pub key: EntityKey, pub data: Entity, @@ -28,6 +34,12 @@ impl EntityWrite { } } +impl BlockTagged for EntityWrite { + fn block(&self) -> BlockNumber { + self.block + } +} + /// A list of entity changes grouped by the entity type pub struct RowGroup { pub entity_type: EntityType, @@ -51,6 +63,44 @@ impl RowGroup { } } +impl RowGroup { + pub fn runs(&self) -> impl Iterator { + RunIterator::new(self) + } +} + +struct RunIterator<'a, R> { + position: usize, + rows: &'a [R], +} + +impl<'a, R> RunIterator<'a, R> { + fn new(group: &'a RowGroup) -> Self { + RunIterator { + position: 0, + rows: &group.rows, + } + } +} + +impl<'a, R: BlockTagged> Iterator for RunIterator<'a, R> { + type Item = (BlockNumber, &'a [R]); + + fn next(&mut self) -> Option { + if self.position >= self.rows.len() { + return None; + } + let block = self.rows[self.position].block(); + let mut next = self.position; + while next < self.rows.len() && self.rows[next].block() == block { + next 
+= 1; + } + let res = Some((block, &self.rows[self.position..next])); + self.position = next; + res + } +} + /// A list of entity changes with one group per entity type pub struct RowGroups { pub groups: Vec>, @@ -287,3 +337,68 @@ impl Batch { StoreEvent::from_types(deployment, entity_types) } } + +#[cfg(test)] +mod test { + use crate::components::store::{BlockNumber, EntityType}; + + use super::{BlockTagged, RowGroup}; + + #[derive(Debug)] + struct Entry { + value: usize, + block: BlockNumber, + } + + impl BlockTagged for Entry { + fn block(&self) -> BlockNumber { + self.block + } + } + + #[track_caller] + fn check_runs(values: &[usize], blocks: &[BlockNumber], exp: &[(BlockNumber, &[usize])]) { + assert_eq!(values.len(), blocks.len()); + + let rows = values + .iter() + .zip(blocks.iter()) + .map(|(value, block)| Entry { + value: *value, + block: *block, + }) + .collect(); + let group = RowGroup { + entity_type: EntityType::new("Entry".to_string()), + rows, + }; + let act = group + .runs() + .map(|(block, entries)| { + ( + block, + entries.iter().map(|entry| entry.value).collect::>(), + ) + }) + .collect::>(); + let exp = Vec::from_iter( + exp.into_iter() + .map(|(block, values)| (*block, Vec::from_iter(values.iter().cloned()))), + ); + assert_eq!(exp, act); + } + + #[test] + fn run_iterator() { + type RunList<'a> = &'a [(i32, &'a [usize])]; + + let exp: RunList<'_> = &[(1, &[10, 11, 12])]; + check_runs(&[10, 11, 12], &[1, 1, 1], exp); + + let exp: RunList<'_> = &[(1, &[10, 11, 12]), (2, &[20, 21])]; + check_runs(&[10, 11, 12, 20, 21], &[1, 1, 1, 2, 2], exp); + + let exp: RunList<'_> = &[(1, &[10]), (2, &[20]), (1, &[11])]; + check_runs(&[10, 20, 11], &[1, 2, 1], exp); + } +} diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 2b9577f8f9c..e0cb8d817d3 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -337,7 +337,7 @@ impl DeploymentStore { // Overwrites: for group in 
&overwrites.groups { // we do not update the count since the number of entities remains the same - self.overwrite_entities(group, conn, layout, ptr, stopwatch)?; + self.overwrite_entities(group, conn, layout, stopwatch)?; } // Removals @@ -370,7 +370,6 @@ impl DeploymentStore { group: &'a RowGroup, conn: &PgConnection, layout: &'a Layout, - ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let section = stopwatch.start_section("check_interface_entity_uniqueness"); @@ -381,7 +380,7 @@ impl DeploymentStore { section.end(); let _section = stopwatch.start_section("apply_entity_modifications_update"); - layout.update(conn, group, block_number(ptr), stopwatch) + layout.update(conn, group, stopwatch) } fn remove_entities( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b456d4eede1..b1d9af420a4 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -801,7 +801,6 @@ impl Layout { &'a self, conn: &PgConnection, group: &'a RowGroup, - block: BlockNumber, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; @@ -819,14 +818,13 @@ impl Layout { )); } - let entity_keys: Vec<&str> = group - .rows - .iter() - .map(|row| row.key.entity_id.as_str()) - .collect(); - let section = stopwatch.start_section("update_modification_clamp_range_query"); - ClampRangeQuery::new(table, &entity_keys, block)?.execute(conn)?; + for (block, rows) in group.runs() { + let entity_keys: Vec<&str> = + rows.iter().map(|row| row.key.entity_id.as_str()).collect(); + + ClampRangeQuery::new(table, &entity_keys, block)?.execute(conn)?; + } section.end(); let _section = stopwatch.start_section("update_modification_insert_query"); @@ -838,6 +836,7 @@ impl Layout { for chunk in group.rows.chunks(chunk_size) { count += InsertQuery::new(table, chunk)?.execute(conn)?; } + Ok(count) } diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs 
index b143a9db789..fc4a12cf685 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -269,9 +269,7 @@ fn update_entity_at( entity_type, entities_with_keys ); let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); - let updated = layout - .update(conn, &group, block, &MOCK_STOPWATCH) - .expect(&errmsg); + let updated = layout.update(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); assert_eq!(updated, entities_with_keys_owned.len()); } @@ -579,7 +577,7 @@ fn update() { let entities = vec![(key, entity.clone())]; let group = row_group(&entity_type, 0, entities); layout - .update(conn, &group, 0, &MOCK_STOPWATCH) + .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout @@ -634,7 +632,7 @@ fn update_many() { let entities: Vec<_> = keys.into_iter().zip(entities_vec.into_iter()).collect(); let group = row_group(&entity_type, 0, entities); layout - .update(conn, &group, 0, &MOCK_STOPWATCH) + .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); // check updates took effect @@ -703,7 +701,7 @@ fn serialize_bigdecimal() { let entities = vec![(key, entity.clone())]; let group = row_group(&entity_type, 0, entities); layout - .update(conn, &group, 0, &MOCK_STOPWATCH) + .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index f1715f63437..5e75f542847 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -323,7 +323,7 @@ fn update() { let entities = vec![(key, entity.clone())]; let group = row_group(&entity_type, 1, entities); layout - .update(conn, &group, 1, &MOCK_STOPWATCH) + .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout From 6893f18af7d1346b6518e7ab34ec1fcf28f45e5a Mon Sep 17 00:00:00 
2001 From: David Lutterkort Date: Tue, 25 Apr 2023 17:50:42 -0700 Subject: [PATCH 0231/2104] graph, store: Allow using multiple blocks for removing entities --- graph/src/components/store/write.rs | 26 ++++++++++++++++--- store/postgres/src/deployment_store.rs | 15 +++++------ store/postgres/src/relational.rs | 22 ++++++++-------- store/postgres/src/writable.rs | 4 +-- store/test-store/tests/postgres/relational.rs | 14 +++++----- .../tests/postgres/relational_bytes.rs | 18 ++++++------- 6 files changed, 57 insertions(+), 42 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index d81576106fb..474d854a42e 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -40,6 +40,23 @@ impl BlockTagged for EntityWrite { } } +pub struct EntityRef { + pub key: EntityKey, + pub block: BlockNumber, +} + +impl EntityRef { + pub fn new(key: EntityKey, block: BlockNumber) -> Self { + Self { key, block } + } +} + +impl BlockTagged for EntityRef { + fn block(&self) -> BlockNumber { + self.block + } +} + /// A list of entity changes grouped by the entity type pub struct RowGroup { pub entity_type: EntityType, @@ -190,7 +207,7 @@ pub struct Batch { /// Existing entities that need to be modified pub overwrites: RowGroups, /// Existing entities that need to be removed - pub removes: RowGroups, + pub removes: RowGroups, /// New data sources pub data_sources: DataSources, pub deterministic_errors: Vec, @@ -223,7 +240,8 @@ impl Batch { overwrites.group_entry(&row.key.entity_type).push(row); } EntityModification::Remove { key } => { - removes.group_entry(&key.entity_type).push(key) + let row = EntityRef::new(key, block); + removes.group_entry(&row.key.entity_type).push(row) } } } @@ -259,7 +277,7 @@ impl Batch { return Some(EntityOp::Write(entity)); } self.removes(entity_type) - .find(|key| key.entity_id.as_str() == id) + .find(|eref| eref.key.entity_id.as_str() == id) .map(|_| EntityOp::Remove) } @@ -302,7 
+320,7 @@ impl Batch { } /// Iterate over all entity deletions/removals - pub fn removes(&self, entity_type: &EntityType) -> impl Iterator { + pub fn removes(&self, entity_type: &EntityType) -> impl Iterator { self.removes .group(entity_type) .into_iter() diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index e0cb8d817d3..18713a01cd9 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -5,7 +5,7 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::write::{EntityWrite, RowGroup, RowGroups}; +use graph::components::store::write::{EntityRef, EntityWrite, RowGroup, RowGroups}; use graph::components::store::{ Batch, DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, StoredDynamicDataSource, VersionStats, @@ -45,7 +45,7 @@ use graph::prelude::{ use graph::schema::{ApiSchema, InputSchema}; use web3::types::Address; -use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; +use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; use crate::deployment::{self, OnSync}; use crate::detail::ErrorDetail; use crate::dynds::DataSourcesTable; @@ -322,8 +322,7 @@ impl DeploymentStore { layout: &Layout, inserts: &RowGroups, overwrites: &RowGroups, - removes: &RowGroups, - ptr: &BlockPtr, + removes: &RowGroups, stopwatch: &StopwatchMetrics, ) -> Result { let mut count = 0; @@ -342,7 +341,7 @@ impl DeploymentStore { // Removals for group in &removes.groups { - count -= self.remove_entities(group, conn, layout, ptr, stopwatch)? as i32; + count -= self.remove_entities(group, conn, layout, stopwatch)? 
as i32; } Ok(count) } @@ -385,14 +384,13 @@ impl DeploymentStore { fn remove_entities( &self, - group: &RowGroup, + group: &RowGroup, conn: &PgConnection, layout: &Layout, - ptr: &BlockPtr, stopwatch: &StopwatchMetrics, ) -> Result { let _section = stopwatch.start_section("apply_entity_modifications_delete"); - layout.delete(conn, group, block_number(ptr), stopwatch) + layout.delete(conn, group, stopwatch) } /// Execute a closure with a connection to the database. @@ -1142,7 +1140,6 @@ impl DeploymentStore { &batch.inserts, &batch.overwrites, &batch.removes, - &batch.block_ptr, stopwatch, )?; section.end(); diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index b1d9af420a4..0ff418203df 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -24,7 +24,7 @@ use diesel::types::{FromSql, ToSql}; use diesel::{connection::SimpleConnection, Connection}; use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; use graph::cheap_clone::CheapClone; -use graph::components::store::write::{EntityWrite, RowGroup}; +use graph::components::store::write::{EntityRef, EntityWrite, RowGroup}; use graph::constraint_violation; use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; @@ -843,27 +843,27 @@ impl Layout { pub fn delete( &self, conn: &PgConnection, - group: &RowGroup, - block: BlockNumber, + group: &RowGroup, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; if table.immutable { return Err(constraint_violation!( "entities of type `{}` can not be deleted since they are immutable. 
Entity ids are [{}]", - table.object, group.rows.iter().map(|key| &key.entity_id).join(", ") + table.object, group.rows.iter().map(|eref| &eref.key.entity_id).join(", ") )); } let _section = stopwatch.start_section("delete_modification_clamp_range_query"); let mut count = 0; - let ids: Vec<_> = group - .rows - .iter() - .map(|key| key.entity_id.as_str()) - .collect(); - for chunk in ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { - count += ClampRangeQuery::new(table, chunk, block)?.execute(conn)? + for (block, rows) in group.runs() { + let ids: Vec<_> = rows + .iter() + .map(|eref| eref.key.entity_id.as_str()) + .collect(); + for chunk in ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { + count += ClampRangeQuery::new(table, chunk, block)?.execute(conn)? + } } Ok(count) } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index d9b4a1918c0..81529b6a733 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -808,8 +808,8 @@ impl Queue { } } } - for key in batch.removes(&derived_query.entity_type) { - map.insert(key.clone(), None); + for eref in batch.removes(&derived_query.entity_type) { + map.insert(eref.key.clone(), None); } } } diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index fc4a12cf685..e199ec5fbc1 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -34,7 +34,7 @@ use graph_store_postgres::{ use test_store::*; -use crate::postgres::relational_bytes::{row_group, row_group_key}; +use crate::postgres::relational_bytes::{row_group, row_group_ref}; const THINGS_GQL: &str = r#" type _Schema_ @fulltext( @@ -745,9 +745,9 @@ fn delete() { let key = EntityKey::data("Scalar".to_owned(), "no such entity".to_owned()); let entity_type = EntityType::from("Scalar"); let mut entity_keys = vec![key]; - let group = row_group_key(&entity_type, 1, entity_keys.clone()); + let group = row_group_ref(&entity_type, 1, 
entity_keys.clone()); let count = layout - .delete(conn, &group, 1, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(0, count); assert_eq!(2, count_scalar_entities(conn, layout)); @@ -758,9 +758,9 @@ fn delete() { .map(|key| key.entity_id = Word::from("two")) .expect("Failed to update key"); - let group = row_group_key(&entity_type, 1, entity_keys); + let group = row_group_ref(&entity_type, 1, entity_keys); let count = layout - .delete(conn, &group, 1, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(1, count); assert_eq!(1, count_scalar_entities(conn, layout)); @@ -786,9 +786,9 @@ fn insert_many_and_delete_many() { .into_iter() .map(|key| EntityKey::data(entity_type.as_str(), key)) .collect(); - let group = row_group_key(&entity_type, 1, entity_keys); + let group = row_group_ref(&entity_type, 1, entity_keys); let num_removed = layout - .delete(conn, &group, 1, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(2, num_removed); assert_eq!(1, count_scalar_entities(conn, layout)); diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 5e75f542847..d6221a522ac 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -1,7 +1,7 @@ //! 
Test relational schemas that use `Bytes` to store ids use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; -use graph::components::store::write::{EntityWrite, RowGroup}; +use graph::components::store::write::{EntityRef, EntityWrite, RowGroup}; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data::value::Word; @@ -91,14 +91,14 @@ pub fn row_group( group } -pub fn row_group_key( +pub fn row_group_ref( entity_type: &EntityType, - _block: BlockNumber, + block: BlockNumber, data: impl IntoIterator, -) -> RowGroup { +) -> RowGroup { let mut group = RowGroup::new(entity_type.clone()); for key in data { - group.push(key); + group.push(EntityRef::new(key, block)); } group } @@ -353,9 +353,9 @@ fn delete() { let key = EntityKey::data("Thing".to_owned(), "ffff".to_owned()); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.clone()]; - let group = row_group_key(&entity_type, 1, entity_keys.clone()); + let group = row_group_ref(&entity_type, 1, entity_keys.clone()); let count = layout - .delete(conn, &group, 1, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(0, count); @@ -364,9 +364,9 @@ fn delete() { .get_mut(0) .map(|key| key.entity_id = Word::from(TWO_ID)) .expect("Failed to update entity types"); - let group = row_group_key(&entity_type, 1, entity_keys); + let group = row_group_ref(&entity_type, 1, entity_keys); let count = layout - .delete(conn, &group, 1, &MOCK_STOPWATCH) + .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(1, count); }); From 9f3c5aa1468aa526c1367042865fda0f43f09c2e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 10:34:00 -0700 Subject: [PATCH 0232/2104] store: Allow persisting data sources at different blocks --- store/postgres/src/deployment_store.rs | 8 +------- store/postgres/src/dynds/mod.rs | 10 +--------- store/postgres/src/dynds/shared.rs | 6 ++---- 3 files 
changed, 4 insertions(+), 20 deletions(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 18713a01cd9..490e5c2cb24 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1144,13 +1144,7 @@ impl DeploymentStore { )?; section.end(); - dynds::insert( - &conn, - &site, - &batch.data_sources, - &batch.block_ptr, - manifest_idx_and_name, - )?; + dynds::insert(&conn, &site, &batch.data_sources, manifest_idx_and_name)?; dynds::update_offchain_status(&conn, &site, &batch.offchain_to_remove)?; diff --git a/store/postgres/src/dynds/mod.rs b/store/postgres/src/dynds/mod.rs index 5731aed07f1..b62957d1e91 100644 --- a/store/postgres/src/dynds/mod.rs +++ b/store/postgres/src/dynds/mod.rs @@ -6,7 +6,6 @@ pub(crate) use private::DataSourcesTable; use crate::primary::Site; use diesel::PgConnection; use graph::{ - blockchain::BlockPtr, components::store::{write, StoredDynamicDataSource}, constraint_violation, data_source::CausalityRegion, @@ -29,18 +28,11 @@ pub(crate) fn insert( conn: &PgConnection, site: &Site, data_sources: &write::DataSources, - block_ptr: &BlockPtr, manifest_idx_and_name: &[(u32, String)], ) -> Result { match site.schema_version.private_data_sources() { true => DataSourcesTable::new(site.namespace.clone()).insert(conn, data_sources), - false => shared::insert( - conn, - &site.deployment, - data_sources, - block_ptr, - manifest_idx_and_name, - ), + false => shared::insert(conn, &site.deployment, data_sources, manifest_idx_and_name), } } diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index c3e5a77919e..ed4e400fcba 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -14,8 +14,7 @@ use graph::{ constraint_violation, data_source::CausalityRegion, prelude::{ - bigdecimal::ToPrimitive, serde_json, BigDecimal, BlockNumber, BlockPtr, DeploymentHash, - StoreError, + bigdecimal::ToPrimitive, 
serde_json, BigDecimal, BlockNumber, DeploymentHash, StoreError, }, }; @@ -104,7 +103,6 @@ pub(super) fn insert( conn: &PgConnection, deployment: &DeploymentHash, data_sources: &write::DataSources, - block_ptr: &BlockPtr, manifest_idx_and_name: &[(u32, String)], ) -> Result { use dynamic_ethereum_contract_data_source as decds; @@ -117,7 +115,7 @@ pub(super) fn insert( let dds: Vec<_> = data_sources .entries .iter() - .map(|(_, dds)| { + .map(|(block_ptr, dds)| { dds.iter().map(|ds| { let StoredDynamicDataSource { manifest_idx: _, From 12427fd5cc3efa829a46ecc098cf8fb7fdcbd787 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 26 Apr 2023 15:39:33 -0700 Subject: [PATCH 0233/2104] graph, store: Keep all pending changes in one large list When we start combining batches, we will want to mutate some changes in place to avoid the data copies that we would need if we kept them separated by the kind of change that is needed. --- graph/src/components/store/write.rs | 485 ++++++++++++------ store/postgres/src/deployment.rs | 9 +- store/postgres/src/deployment_store.rs | 71 +-- store/postgres/src/relational.rs | 41 +- store/postgres/src/relational_queries.rs | 40 +- store/postgres/src/writable.rs | 52 +- store/test-store/tests/postgres/relational.rs | 18 +- .../tests/postgres/relational_bytes.rs | 44 +- 8 files changed, 456 insertions(+), 304 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 474d854a42e..f041b0b92c2 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -5,65 +5,124 @@ use crate::{ blockchain::{block_stream::FirehoseCursor, BlockPtr}, cheap_clone::CheapClone, components::subgraph::Entity, - data::subgraph::schema::SubgraphError, + data::{subgraph::schema::SubgraphError, value::Word}, + data_source::CausalityRegion, prelude::DeploymentHash, }; use super::{ - BlockNumber, EntityKey, EntityModification, EntityType, StoreEvent, StoredDynamicDataSource, + 
BlockNumber, EntityKey, EntityModification, EntityType, StoreError, StoreEvent, + StoredDynamicDataSource, }; -/// Trait for something that has a block number associated with it -pub trait BlockTagged { - fn block(&self) -> BlockNumber; -} - +/// A data structure similar to `EntityModification`, but tagged with a +/// block. We might eventually replace `EntityModification` with this, but +/// until the dust settles, we'll keep them separate. +/// +/// This is geared towards how we persist entity changes: there are only +/// ever two operations we perform on them, clamping the range of an +/// existing entity version, and writing a new entity version. +/// +/// The difference between `Insert` and `Overwrite` is that `Overwrite` +/// requires that we clamp an existing prior version of the entity at +/// `block`. We only ever get an `Overwrite` if such a version actually +/// exists. `Insert` simply inserts a new row into the underlying table, +/// assuming that there is no need to fix up any prior version. #[derive(Debug)] -/// The data for a write operation; a write operation is either an insert of -/// a new entity or the overwriting of an existing entity. 
-/// A helper for objects that are tagged with a block number -pub struct EntityWrite { - pub key: EntityKey, - pub data: Entity, - pub block: BlockNumber, +pub enum EntityMod { + /// Insert the entity + Insert { + key: EntityKey, + data: Entity, + block: BlockNumber, + }, + /// Update the entity by overwriting it + Overwrite { + key: EntityKey, + data: Entity, + block: BlockNumber, + }, + /// Remove the entity + Remove { key: EntityKey, block: BlockNumber }, } -impl EntityWrite { - pub fn new(key: EntityKey, data: Entity, block: BlockNumber) -> Self { - Self { key, data, block } +impl EntityMod { + fn new(m: EntityModification, block: BlockNumber) -> Self { + match m { + EntityModification::Insert { key, data } => Self::Insert { key, data, block }, + EntityModification::Overwrite { key, data } => Self::Overwrite { key, data, block }, + EntityModification::Remove { key } => Self::Remove { key, block }, + } + } + + #[cfg(debug_assertions)] + pub fn new_test(m: EntityModification, block: BlockNumber) -> Self { + Self::new(m, block) + } + + pub fn id(&self) -> &Word { + match self { + EntityMod::Insert { key, .. } + | EntityMod::Overwrite { key, .. } + | EntityMod::Remove { key, .. } => &key.entity_id, + } } -} -impl BlockTagged for EntityWrite { fn block(&self) -> BlockNumber { - self.block + match self { + EntityMod::Insert { block, .. } + | EntityMod::Overwrite { block, .. } + | EntityMod::Remove { block, .. } => *block, + } } -} -pub struct EntityRef { - pub key: EntityKey, - pub block: BlockNumber, -} + /// Return `true` if `self` requires a write operation, i.e.,insert of a + /// new row, for either a new or an existing entity + fn is_write(&self) -> bool { + match self { + EntityMod::Insert { .. } | EntityMod::Overwrite { .. } => true, + EntityMod::Remove { .. 
} => false, + } + } -impl EntityRef { - pub fn new(key: EntityKey, block: BlockNumber) -> Self { - Self { key, block } + /// Return the details of the write if `self` is a write operation for a + /// new or an existing entity + fn as_write(&self) -> Option<(&Word, &Entity, CausalityRegion, BlockNumber)> { + match self { + EntityMod::Insert { key, data, block } | EntityMod::Overwrite { key, data, block } => { + Some((&key.entity_id, data, key.causality_region, *block)) + } + EntityMod::Remove { .. } => None, + } } -} -impl BlockTagged for EntityRef { - fn block(&self) -> BlockNumber { - self.block + /// Return `true` if `self` requires clamping of an existing version + fn is_clamp(&self) -> bool { + match self { + EntityMod::Insert { .. } => false, + EntityMod::Overwrite { .. } | EntityMod::Remove { .. } => true, + } + } + + fn key(&self) -> &EntityKey { + match self { + EntityMod::Insert { key, .. } + | EntityMod::Overwrite { key, .. } + | EntityMod::Remove { key, .. } => key, + } } } /// A list of entity changes grouped by the entity type -pub struct RowGroup { +#[derive(Debug)] +pub struct RowGroup { pub entity_type: EntityType, - pub rows: Vec, + /// All changes for this entity type, ordered by block; i.e., if `i < j` + /// then `rows[i].block() <= rows[j].block()` + pub rows: Vec, } -impl RowGroup { +impl RowGroup { pub fn new(entity_type: EntityType) -> Self { Self { entity_type, @@ -71,45 +130,93 @@ impl RowGroup { } } - pub fn push(&mut self, row: R) { - self.rows.push(row) + pub fn push(&mut self, emod: EntityModification, block: BlockNumber) -> Result<(), StoreError> { + debug_assert!(self + .rows + .last() + .map(|emod| emod.block() <= block) + .unwrap_or(true)); + let row = EntityMod::new(emod, block); + self.rows.push(row); + Ok(()) } fn row_count(&self) -> usize { self.rows.len() } -} -impl RowGroup { - pub fn runs(&self) -> impl Iterator { - RunIterator::new(self) + /// Iterate over all changes that need clamping of the block range of an + /// 
existing entity version + pub fn clamps_by_block(&self) -> impl Iterator { + ClampsByBlockIterator::new(self) + } + + /// Iterate over all changes that require writing a new entity version + pub fn writes(&self) -> impl Iterator { + self.rows.iter().filter(|row| row.is_write()) + } + + /// Return an iterator over all writes in chunks. The returned + /// `WriteChunker` is an iterator that produces `WriteChunk`s, which are + /// the iterators over the writes. Each `WriteChunk` has `chunk_size` + /// elements, except for the last one which might have fewer + pub fn write_chunks<'a>(&'a self, chunk_size: usize) -> WriteChunker<'a> { + WriteChunker::new(self, chunk_size) + } + + pub fn has_clamps(&self) -> bool { + self.rows.iter().any(|row| row.is_clamp()) + } + + pub fn last_op(&self, key: &EntityKey) -> Option> { + self.rows + .iter() + .rfind(|emod| emod.key() == key) + .map(EntityOp::from) + } + + pub fn effective_ops(&self) -> impl Iterator> { + let mut seen = HashSet::new(); + self.rows + .iter() + .rev() + .filter(move |emod| seen.insert(emod.id())) + .map(EntityOp::from) } } -struct RunIterator<'a, R> { +struct ClampsByBlockIterator<'a> { position: usize, - rows: &'a [R], + rows: &'a [EntityMod], } -impl<'a, R> RunIterator<'a, R> { - fn new(group: &'a RowGroup) -> Self { - RunIterator { +impl<'a> ClampsByBlockIterator<'a> { + fn new(group: &'a RowGroup) -> Self { + ClampsByBlockIterator { position: 0, rows: &group.rows, } } } -impl<'a, R: BlockTagged> Iterator for RunIterator<'a, R> { - type Item = (BlockNumber, &'a [R]); +impl<'a> Iterator for ClampsByBlockIterator<'a> { + type Item = (BlockNumber, &'a [EntityMod]); fn next(&mut self) -> Option { + // Make sure we start on a clamp + while self.position < self.rows.len() && !self.rows[self.position].is_clamp() { + self.position += 1; + } if self.position >= self.rows.len() { return None; } let block = self.rows[self.position].block(); let mut next = self.position; - while next < self.rows.len() && 
self.rows[next].block() == block { + // Collect consecutive clamps + while next < self.rows.len() + && self.rows[next].block() == block + && self.rows[next].is_clamp() + { next += 1; } let res = Some((block, &self.rows[self.position..next])); @@ -119,31 +226,24 @@ impl<'a, R: BlockTagged> Iterator for RunIterator<'a, R> { } /// A list of entity changes with one group per entity type -pub struct RowGroups { - pub groups: Vec>, +pub struct RowGroups { + pub groups: Vec, } -impl RowGroups { +impl RowGroups { fn new() -> Self { Self { groups: Vec::new() } } - fn group(&self, entity_type: &EntityType) -> Option<&RowGroup> { + fn group(&self, entity_type: &EntityType) -> Option<&RowGroup> { self.groups .iter() .find(|group| &group.entity_type == entity_type) } - /// Return a mutable reference to an existing group. - fn group_mut(&mut self, entity_type: &EntityType) -> Option<&mut RowGroup> { - self.groups - .iter_mut() - .find(|group| &group.entity_type == entity_type) - } - /// Return a mutable reference to an existing group, or create a new one /// if there isn't one yet and return a reference to that - fn group_entry(&mut self, entity_type: &EntityType) -> &mut RowGroup { + fn group_entry(&mut self, entity_type: &EntityType) -> &mut RowGroup { let pos = self .groups .iter() @@ -188,9 +288,23 @@ impl DataSources { /// lookup pub enum EntityOp<'a> { /// There is a new version of the entity that will be written - Write(&'a Entity), + Write { + key: &'a EntityKey, + entity: &'a Entity, + }, /// The entity has been removed - Remove, + Remove { key: &'a EntityKey }, +} + +impl<'a> From<&'a EntityMod> for EntityOp<'a> { + fn from(emod: &'a EntityMod) -> Self { + match emod { + EntityMod::Insert { data, key, .. } | EntityMod::Overwrite { data, key, .. } => { + EntityOp::Write { key, entity: data } + } + EntityMod::Remove { key, .. } => EntityOp::Remove { key }, + } + } } /// A write batch. 
This data structure encapsulates all the things that need @@ -202,12 +316,7 @@ pub struct Batch { pub block_ptr: BlockPtr, /// The firehose cursor corresponding to `block_ptr` pub firehose_cursor: FirehoseCursor, - /// New entities that need to be inserted - pub inserts: RowGroups, - /// Existing entities that need to be modified - pub overwrites: RowGroups, - /// Existing entities that need to be removed - pub removes: RowGroups, + mods: RowGroups, /// New data sources pub data_sources: DataSources, pub deterministic_errors: Vec, @@ -218,113 +327,59 @@ impl Batch { pub fn new( block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, - mods: Vec, + mut raw_mods: Vec, data_sources: Vec, deterministic_errors: Vec, offchain_to_remove: Vec, - ) -> Self { + ) -> Result { let block = block_ptr.number; - let mut inserts = RowGroups::new(); - let mut overwrites = RowGroups::new(); - let mut removes = RowGroups::new(); - - for m in mods { - match m { - EntityModification::Insert { key, data } => { - let row = EntityWrite::new(key, data, block); - inserts.group_entry(&row.key.entity_type).push(row); - } - EntityModification::Overwrite { key, data } => { - let row = EntityWrite::new(key, data, block); - overwrites.group_entry(&row.key.entity_type).push(row); - } - EntityModification::Remove { key } => { - let row = EntityRef::new(key, block); - removes.group_entry(&row.key.entity_type).push(row) - } - } + // Sort the modifications such that writes and clamps are + // consecutive. It's not needed for correctness but helps with some + // of the iterations, especially when we iterate with + // `clamps_by_block` so we get only one run for each block + raw_mods.sort_unstable_by_key(|emod| match emod { + EntityModification::Insert { .. } => 2, + EntityModification::Overwrite { .. } => 1, + EntityModification::Remove { .. 
} => 0, + }); + + let mut mods = RowGroups::new(); + + for m in raw_mods { + mods.group_entry(&m.entity_ref().entity_type) + .push(m, block)?; } let data_sources = DataSources::new(block_ptr.cheap_clone(), data_sources); let offchain_to_remove = DataSources::new(block_ptr.cheap_clone(), offchain_to_remove); - Self { + Ok(Self { block_ptr, firehose_cursor, - inserts, - overwrites, - removes, + mods, data_sources, deterministic_errors, offchain_to_remove, - } + }) } pub fn entity_count(&self) -> usize { - self.inserts.entity_count() + self.overwrites.entity_count() + self.removes.entity_count() + self.mods.entity_count() } /// Find out whether the latest operation for the entity with type /// `entity_type` and `id` is going to write that entity, i.e., insert /// or overwrite it, or if it is going to remove it. If no change will /// be made to the entity, return `None` - pub fn last_op(&self, entity_type: &EntityType, id: &str) -> Option> { - // Check if we are inserting or overwriting the entity - if let Some((_, entity)) = self - .writes(entity_type) - .find(|(_, entity)| entity.id() == id) - { - return Some(EntityOp::Write(entity)); - } - self.removes(entity_type) - .find(|eref| eref.key.entity_id.as_str() == id) - .map(|_| EntityOp::Remove) + pub fn last_op(&self, key: &EntityKey) -> Option> { + self.mods.group(&key.entity_type)?.last_op(key) } - /// Iterate over all entities that have a pending write - pub fn writes(&self, entity_type: &EntityType) -> impl Iterator { - self.inserts + pub fn effective_ops(&self, entity_type: &EntityType) -> impl Iterator { + self.mods .group(entity_type) + .map(|group| group.effective_ops()) .into_iter() - .map(|ew| &ew.rows) - .flatten() - .chain( - self.overwrites - .group(entity_type) - .into_iter() - .map(|ew| &ew.rows) - .flatten(), - ) - .map(|ew| (&ew.key, &ew.data)) - } - - /// Iterate over all entities that have a pending write, allowing for - /// mutation of the entity - pub fn writes_mut( - &mut self, - entity_type: 
&EntityType, - ) -> impl Iterator { - self.inserts - .group_mut(entity_type) - .into_iter() - .map(|rg| &mut rg.rows) - .flatten() - .chain( - self.overwrites - .group_mut(entity_type) - .into_iter() - .map(|rg| &mut rg.rows) - .flatten(), - ) - .map(|ew| (&ew.key, &mut ew.data)) - } - - /// Iterate over all entity deletions/removals - pub fn removes(&self, entity_type: &EntityType) -> impl Iterator { - self.removes - .group(entity_type) - .into_iter() - .map(|rg| &rg.rows) .flatten() } @@ -346,33 +401,128 @@ impl Batch { /// Generate a store event for all the changes that this batch makes pub fn store_event(&self, deployment: &DeploymentHash) -> StoreEvent { let entity_types = HashSet::from_iter( - self.inserts + self.mods .groups .iter() - .chain(self.overwrites.groups.iter()) .map(|group| group.entity_type.clone()), ); StoreEvent::from_types(deployment, entity_types) } + + pub fn groups<'a>(&'a self) -> impl Iterator { + self.mods.groups.iter() + } } -#[cfg(test)] -mod test { - use crate::components::store::{BlockNumber, EntityType}; +pub struct WriteChunker<'a> { + group: &'a RowGroup, + chunk_size: usize, + position: usize, +} - use super::{BlockTagged, RowGroup}; +impl<'a> WriteChunker<'a> { + fn new(group: &'a RowGroup, chunk_size: usize) -> Self { + Self { + group, + chunk_size, + position: 0, + } + } +} - #[derive(Debug)] - struct Entry { - value: usize, - block: BlockNumber, +impl<'a> Iterator for WriteChunker<'a> { + type Item = WriteChunk<'a>; + + fn next(&mut self) -> Option { + // Produce a chunk according to the current `self.position` + let res = if self.position < self.group.rows.len() { + Some(WriteChunk { + group: self.group, + chunk_size: self.chunk_size, + position: self.position, + }) + } else { + None + }; + + // Advance `self.position` to the start of the next chunk + let mut count = 0; + while count < self.chunk_size && self.position < self.group.rows.len() { + if self.group.rows[self.position].is_write() { + count += 1; + } + self.position 
+= 1; + } + + res + } +} + +#[derive(Debug)] +pub struct WriteChunk<'a> { + group: &'a RowGroup, + chunk_size: usize, + position: usize, +} + +impl<'a> WriteChunk<'a> { + pub fn is_empty(&'a self) -> bool { + self.iter().next().is_none() + } + + pub fn iter(&self) -> WriteChunkIter<'a> { + WriteChunkIter { + group: self.group, + chunk_size: self.chunk_size, + position: self.position, + count: 0, + } } +} + +impl<'a> IntoIterator for &WriteChunk<'a> { + type Item = (&'a Word, &'a Entity, CausalityRegion, BlockNumber); - impl BlockTagged for Entry { - fn block(&self) -> BlockNumber { - self.block + type IntoIter = WriteChunkIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + WriteChunkIter { + group: self.group, + chunk_size: self.chunk_size, + position: self.position, + count: 0, } } +} + +pub struct WriteChunkIter<'a> { + group: &'a RowGroup, + chunk_size: usize, + position: usize, + count: usize, +} + +impl<'a> Iterator for WriteChunkIter<'a> { + type Item = (&'a Word, &'a Entity, CausalityRegion, BlockNumber); + + fn next(&mut self) -> Option { + while self.count < self.chunk_size && self.position < self.group.rows.len() { + let insert = self.group.rows[self.position].as_write(); + self.position += 1; + if insert.is_some() { + self.count += 1; + return insert; + } + } + return None; + } +} + +#[cfg(test)] +mod test { + use crate::components::store::{write::EntityMod, BlockNumber, EntityKey, EntityType}; + + use super::RowGroup; #[track_caller] fn check_runs(values: &[usize], blocks: &[BlockNumber], exp: &[(BlockNumber, &[usize])]) { @@ -381,8 +531,8 @@ mod test { let rows = values .iter() .zip(blocks.iter()) - .map(|(value, block)| Entry { - value: *value, + .map(|(value, block)| EntityMod::Remove { + key: EntityKey::data("RowGroup".to_string(), value.to_string()), block: *block, }) .collect(); @@ -391,11 +541,14 @@ mod test { rows, }; let act = group - .runs() + .clamps_by_block() .map(|(block, entries)| { ( block, - entries.iter().map(|entry| 
entry.value).collect::>(), + entries + .iter() + .map(|entry| entry.id().parse().unwrap()) + .collect::>(), ) }) .collect::>(); diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 8f38c24410f..50204a37dbb 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -816,18 +816,21 @@ pub fn update_deployment_status( .map_err(StoreError::from) } -/// Insert the errors and check if the subgraph needs to be set as unhealthy. +/// Insert the errors and check if the subgraph needs to be set as +/// unhealthy. The `latest_block` is only used to check whether the subgraph +/// is healthy as of that block; errors are inserted according to the +/// `block_ptr` they contain pub(crate) fn insert_subgraph_errors( conn: &PgConnection, id: &DeploymentHash, deterministic_errors: &[SubgraphError], - block: BlockNumber, + latest_block: BlockNumber, ) -> Result<(), StoreError> { for error in deterministic_errors { insert_subgraph_error(conn, error)?; } - check_health(conn, id, block) + check_health(conn, id, latest_block) } #[cfg(debug_assertions)] diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 490e5c2cb24..94719a4b3f1 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -5,7 +5,7 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::write::{EntityRef, EntityWrite, RowGroup, RowGroups}; +use graph::components::store::write::RowGroup; use graph::components::store::{ Batch, DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, StoredDynamicDataSource, VersionStats, @@ -13,6 +13,7 @@ use graph::components::store::{ use graph::components::versions::VERSIONS; use graph::data::query::Trace; use graph::data::subgraph::{status, 
SPEC_VERSION_0_0_6}; +use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::futures03::FutureExt; use graph::prelude::{ @@ -272,7 +273,8 @@ impl DeploymentStore { &self, conn: &PgConnection, layout: &Layout, - key: &EntityKey, + entity_type: &EntityType, + entity_id: &Word, ) -> Result<(), StoreError> { // Collect all types that share an interface implementation with this // entity type, and make sure there are no conflicting IDs. @@ -291,24 +293,24 @@ impl DeploymentStore { .expect("API schema should be present") .clone(); let types_for_interface = schema.types_for_interface(); - let entity_type = key.entity_type.to_string(); + let entity_type_str = entity_type.to_string(); let types_with_shared_interface = Vec::from_iter( schema - .interfaces_for_type(&key.entity_type) + .interfaces_for_type(entity_type) .into_iter() .flatten() .flat_map(|interface| &types_for_interface[&EntityType::from(interface)]) .map(EntityType::from) - .filter(|type_name| type_name != &key.entity_type), + .filter(|type_name| type_name != entity_type), ); if !types_with_shared_interface.is_empty() { if let Some(conflicting_entity) = - layout.conflicting_entity(conn, &key.entity_id, types_with_shared_interface)? + layout.conflicting_entity(conn, entity_id, types_with_shared_interface)? { return Err(StoreError::ConflictingId( - entity_type, - key.entity_id.to_string(), + entity_type_str, + entity_id.to_string(), conflicting_entity, )); } @@ -316,47 +318,36 @@ impl DeploymentStore { Ok(()) } - fn apply_entity_modifications( + fn apply_entity_modifications<'a>( &self, conn: &PgConnection, layout: &Layout, - inserts: &RowGroups, - overwrites: &RowGroups, - removes: &RowGroups, + groups: impl Iterator, stopwatch: &StopwatchMetrics, ) -> Result { let mut count = 0; - // Apply modification groups. - // Inserts: - for group in &inserts.groups { + for group in groups { + if group.has_clamps() { + count -= self.remove_entities(group, conn, layout, stopwatch)? 
as i32; + } count += self.insert_entities(group, conn, layout, stopwatch)? as i32 } - // Overwrites: - for group in &overwrites.groups { - // we do not update the count since the number of entities remains the same - self.overwrite_entities(group, conn, layout, stopwatch)?; - } - - // Removals - for group in &removes.groups { - count -= self.remove_entities(group, conn, layout, stopwatch)? as i32; - } Ok(count) } fn insert_entities<'a>( &'a self, - group: &'a RowGroup, + group: &'a RowGroup, conn: &PgConnection, layout: &'a Layout, stopwatch: &StopwatchMetrics, ) -> Result { let section = stopwatch.start_section("check_interface_entity_uniqueness"); - for row in group.rows.iter() { + for row in group.writes() { // WARNING: This will potentially execute 2 queries for each entity key. - self.check_interface_entity_uniqueness(conn, layout, &row.key)?; + self.check_interface_entity_uniqueness(conn, layout, &group.entity_type, &row.id())?; } section.end(); @@ -364,27 +355,9 @@ impl DeploymentStore { layout.insert(conn, group, stopwatch) } - fn overwrite_entities<'a>( - &'a self, - group: &'a RowGroup, - conn: &PgConnection, - layout: &'a Layout, - stopwatch: &StopwatchMetrics, - ) -> Result { - let section = stopwatch.start_section("check_interface_entity_uniqueness"); - for row in group.rows.iter() { - // WARNING: This will potentially execute 2 queries for each entity key. 
- self.check_interface_entity_uniqueness(conn, layout, &row.key)?; - } - section.end(); - - let _section = stopwatch.start_section("apply_entity_modifications_update"); - layout.update(conn, group, stopwatch) - } - fn remove_entities( &self, - group: &RowGroup, + group: &RowGroup, conn: &PgConnection, layout: &Layout, stopwatch: &StopwatchMetrics, @@ -1137,9 +1110,7 @@ impl DeploymentStore { let count = self.apply_entity_modifications( &conn, layout.as_ref(), - &batch.inserts, - &batch.overwrites, - &batch.removes, + batch.groups(), stopwatch, )?; section.end(); diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 0ff418203df..6fcdf9eec1b 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -24,7 +24,7 @@ use diesel::types::{FromSql, ToSql}; use diesel::{connection::SimpleConnection, Connection}; use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; use graph::cheap_clone::CheapClone; -use graph::components::store::write::{EntityRef, EntityWrite, RowGroup}; +use graph::components::store::write::RowGroup; use graph::constraint_violation; use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; @@ -654,7 +654,7 @@ impl Layout { pub fn insert<'a>( &'a self, conn: &PgConnection, - group: &'a RowGroup, + group: &'a RowGroup, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; @@ -664,10 +664,13 @@ impl Layout { // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in group.rows.chunks(chunk_size) { - count += InsertQuery::new(table, chunk)? - .get_results(conn) - .map(|ids| ids.len())? + for chunk in group.write_chunks(chunk_size) { + // Empty chunks would lead to invalid SQL + if !chunk.is_empty() { + count += InsertQuery::new(table, &chunk)? 
+ .get_results(conn) + .map(|ids| ids.len())? + } } Ok(count) } @@ -800,15 +803,15 @@ impl Layout { pub fn update<'a>( &'a self, conn: &PgConnection, - group: &'a RowGroup, + group: &'a RowGroup, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; - if table.immutable { + if table.immutable && group.has_clamps() { let ids = group .rows .iter() - .map(|row| row.key.entity_id.as_str()) + .map(|row| row.id().as_str()) .collect::>() .join(", "); return Err(constraint_violation!( @@ -819,9 +822,8 @@ impl Layout { } let section = stopwatch.start_section("update_modification_clamp_range_query"); - for (block, rows) in group.runs() { - let entity_keys: Vec<&str> = - rows.iter().map(|row| row.key.entity_id.as_str()).collect(); + for (block, rows) in group.clamps_by_block() { + let entity_keys: Vec<&str> = rows.iter().map(|row| row.id().as_str()).collect(); ClampRangeQuery::new(table, &entity_keys, block)?.execute(conn)?; } @@ -833,8 +835,8 @@ impl Layout { // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries let chunk_size = InsertQuery::chunk_size(table); - for chunk in group.rows.chunks(chunk_size) { - count += InsertQuery::new(table, chunk)?.execute(conn)?; + for chunk in group.write_chunks(chunk_size) { + count += InsertQuery::new(table, &chunk)?.execute(conn)?; } Ok(count) @@ -843,24 +845,21 @@ impl Layout { pub fn delete( &self, conn: &PgConnection, - group: &RowGroup, + group: &RowGroup, stopwatch: &StopwatchMetrics, ) -> Result { let table = self.table_for_entity(&group.entity_type)?; if table.immutable { return Err(constraint_violation!( "entities of type `{}` can not be deleted since they are immutable. 
Entity ids are [{}]", - table.object, group.rows.iter().map(|eref| &eref.key.entity_id).join(", ") + table.object, group.rows.iter().map(|eref| eref.id()).join(", ") )); } let _section = stopwatch.start_section("delete_modification_clamp_range_query"); let mut count = 0; - for (block, rows) in group.runs() { - let ids: Vec<_> = rows - .iter() - .map(|eref| eref.key.entity_id.as_str()) - .collect(); + for (block, rows) in group.clamps_by_block() { + let ids: Vec<_> = rows.iter().map(|eref| eref.id().as_str()).collect(); for chunk in ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { count += ClampRangeQuery::new(table, chunk, block)?.execute(conn)? } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 4f3ef87e73b..b0051e6ff6b 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -12,7 +12,7 @@ use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, BigInt, Binary, Bool, Integer, Jsonb, Range, Text}; use diesel::Connection; -use graph::components::store::write::EntityWrite; +use graph::components::store::write::WriteChunk; use graph::components::store::{DerivedEntityQuery, EntityKey}; use graph::data::store::NULL; use graph::data::value::{Object, Word}; @@ -1750,18 +1750,18 @@ impl<'a> LoadQuery for FindDerivedQuery<'a> { impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} #[derive(Debug)] -struct FulltextValues<'a>(HashMap>); +struct FulltextValues<'a>(HashMap<&'a Word, Vec<(&'a str, Value)>>); impl<'a> FulltextValues<'a> { - fn new(table: &'a Table, rows: &'a [EntityWrite]) -> Self { + fn new(table: &'a Table, rows: &'a WriteChunk<'a>) -> Self { let mut map = HashMap::new(); for column in table.columns.iter().filter(|column| column.is_fulltext()) { - for row in rows { + for (id, entity, _, _) in rows { let mut fulltext = Vec::new(); if let Some(fields) = column.fulltext_fields.as_ref() { let fulltext_field_values = fields .iter() - 
.filter_map(|field| row.data.get(field)) + .filter_map(|field| entity.get(field)) .cloned() .collect::>(); if !fulltext_field_values.is_empty() { @@ -1769,7 +1769,7 @@ impl<'a> FulltextValues<'a> { } } if !fulltext.is_empty() { - map.insert(row.data.id(), fulltext); + map.insert(id, fulltext); } } } @@ -1792,21 +1792,21 @@ impl<'a> FulltextValues<'a> { #[derive(Debug)] pub struct InsertQuery<'a> { table: &'a Table, - rows: &'a [EntityWrite], + rows: &'a WriteChunk<'a>, fulltext_values: FulltextValues<'a>, unique_columns: Vec<&'a Column>, } impl<'a> InsertQuery<'a> { - pub fn new(table: &'a Table, rows: &'a [EntityWrite]) -> Result, StoreError> { - for row in rows { + pub fn new(table: &'a Table, rows: &'a WriteChunk<'a>) -> Result, StoreError> { + for (id, entity, _, _) in rows { for column in table.columns.iter() { - if !column.is_nullable() && !row.data.contains_key(&column.field) { + if !column.is_nullable() && !entity.contains_key(&column.field) { return Err(StoreError::QueryExecutionError(format!( "can not insert entity {}[{}] since value for non-nullable attribute {} is missing. \ To fix this, mark the attribute as nullable in the GraphQL schema or change the \ mapping code to always set this attribute.", - table.object, row.data.id(), column.field + table.object, id, column.field ))); } } @@ -1826,18 +1826,18 @@ impl<'a> InsertQuery<'a> { /// Build the column name list using the subset of all keys among present entities. 
fn unique_columns( table: &'a Table, - rows: &'a [EntityWrite], + rows: &'a WriteChunk<'a>, fulltext_values: &FulltextValues<'a>, ) -> Vec<&'a Column> { table .columns .iter() .filter(|column| { - rows.iter().any(|row| { + rows.iter().any(|(id, entity, _, _)| { if column.is_fulltext() { - !fulltext_values.get(&row.data.id(), &column.field).is_null() + !fulltext_values.get(id, &column.field).is_null() } else { - row.data.get(&column.field).is_some() + entity.get(&column.field).is_some() } }) }) @@ -1915,21 +1915,21 @@ impl<'a> QueryFragment for InsertQuery<'a> { // Use a `Peekable` iterator to help us decide how to finalize each line. let mut iter = self.rows.iter().peekable(); - while let Some(row) = iter.next() { + while let Some((id, entity, causality_region, block)) = iter.next() { out.push_sql("("); for column in &self.unique_columns { let value = if column.is_fulltext() { - self.fulltext_values.get(&row.key.entity_id, &column.field) + self.fulltext_values.get(id, &column.field) } else { - row.data.get(&column.field).unwrap_or(&NULL) + entity.get(&column.field).unwrap_or(&NULL) }; QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); } - Self::literal_range_current(&self.table, row.block, &mut out)?; + Self::literal_range_current(&self.table, block, &mut out)?; if self.table.has_causality_region { out.push_sql(", "); - out.push_bind_param::(&row.key.causality_region)?; + out.push_bind_param::(&causality_region)?; }; out.push_sql(")"); diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 81529b6a733..1035a375c31 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -696,8 +696,8 @@ impl Queue { impl<'a> From> for Op { fn from(value: EntityOp) -> Self { match value { - EntityOp::Write(entity) => Self::Write(entity.clone()), - EntityOp::Remove => Self::Remove, + EntityOp::Write { key: _, entity } => Self::Write(entity.clone()), + EntityOp::Remove { .. 
} => Self::Remove, } } } @@ -715,9 +715,7 @@ impl Queue { match req.as_ref() { Request::Write { batch, .. } => { if tracker.visible(&batch.block_ptr) { - batch - .last_op(&key.entity_type, &key.entity_id) - .map(Op::from) + batch.last_op(key).map(Op::from) } else { None } @@ -751,11 +749,11 @@ impl Queue { if tracker.visible(&batch.block_ptr) { // See if we have changes for any of the keys. for key in &keys { - match batch.last_op(&key.entity_type, &key.entity_id) { - Some(EntityOp::Write(entity)) => { + match batch.last_op(key) { + Some(EntityOp::Write { key: _, entity }) => { map.insert(key.clone(), Some(entity.clone())); } - Some(EntityOp::Remove) => { + Some(EntityOp::Remove { .. }) => { map.insert(key.clone(), None); } None => { /* nothing to do */ } @@ -790,6 +788,28 @@ impl Queue { ) -> Result, StoreError> { let mut tracker = BlockTracker::new(); + fn is_related(derived_query: &DerivedEntityQuery, entity: &Entity) -> bool { + entity + .get(&derived_query.entity_field) + .map(|related_id| related_id.as_str() == Some(&derived_query.value)) + .unwrap_or(false) + } + + fn effective_ops<'a>( + batch: &'a Batch, + derived_query: &'a DerivedEntityQuery, + ) -> impl Iterator)> + 'a { + batch + .effective_ops(&derived_query.entity_type) + .filter_map(|op| match op { + EntityOp::Write { key, entity } if is_related(derived_query, entity) => { + Some((key.clone(), Some(entity.clone()))) + } + EntityOp::Write { .. } => None, + EntityOp::Remove { key } => Some((key.clone(), None)), + }) + } + // Get entities from entries in the queue let entities_in_queue = self.queue.fold( BTreeMap::new(), @@ -798,19 +818,7 @@ impl Queue { match req.as_ref() { Request::Write { batch, .. 
} => { if tracker.visible(&batch.block_ptr) { - for (key, entity) in batch.writes(&derived_query.entity_type) { - if let Some(related_id) = - entity.get(derived_query.entity_field.as_str()) - { - // we check only the field against the value - if related_id.as_str() == Some(&derived_query.value) { - map.insert(key.clone(), Some(entity.clone())); - } - } - } - for eref in batch.removes(&derived_query.entity_type) { - map.insert(eref.key.clone(), None); - } + map.extend(effective_ops(batch, derived_query)); } } Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } @@ -1199,7 +1207,7 @@ impl WritableStoreTrait for WritableStore { data_sources, deterministic_errors, processed_data_sources, - ); + )?; self.writer.write(batch, stopwatch).await?; *self.block_ptr.lock().unwrap() = Some(block_ptr_to); diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index e199ec5fbc1..173fc32eb8d 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -34,7 +34,7 @@ use graph_store_postgres::{ use test_store::*; -use crate::postgres::relational_bytes::{row_group, row_group_ref}; +use crate::postgres::relational_bytes::{row_group_delete, row_group_insert, row_group_update}; const THINGS_GQL: &str = r#" type _Schema_ @fulltext( @@ -235,7 +235,7 @@ fn insert_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); - let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); + let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone()); let inserted = layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); assert_eq!(inserted, entities_with_keys_owned.len()); } @@ -268,7 +268,7 @@ fn update_entity_at( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys ); - let group = row_group(&entity_type, block, entities_with_keys_owned.clone()); + let group = 
row_group_update(&entity_type, block, entities_with_keys_owned.clone()); let updated = layout.update(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); assert_eq!(updated, entities_with_keys_owned.len()); } @@ -575,7 +575,7 @@ fn update() { let entity_type = EntityType::from("Scalar"); let entities = vec![(key, entity.clone())]; - let group = row_group(&entity_type, 0, entities); + let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); @@ -630,7 +630,7 @@ fn update_many() { let entities_vec = vec![one, two, three]; let entities: Vec<_> = keys.into_iter().zip(entities_vec.into_iter()).collect(); - let group = row_group(&entity_type, 0, entities); + let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); @@ -699,7 +699,7 @@ fn serialize_bigdecimal() { let key = EntityKey::data("Scalar".to_owned(), entity.id()); let entity_type = EntityType::from("Scalar"); let entities = vec![(key, entity.clone())]; - let group = row_group(&entity_type, 0, entities); + let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); @@ -745,7 +745,7 @@ fn delete() { let key = EntityKey::data("Scalar".to_owned(), "no such entity".to_owned()); let entity_type = EntityType::from("Scalar"); let mut entity_keys = vec![key]; - let group = row_group_ref(&entity_type, 1, entity_keys.clone()); + let group = row_group_delete(&entity_type, 1, entity_keys.clone()); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); @@ -758,7 +758,7 @@ fn delete() { .map(|key| key.entity_id = Word::from("two")) .expect("Failed to update key"); - let group = row_group_ref(&entity_type, 1, entity_keys); + let group = row_group_delete(&entity_type, 1, entity_keys); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); @@ -786,7 +786,7 
@@ fn insert_many_and_delete_many() { .into_iter() .map(|key| EntityKey::data(entity_type.as_str(), key)) .collect(); - let group = row_group_ref(&entity_type, 1, entity_keys); + let group = row_group_delete(&entity_type, 1, entity_keys); let num_removed = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index d6221a522ac..559114113d1 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -1,13 +1,13 @@ //! Test relational schemas that use `Bytes` to store ids use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; -use graph::components::store::write::{EntityRef, EntityWrite, RowGroup}; +use graph::components::store::write::RowGroup; use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; -use graph::prelude::{BlockNumber, EntityQuery, MetricsRegistry}; +use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry}; use graph::schema::InputSchema; use hex_literal::hex; use lazy_static::lazy_static; @@ -79,26 +79,44 @@ fn remove_test_data(conn: &PgConnection) { .expect("Failed to drop test schema"); } -pub fn row_group( +pub fn row_group_update( entity_type: &EntityType, block: BlockNumber, data: impl IntoIterator, -) -> RowGroup { +) -> RowGroup { let mut group = RowGroup::new(entity_type.clone()); - for (key, entity) in data { - group.push(EntityWrite::new(key, entity, block)); + for (key, data) in data { + group + .push(EntityModification::Overwrite { key, data }, block) + .unwrap(); } group } -pub fn row_group_ref( +pub fn row_group_insert( + entity_type: &EntityType, + block: BlockNumber, + data: impl IntoIterator, +) -> RowGroup { + let mut group = RowGroup::new(entity_type.clone()); + for (key, 
data) in data { + group + .push(EntityModification::Insert { key, data }, block) + .unwrap(); + } + group +} + +pub fn row_group_delete( entity_type: &EntityType, block: BlockNumber, data: impl IntoIterator, -) -> RowGroup { +) -> RowGroup { let mut group = RowGroup::new(entity_type.clone()); for key in data { - group.push(EntityRef::new(key, block)); + group + .push(EntityModification::Remove { key }, block) + .unwrap(); } group } @@ -108,7 +126,7 @@ fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity let entity_type = EntityType::from(entity_type); let entities = vec![(key.clone(), entity)]; - let group = row_group(&entity_type, 0, entities); + let group = row_group_insert(&entity_type, 0, entities); let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); } @@ -321,7 +339,7 @@ fn update() { let entity_id = entity.id(); let entity_type = key.entity_type.clone(); let entities = vec![(key, entity.clone())]; - let group = row_group(&entity_type, 1, entities); + let group = row_group_update(&entity_type, 1, entities); layout .update(conn, &group, &MOCK_STOPWATCH) .expect("Failed to update"); @@ -353,7 +371,7 @@ fn delete() { let key = EntityKey::data("Thing".to_owned(), "ffff".to_owned()); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.clone()]; - let group = row_group_ref(&entity_type, 1, entity_keys.clone()); + let group = row_group_delete(&entity_type, 1, entity_keys.clone()); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); @@ -364,7 +382,7 @@ fn delete() { .get_mut(0) .map(|key| key.entity_id = Word::from(TWO_ID)) .expect("Failed to update entity types"); - let group = row_group_ref(&entity_type, 1, entity_keys); + let group = row_group_delete(&entity_type, 1, entity_keys); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); From 
0f59591500aefe859d8dfe00956fd54b2a29f3a1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 12:47:40 -0700 Subject: [PATCH 0234/2104] store: Streamline logic in DeploymentStore.apply_entity_modifications --- graph/src/components/store/write.rs | 7 ++++ store/postgres/src/deployment_store.rs | 52 ++++++++++---------------- store/postgres/src/relational.rs | 5 +++ 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index f041b0b92c2..90b5c839602 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -104,6 +104,13 @@ impl EntityMod { } } + pub fn creates_entity(&self) -> bool { + match self { + EntityMod::Insert { .. } => true, + EntityMod::Overwrite { .. } | EntityMod::Remove { .. } => false, + } + } + fn key(&self) -> &EntityKey { match self { EntityMod::Insert { key, .. } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 94719a4b3f1..cefbf15ef74 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -328,42 +328,30 @@ impl DeploymentStore { let mut count = 0; for group in groups { - if group.has_clamps() { - count -= self.remove_entities(group, conn, layout, stopwatch)? as i32; + // Clamp entities before inserting them to avoid having versions + // with overlapping block ranges + let section = stopwatch.start_section("apply_entity_modifications_delete"); + count -= layout.delete(conn, group, stopwatch)? as i32; + section.end(); + + let section = stopwatch.start_section("check_interface_entity_uniqueness"); + for row in group.writes().filter(|emod| emod.creates_entity()) { + // WARNING: This will potentially execute 2 queries for each entity key. + self.check_interface_entity_uniqueness( + conn, + layout, + &group.entity_type, + &row.id(), + )?; } - count += self.insert_entities(group, conn, layout, stopwatch)? 
as i32 - } - - Ok(count) - } + section.end(); - fn insert_entities<'a>( - &'a self, - group: &'a RowGroup, - conn: &PgConnection, - layout: &'a Layout, - stopwatch: &StopwatchMetrics, - ) -> Result { - let section = stopwatch.start_section("check_interface_entity_uniqueness"); - for row in group.writes() { - // WARNING: This will potentially execute 2 queries for each entity key. - self.check_interface_entity_uniqueness(conn, layout, &group.entity_type, &row.id())?; + let section = stopwatch.start_section("apply_entity_modifications_insert"); + count += layout.insert(conn, group, stopwatch)? as i32; + section.end(); } - section.end(); - let _section = stopwatch.start_section("apply_entity_modifications_insert"); - layout.insert(conn, group, stopwatch) - } - - fn remove_entities( - &self, - group: &RowGroup, - conn: &PgConnection, - layout: &Layout, - stopwatch: &StopwatchMetrics, - ) -> Result { - let _section = stopwatch.start_section("apply_entity_modifications_delete"); - layout.delete(conn, group, stopwatch) + Ok(count) } /// Execute a closure with a connection to the database. 
diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 6fcdf9eec1b..1dacee21e43 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -848,6 +848,11 @@ impl Layout { group: &RowGroup, stopwatch: &StopwatchMetrics, ) -> Result { + if !group.has_clamps() { + // Nothing to do + return Ok(0); + } + let table = self.table_for_entity(&group.entity_type)?; if table.immutable { return Err(constraint_violation!( From ea7305eac995ac97ce2dbcd592923148c94b933d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 13:47:27 -0700 Subject: [PATCH 0235/2104] graph, store: Use a struct, not a tuple, to pass around writes --- graph/src/components/store/write.rs | 39 ++++++++++++++++++------ store/postgres/src/relational_queries.rs | 28 ++++++++--------- 2 files changed, 44 insertions(+), 23 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 90b5c839602..98f44adae10 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -46,6 +46,32 @@ pub enum EntityMod { Remove { key: EntityKey, block: BlockNumber }, } +pub struct EntityWrite<'a> { + pub id: &'a Word, + pub entity: &'a Entity, + pub causality_region: CausalityRegion, + pub block: BlockNumber, +} + +impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { + type Error = (); + + fn try_from(emod: &'a EntityMod) -> Result { + match emod { + EntityMod::Insert { key, data, block } | EntityMod::Overwrite { key, data, block } => { + Ok(EntityWrite { + id: &key.entity_id, + entity: data, + causality_region: key.causality_region, + block: *block, + }) + } + + EntityMod::Remove { .. 
} => Err(()), + } + } +} + impl EntityMod { fn new(m: EntityModification, block: BlockNumber) -> Self { match m { @@ -87,13 +113,8 @@ impl EntityMod { /// Return the details of the write if `self` is a write operation for a /// new or an existing entity - fn as_write(&self) -> Option<(&Word, &Entity, CausalityRegion, BlockNumber)> { - match self { - EntityMod::Insert { key, data, block } | EntityMod::Overwrite { key, data, block } => { - Some((&key.entity_id, data, key.causality_region, *block)) - } - EntityMod::Remove { .. } => None, - } + fn as_write(&self) -> Option { + EntityWrite::try_from(self).ok() } /// Return `true` if `self` requires clamping of an existing version @@ -488,7 +509,7 @@ impl<'a> WriteChunk<'a> { } impl<'a> IntoIterator for &WriteChunk<'a> { - type Item = (&'a Word, &'a Entity, CausalityRegion, BlockNumber); + type Item = EntityWrite<'a>; type IntoIter = WriteChunkIter<'a>; @@ -510,7 +531,7 @@ pub struct WriteChunkIter<'a> { } impl<'a> Iterator for WriteChunkIter<'a> { - type Item = (&'a Word, &'a Entity, CausalityRegion, BlockNumber); + type Item = EntityWrite<'a>; fn next(&mut self) -> Option { while self.count < self.chunk_size && self.position < self.group.rows.len() { diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index b0051e6ff6b..4762047a644 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1756,12 +1756,12 @@ impl<'a> FulltextValues<'a> { fn new(table: &'a Table, rows: &'a WriteChunk<'a>) -> Self { let mut map = HashMap::new(); for column in table.columns.iter().filter(|column| column.is_fulltext()) { - for (id, entity, _, _) in rows { + for row in rows { let mut fulltext = Vec::new(); if let Some(fields) = column.fulltext_fields.as_ref() { let fulltext_field_values = fields .iter() - .filter_map(|field| entity.get(field)) + .filter_map(|field| row.entity.get(field)) .cloned() .collect::>(); if !fulltext_field_values.is_empty() { 
@@ -1769,7 +1769,7 @@ impl<'a> FulltextValues<'a> { } } if !fulltext.is_empty() { - map.insert(id, fulltext); + map.insert(row.id, fulltext); } } } @@ -1799,14 +1799,14 @@ pub struct InsertQuery<'a> { impl<'a> InsertQuery<'a> { pub fn new(table: &'a Table, rows: &'a WriteChunk<'a>) -> Result, StoreError> { - for (id, entity, _, _) in rows { + for row in rows { for column in table.columns.iter() { - if !column.is_nullable() && !entity.contains_key(&column.field) { + if !column.is_nullable() && !row.entity.contains_key(&column.field) { return Err(StoreError::QueryExecutionError(format!( "can not insert entity {}[{}] since value for non-nullable attribute {} is missing. \ To fix this, mark the attribute as nullable in the GraphQL schema or change the \ mapping code to always set this attribute.", - table.object, id, column.field + table.object, row.id, column.field ))); } } @@ -1833,11 +1833,11 @@ impl<'a> InsertQuery<'a> { .columns .iter() .filter(|column| { - rows.iter().any(|(id, entity, _, _)| { + rows.iter().any(|row| { if column.is_fulltext() { - !fulltext_values.get(id, &column.field).is_null() + !fulltext_values.get(row.id, &column.field).is_null() } else { - entity.get(&column.field).is_some() + row.entity.get(&column.field).is_some() } }) }) @@ -1915,21 +1915,21 @@ impl<'a> QueryFragment for InsertQuery<'a> { // Use a `Peekable` iterator to help us decide how to finalize each line. 
let mut iter = self.rows.iter().peekable(); - while let Some((id, entity, causality_region, block)) = iter.next() { + while let Some(row) = iter.next() { out.push_sql("("); for column in &self.unique_columns { let value = if column.is_fulltext() { - self.fulltext_values.get(id, &column.field) + self.fulltext_values.get(row.id, &column.field) } else { - entity.get(&column.field).unwrap_or(&NULL) + row.entity.get(&column.field).unwrap_or(&NULL) }; QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); } - Self::literal_range_current(&self.table, block, &mut out)?; + Self::literal_range_current(&self.table, row.block, &mut out)?; if self.table.has_causality_region { out.push_sql(", "); - out.push_bind_param::(&causality_region)?; + out.push_bind_param::(&row.causality_region)?; }; out.push_sql(")"); From 9e10c9b7b0ccd25be6f13d5d6b210348093c1be0 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 13:57:47 -0700 Subject: [PATCH 0236/2104] graph, store: Allow inserting entities with an end to their block_range --- graph/src/components/store/write.rs | 52 ++++++++++++++++++++---- store/postgres/src/block_range.rs | 21 ++++------ store/postgres/src/relational_queries.rs | 8 +++- 3 files changed, 56 insertions(+), 25 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 98f44adae10..309f715fa0e 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -28,6 +28,15 @@ use super::{ /// `block`. We only ever get an `Overwrite` if such a version actually /// exists. `Insert` simply inserts a new row into the underlying table, /// assuming that there is no need to fix up any prior version. 
+/// +/// The `end` field for `Insert` and `Overwrite` indicates whether the +/// entity exists now: if it is `None`, the entity currently exists, but if +/// it is `Some(_)`, it was deleted, for example, by folding a `Remove` or +/// `Overwrite` into this operation. The entity version will only be visible +/// before `end`, excluding `end`. This folding, which happens in +/// `append_row`, eliminates an update in the database which would otherwise +/// be needed to clamp the open block range of the entity to the block +/// contained in `end` #[derive(Debug)] pub enum EntityMod { /// Insert the entity @@ -35,12 +44,14 @@ pub enum EntityMod { key: EntityKey, data: Entity, block: BlockNumber, + end: Option, }, /// Update the entity by overwriting it Overwrite { key: EntityKey, data: Entity, block: BlockNumber, + end: Option, }, /// Remove the entity Remove { key: EntityKey, block: BlockNumber }, @@ -51,6 +62,9 @@ pub struct EntityWrite<'a> { pub entity: &'a Entity, pub causality_region: CausalityRegion, pub block: BlockNumber, + // The end of the block range for which this write is valid. The value + // of `end` itself is not included in the range + pub end: Option, } impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { @@ -58,14 +72,24 @@ impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { fn try_from(emod: &'a EntityMod) -> Result { match emod { - EntityMod::Insert { key, data, block } | EntityMod::Overwrite { key, data, block } => { - Ok(EntityWrite { - id: &key.entity_id, - entity: data, - causality_region: key.causality_region, - block: *block, - }) + EntityMod::Insert { + key, + data, + block, + end, } + | EntityMod::Overwrite { + key, + data, + block, + end, + } => Ok(EntityWrite { + id: &key.entity_id, + entity: data, + causality_region: key.causality_region, + block: *block, + end: *end, + }), EntityMod::Remove { .. 
} => Err(()), } @@ -75,8 +99,18 @@ impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { impl EntityMod { fn new(m: EntityModification, block: BlockNumber) -> Self { match m { - EntityModification::Insert { key, data } => Self::Insert { key, data, block }, - EntityModification::Overwrite { key, data } => Self::Overwrite { key, data, block }, + EntityModification::Insert { key, data } => Self::Insert { + key, + data, + block, + end: None, + }, + EntityModification::Overwrite { key, data } => Self::Overwrite { + key, + data, + block, + end: None, + }, EntityModification::Remove { key } => Self::Remove { key, block }, } } diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index 98eeea144af..c492c989ad9 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -53,16 +53,6 @@ lazy_static! { #[derive(Clone, Debug)] pub struct BlockRange(Bound, Bound); -// Doing this properly by implementing Clone for Bound is currently -// a nightly-only feature, so we need to work around that -fn clone_bound(bound: Bound<&BlockNumber>) -> Bound { - match bound { - Bound::Included(nr) => Bound::Included(*nr), - Bound::Excluded(nr) => Bound::Excluded(*nr), - Bound::Unbounded => Bound::Unbounded, - } -} - pub(crate) fn first_block_in_range( bound: &(Bound, Bound), ) -> Option { @@ -87,10 +77,13 @@ pub(crate) fn block_number(block_ptr: &BlockPtr) -> BlockNumber { impl From> for BlockRange { fn from(range: RangeFrom) -> BlockRange { - BlockRange( - clone_bound(range.start_bound()), - clone_bound(range.end_bound()), - ) + BlockRange(range.start_bound().cloned(), range.end_bound().cloned()) + } +} + +impl From> for BlockRange { + fn from(range: std::ops::Range) -> BlockRange { + BlockRange(Bound::Included(range.start), Bound::Excluded(range.end)) } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 4762047a644..c420bad74a8 100644 --- a/store/postgres/src/relational_queries.rs 
+++ b/store/postgres/src/relational_queries.rs @@ -1871,12 +1871,16 @@ impl<'a> InsertQuery<'a> { pub fn literal_range_current( table: &Table, block: BlockNumber, + end: Option, out: &mut AstPass, ) -> QueryResult<()> { if table.immutable { out.push_bind_param::(&block) } else { - let block_range: BlockRange = (block..).into(); + let block_range: BlockRange = match end { + Some(end) => (block..end).into(), + None => (block..).into(), + }; out.push_bind_param::, _>(&block_range) } } @@ -1926,7 +1930,7 @@ impl<'a> QueryFragment for InsertQuery<'a> { QueryValue(value, &column.column_type).walk_ast(out.reborrow())?; out.push_sql(", "); } - Self::literal_range_current(&self.table, row.block, &mut out)?; + Self::literal_range_current(&self.table, row.block, row.end, &mut out)?; if self.table.has_causality_region { out.push_sql(", "); out.push_bind_param::(&row.causality_region)?; From 43fafec5c0203a54a959e212c42690a9a2cbcbc9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 7 May 2023 13:02:36 +0200 Subject: [PATCH 0237/2104] store: Encapsulate logic to iterate over visible part of queue better --- store/postgres/src/writable.rs | 182 +++++++++++++++++++-------------- 1 file changed, 103 insertions(+), 79 deletions(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 1035a375c31..7ee0b59ec22 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -373,9 +373,8 @@ impl SyncStore { /// number at which queries should run so that they only consider data that /// is not affected by any requests currently queued. /// -/// The tracker relies on `update` being called in the order newest request -/// in the queue to oldest request so that reverts are seen before the -/// writes that they revert. +/// The best way to use the tracker is to use the `find_map` and `fold` +/// methods.
struct BlockTracker { /// The smallest block number that has been reverted to revert: BlockNumber, @@ -419,6 +418,80 @@ impl BlockTracker { fn visible(&self, block_ptr: &BlockPtr) -> bool { block_ptr.number <= self.revert } + + /// Iterate over all batches currently in the queue, from newest to + /// oldest, and call `f` for each batch whose changes will actually be + /// visible in the database once the entire queue has been processed. + /// + /// The iteration ends the first time that `f` returns `Some(_)`. The + /// queue will be locked during the iteration, so `f` should not do any + /// slow work. + /// + /// The returned `BlockNumber` is the block at which queries should run + /// to only consider the state of the database before any of the queued + /// changes have been applied. + fn find_map(queue: &BoundedQueue>, f: F) -> (Option, BlockNumber) + where + F: Fn(&Batch) -> Option, + { + let mut tracker = BlockTracker::new(); + // Going from newest to oldest entry in the queue as `find_map` does + // ensures that we see reverts before we see the corresponding write + // request. We ignore any write request that writes blocks that have + // a number strictly higher than the revert with the smallest block + // number, as all such writes will be undone once the revert is + // processed. + let res = queue.find_map(|req| { + tracker.update(req.as_ref()); + match req.as_ref() { + Request::Write { batch, .. } => { + if tracker.visible(&batch.block_ptr) { + f(batch) + } else { + None + } + } + Request::RevertTo { .. } | Request::Stop => None, + } + }); + (res, tracker.query_block()) + } + + /// Iterate over all batches currently in the queue, from newest to + /// oldest, and call `f` for each batch whose changes will actually be + /// visible in the database once the entire queue has been processed. 
+ /// + /// Return the value that the last invocation of `f` returned, together + /// with the block at which queries should run to only consider the + /// state of the database before any of the queued changes have been + /// applied. + /// + /// The queue will be locked during the iteration, so `f` should not do + /// any slow work. + fn fold(queue: &BoundedQueue>, init: B, mut f: F) -> (B, BlockNumber) + where + F: FnMut(B, &Batch) -> B, + { + let mut tracker = BlockTracker::new(); + + let accum = queue.fold(init, |accum, req| { + tracker.update(req.as_ref()); + match req.as_ref() { + Request::Write { batch, .. } => { + if tracker.visible(&batch.block_ptr) { + f(accum, batch) + } else { + accum + } + } + Request::RevertTo { .. } | Request::Stop => { + /* nothing to do */ + accum + } + } + }); + (accum, tracker.query_block()) + } } /// A write request received from the `WritableStore` frontend that gets @@ -702,32 +775,13 @@ impl Queue { } } - // Going from newest to oldest entry in the queue as `find_map` does - // ensures that we see reverts before we see the corresponding write - // request. We ignore any write request that writes blocks that have - // a number strictly higher than the revert with the smallest block - // number, as all such writes will be undone once the revert is - // processed. - let mut tracker = BlockTracker::new(); - - let op = self.queue.find_map(|req| { - tracker.update(req.as_ref()); - match req.as_ref() { - Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - batch.last_op(key).map(Op::from) - } else { - None - } - } - Request::RevertTo { .. 
} | Request::Stop => None, - } - }); + let (op, query_block) = + BlockTracker::find_map(&self.queue, |batch| batch.last_op(key).map(Op::from)); match op { Some(Op::Write(entity)) => Ok(Some(entity)), Some(Op::Remove) => Ok(None), - None => self.store.get(key, tracker.query_block()), + None => self.store.get(key, query_block), } } @@ -736,32 +790,21 @@ impl Queue { &self, mut keys: BTreeSet, ) -> Result, StoreError> { - // See the implementation of `get` for how we handle reverts - let mut tracker = BlockTracker::new(); - - // Get entities from entries in the queue - let entities_in_queue = self.queue.fold( + let (entities_in_queue, query_block) = BlockTracker::fold( + &self.queue, BTreeMap::new(), - |mut map: BTreeMap>, req| { - tracker.update(req.as_ref()); - match req.as_ref() { - Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - // See if we have changes for any of the keys. - for key in &keys { - match batch.last_op(key) { - Some(EntityOp::Write { key: _, entity }) => { - map.insert(key.clone(), Some(entity.clone())); - } - Some(EntityOp::Remove { .. }) => { - map.insert(key.clone(), None); - } - None => { /* nothing to do */ } - } - } + |mut map: BTreeMap>, batch| { + // See if we have changes for any of the keys. + for key in &keys { + match batch.last_op(key) { + Some(EntityOp::Write { key: _, entity }) => { + map.insert(key.clone(), Some(entity.clone())); + } + Some(EntityOp::Remove { .. }) => { + map.insert(key.clone(), None); } + None => { /* nothing to do */ } } - Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } } map }, @@ -769,7 +812,7 @@ impl Queue { // Look entities for the remaining keys up in the store keys.retain(|key| !entities_in_queue.contains_key(key)); - let mut map = self.store.get_many(keys, tracker.query_block())?; + let mut map = self.store.get_many(keys, query_block)?; // Extend the store results with the entities from the queue. 
for (key, entity) in entities_in_queue { @@ -786,8 +829,6 @@ impl Queue { &self, derived_query: &DerivedEntityQuery, ) -> Result, StoreError> { - let mut tracker = BlockTracker::new(); - fn is_related(derived_query: &DerivedEntityQuery, entity: &Entity) -> bool { entity .get(&derived_query.entity_field) @@ -811,18 +852,11 @@ impl Queue { } // Get entities from entries in the queue - let entities_in_queue = self.queue.fold( + let (entities_in_queue, query_block) = BlockTracker::fold( + &self.queue, BTreeMap::new(), - |mut map: BTreeMap>, req| { - tracker.update(req.as_ref()); - match req.as_ref() { - Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - map.extend(effective_ops(batch, derived_query)); - } - } - Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } - } + |mut map: BTreeMap>, batch| { + map.extend(effective_ops(batch, derived_query)); map }, ); @@ -832,7 +866,7 @@ impl Queue { // We filter to exclude the entities ids that we already have from the queue let mut items_from_database = self.store - .get_derived(derived_query, tracker.query_block(), excluded_keys)?; + .get_derived(derived_query, query_block, excluded_keys)?; // Extend the store results with the entities from the queue. // This overwrites any entitiy from the database with the same key from queue @@ -850,27 +884,17 @@ impl Queue { &self, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError> { - // See the implementation of `get` for how we handle reverts - let mut tracker = BlockTracker::new(); - // We need to produce a list of dynamic data sources that are // ordered by their creation block. We first look through all the // dds that are still in the queue, and then load dds from the store // as long as they were written at a block before whatever is still // in the queue. The overall list of dds is the list of dds from the // store plus the ones still in memory sorted by their block number. 
- let mut queue_dds = self.queue.fold(Vec::new(), |mut dds, req| { - tracker.update(req.as_ref()); - match req.as_ref() { - Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - dds.extend(batch.new_data_sources().cloned()); - } - } - Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } - } - dds - }); + let (mut queue_dds, query_block) = + BlockTracker::fold(&self.queue, Vec::new(), |mut dds, batch| { + dds.extend(batch.new_data_sources().cloned()); + dds + }); // Using a stable sort is important here so that dds created at the // same block stay in the order in which they were added (and // therefore will be loaded from the store in that order once the @@ -879,7 +903,7 @@ impl Queue { let mut dds = self .store - .load_dynamic_data_sources(tracker.query_block(), manifest_idx_and_name) + .load_dynamic_data_sources(query_block, manifest_idx_and_name) .await?; dds.append(&mut queue_dds); From 42e2b9920b805be49f24e111f24786fd39510c9b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 8 May 2023 10:01:47 +0200 Subject: [PATCH 0238/2104] graph, store: Calculate entity count change from in-memory data When changes to entities get combined, inserting a row can either create a new entity, or update an existing version, and the number of rows inserted is no longer an accurate count of new entities. Instead of relying on data coming from the database, we now use the in-memory representation of changes to determine how applying a batch changes the number of entities in a deployment. 
--- graph/src/components/store/write.rs | 22 +++++++++++++++++++ store/postgres/src/deployment_store.rs | 6 +++-- store/postgres/src/relational.rs | 9 +++----- store/postgres/src/relational_queries.rs | 10 --------- store/test-store/tests/postgres/relational.rs | 7 ++++-- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 309f715fa0e..089fc94d733 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -173,6 +173,22 @@ impl EntityMod { | EntityMod::Remove { key, .. } => key, } } + + fn entity_count_change(&self) -> i32 { + match self { + EntityMod::Insert { end: None, .. } => 1, + EntityMod::Insert { end: Some(_), .. } => { + // Insert followed by a remove + 0 + } + EntityMod::Overwrite { end: None, .. } => 0, + EntityMod::Overwrite { end: Some(_), .. } => { + // Overwrite followed by a remove + -1 + } + EntityMod::Remove { .. } => -1, + } + } } /// A list of entity changes grouped by the entity type @@ -207,6 +223,12 @@ impl RowGroup { self.rows.len() } + /// Return the change in entity count that will result from applying + /// writing this row group to the database + pub fn entity_count_change(&self) -> i32 { + self.rows.iter().map(|row| row.entity_count_change()).sum() + } + /// Iterate over all changes that need clamping of the block range of an /// existing entity version pub fn clamps_by_block(&self) -> impl Iterator { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index cefbf15ef74..39b7442600b 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -328,10 +328,12 @@ impl DeploymentStore { let mut count = 0; for group in groups { + count += group.entity_count_change(); + // Clamp entities before inserting them to avoid having versions // with overlapping block ranges let section = 
stopwatch.start_section("apply_entity_modifications_delete"); - count -= layout.delete(conn, group, stopwatch)? as i32; + layout.delete(conn, group, stopwatch)?; section.end(); let section = stopwatch.start_section("check_interface_entity_uniqueness"); @@ -347,7 +349,7 @@ impl DeploymentStore { section.end(); let section = stopwatch.start_section("apply_entity_modifications_insert"); - count += layout.insert(conn, group, stopwatch)? as i32; + layout.insert(conn, group, stopwatch)?; section.end(); } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 1dacee21e43..bfcc52b5e27 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -656,10 +656,9 @@ impl Layout { conn: &PgConnection, group: &'a RowGroup, stopwatch: &StopwatchMetrics, - ) -> Result { + ) -> Result<(), StoreError> { let table = self.table_for_entity(&group.entity_type)?; let _section = stopwatch.start_section("insert_modification_insert_query"); - let mut count = 0; // We insert the entities in chunks to make sure each operation does // not exceed the maximum number of bindings allowed in queries @@ -667,12 +666,10 @@ impl Layout { for chunk in group.write_chunks(chunk_size) { // Empty chunks would lead to invalid SQL if !chunk.is_empty() { - count += InsertQuery::new(table, &chunk)? - .get_results(conn) - .map(|ids| ids.len())? 
+ InsertQuery::new(table, &chunk)?.execute(conn)?; } } - Ok(count) + Ok(()) } pub fn conflicting_entity( diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index c420bad74a8..48eda4903f1 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1942,9 +1942,6 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(",\n"); } } - out.push_sql("\nreturning "); - out.push_sql(PRIMARY_KEY_COLUMN); - out.push_sql("::text"); Ok(()) } @@ -1956,13 +1953,6 @@ impl<'a> QueryId for InsertQuery<'a> { const HAS_STATIC_QUERY_ID: bool = false; } -impl<'a> LoadQuery for InsertQuery<'a> { - fn internal_load(self, conn: &PgConnection) -> QueryResult> { - conn.query_by_name(&self) - .map(|data| ReturnedEntityData::bytes_as_str(self.table, data)) - } -} - impl<'a, Conn> RunQueryDsl for InsertQuery<'a> {} #[derive(Debug, Clone)] diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 173fc32eb8d..c8976454ccd 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -236,8 +236,11 @@ fn insert_entity_at( entity_type, entities_with_keys ); let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone()); - let inserted = layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); - assert_eq!(inserted, entities_with_keys_owned.len()); + layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); + assert_eq!( + group.entity_count_change(), + entities_with_keys_owned.len() as i32 + ); } fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entities: Vec) { From 1848d5e0e30671a415216c7eda11df0f0fbcdf0a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 15:02:38 -0700 Subject: [PATCH 0239/2104] graph: Add appending one batch to another The result of the append is a batch that, when written to the database, has the same 
effect as writing the two batches in two separate transactions. --- graph/src/components/store/write.rs | 209 ++++++++++++++++++++++++++-- 1 file changed, 201 insertions(+), 8 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 089fc94d733..32c12be9c93 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -5,6 +5,7 @@ use crate::{ blockchain::{block_stream::FirehoseCursor, BlockPtr}, cheap_clone::CheapClone, components::subgraph::Entity, + constraint_violation, data::{subgraph::schema::SubgraphError, value::Word}, data_source::CausalityRegion, prelude::DeploymentHash, @@ -57,6 +58,8 @@ pub enum EntityMod { Remove { key: EntityKey, block: BlockNumber }, } +/// A helper struct for passing entity writes to the outside world, viz. the +/// SQL query generation that inserts rows pub struct EntityWrite<'a> { pub id: &'a Word, pub entity: &'a Entity, @@ -189,6 +192,58 @@ impl EntityMod { EntityMod::Remove { .. } => -1, } } + + fn clamp(&mut self, block: BlockNumber) -> Result<(), StoreError> { + use EntityMod::*; + + match self { + Insert { end, .. } | Overwrite { end, .. } => { + if end.is_some() { + return Err(constraint_violation!( + "can not clamp {:?} to block {}", + self, + block + )); + } + *end = Some(block); + } + Remove { .. } => { + return Err(constraint_violation!( + "can not clamp block range for removal of {:?} to {}", + self, + block + )) + } + } + Ok(()) + } + + /// Turn an `Overwrite` into an `Insert`, return an error if this is a `Remove` + fn as_insert(self, entity_type: &EntityType) -> Result { + use EntityMod::*; + + match self { + Insert { .. } => Ok(self), + Overwrite { + key, + data, + block, + end, + } => Ok(Insert { + key, + data, + block, + end, + }), + Remove { key, .. 
} => { + return Err(constraint_violation!( + "a remove for {}[{}] can not be converted into an insert", + entity_type, + key.entity_id + )) + } + } + } } /// A list of entity changes grouped by the entity type @@ -196,7 +251,8 @@ impl EntityMod { pub struct RowGroup { pub entity_type: EntityType, /// All changes for this entity type, ordered by block; i.e., if `i < j` - /// then `rows[i].block() <= rows[j].block()` + /// then `rows[i].block() <= rows[j].block()`. Several methods on this + /// struct rely on the fact that this ordering is observed. pub rows: Vec, } @@ -215,8 +271,7 @@ impl RowGroup { .map(|emod| emod.block() <= block) .unwrap_or(true)); let row = EntityMod::new(emod, block); - self.rows.push(row); - Ok(()) + self.append_row(row) } fn row_count(&self) -> usize { @@ -267,6 +322,86 @@ impl RowGroup { .filter(move |emod| seen.insert(emod.id())) .map(EntityOp::from) } + + /// Find the most recent entry for `id` + fn prev_row_mut(&mut self, id: &Word) -> Option<&mut EntityMod> { + self.rows.iter_mut().rfind(|emod| emod.id() == id) + } + + /// Append `row` to `self.rows` by combining it with a previously + /// existing row, if that is possible + fn append_row(&mut self, row: EntityMod) -> Result<(), StoreError> { + if let Some(prev_row) = self.prev_row_mut(row.id()) { + use EntityMod::*; + + if row.block() <= prev_row.block() { + return Err(constraint_violation!( + "can not append operations that go backwards from {:?} to {:?}", + prev_row, + row + )); + } + + // The heart of the matter: depending on what `row` is, clamp + // `prev_row` and either ignore `row` since it is not needed, or + // turn it into an `Insert`, which also does not require + // clamping an old version + match (&*prev_row, &row) { + (Insert { end: None, .. } | Overwrite { end: None, .. }, Insert { .. }) + | (Remove { .. }, Overwrite { .. } | Remove { .. }) + | ( + Insert { end: Some(_), .. } | Overwrite { end: Some(_), .. }, + Overwrite { .. } | Remove { .. 
}, + ) => { + return Err(constraint_violation!( + "impossible combination of entity operations: {:?} and then {:?}", + prev_row, + row + )) + } + ( + Insert { end: Some(_), .. } | Overwrite { end: Some(_), .. } | Remove { .. }, + Insert { .. }, + ) => { + // prev_row was deleted + self.rows.push(row); + } + ( + Insert { end: None, .. } | Overwrite { end: None, .. }, + Overwrite { block, .. }, + ) => { + prev_row.clamp(*block)?; + self.rows.push(row.as_insert(&self.entity_type)?); + } + (Insert { end: None, .. } | Overwrite { end: None, .. }, Remove { block, .. }) => { + prev_row.clamp(*block)?; + } + } + } else { + self.rows.push(row); + } + Ok(()) + } + + fn append(&mut self, group: RowGroup) -> Result<(), StoreError> { + if self.entity_type != group.entity_type { + return Err(constraint_violation!( + "Can not append a row group for {} to a row group for {}", + group.entity_type, + self.entity_type + )); + } + + for row in group.rows { + self.append_row(row)?; + } + + Ok(()) + } + + pub fn ids(&self) -> impl Iterator { + self.rows.iter().map(|emod| emod.id().as_str()) + } } struct ClampsByBlockIterator<'a> { @@ -345,6 +480,13 @@ impl RowGroups { fn entity_count(&self) -> usize { self.groups.iter().map(|group| group.row_count()).sum() } + + fn append(&mut self, other: RowGroups) -> Result<(), StoreError> { + for group in other.groups { + self.group_entry(&group.entity_type).append(group)?; + } + Ok(()) + } } /// Data sources data grouped by block @@ -365,6 +507,10 @@ impl DataSources { pub fn is_empty(&self) -> bool { self.entries.iter().all(|(_, dss)| dss.is_empty()) } + + fn append(&mut self, mut other: DataSources) { + self.entries.append(&mut other.entries); + } } /// Indicate to code that looks up entities from the in-memory batch whether @@ -382,19 +528,34 @@ pub enum EntityOp<'a> { impl<'a> From<&'a EntityMod> for EntityOp<'a> { fn from(emod: &'a EntityMod) -> Self { + use EntityMod::*; + match emod { - EntityMod::Insert { data, key, .. 
} | EntityMod::Overwrite { data, key, .. } => { - EntityOp::Write { key, entity: data } + Insert { + data, + key, + end: None, + .. } - EntityMod::Remove { key, .. } => EntityOp::Remove { key }, + | Overwrite { + data, + key, + end: None, + .. + } => EntityOp::Write { key, entity: data }, + Insert { + key, end: Some(_), .. + } + | Overwrite { + key, end: Some(_), .. + } + | Remove { key, .. } => EntityOp::Remove { key }, } } } /// A write batch. This data structure encapsulates all the things that need /// to be changed to persist the output of mappings up to a certain block. -/// For now, a batch will only contain changes for a single block, but will -/// eventually contain data for multiple blocks. pub struct Batch { /// The last block for which this batch contains changes pub block_ptr: BlockPtr, @@ -405,6 +566,7 @@ pub struct Batch { pub data_sources: DataSources, pub deterministic_errors: Vec, pub offchain_to_remove: DataSources, + pub error: Option, } impl Batch { @@ -444,9 +606,40 @@ impl Batch { data_sources, deterministic_errors, offchain_to_remove, + error: None, }) } + fn append_inner(&mut self, mut batch: Batch) -> Result<(), StoreError> { + if batch.block_ptr.number <= self.block_ptr.number { + return Err(constraint_violation!("Batches must go forward. Can't append a batch with block pointer {} to one with block pointer {}", batch.block_ptr, self.block_ptr)); + } + + self.block_ptr = batch.block_ptr; + self.firehose_cursor = batch.firehose_cursor; + self.mods.append(batch.mods)?; + self.data_sources.append(batch.data_sources); + self.deterministic_errors + .append(&mut batch.deterministic_errors); + self.offchain_to_remove.append(batch.offchain_to_remove); + Ok(()) + } + + /// Append `batch` to `self` so that writing `self` afterwards has the + /// same effect as writing `self` first and then `batch` in separate + /// transactions. 
+ /// + /// When this method returns an `Err`, the batch is marked as not + /// healthy by setting `self.error` to `Some(_)` and must not be written + /// as it will be in an indeterminate state. + pub fn append(&mut self, batch: Batch) -> Result<(), StoreError> { + let res = self.append_inner(batch); + if let Err(e) = &res { + self.error = Some(e.clone()); + } + res + } + pub fn entity_count(&self) -> usize { self.mods.entity_count() } From b629a0ae07bee0a2978a626d877e0fe85544252e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 16:24:31 -0700 Subject: [PATCH 0240/2104] graph, store: Attempt to extend existing write requests When a new write request gets queued, try to append it to an existing one whenever possible, instead of creating a new request. --- graph/src/components/store/write.rs | 111 +++++++---- graph/src/util/bounded_queue.rs | 24 +++ store/postgres/src/writable.rs | 290 +++++++++++++++++++++------- 3 files changed, 316 insertions(+), 109 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 32c12be9c93..ed8088b4e46 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -244,6 +244,46 @@ impl EntityMod { } } } + + fn as_entity_op(&self, at: BlockNumber) -> EntityOp<'_> { + debug_assert!(self.block() <= at); + + use EntityMod::*; + + match self { + Insert { + data, + key, + end: None, + .. + } + | Overwrite { + data, + key, + end: None, + .. + } => EntityOp::Write { key, entity: data }, + Insert { + data, + key, + end: Some(end), + .. + } + | Overwrite { + data, + key, + end: Some(end), + .. + } if at < *end => EntityOp::Write { key, entity: data }, + Insert { + key, end: Some(_), .. + } + | Overwrite { + key, end: Some(_), .. + } + | Remove { key, .. 
} => EntityOp::Remove { key }, + } + } } /// A list of entity changes grouped by the entity type @@ -307,20 +347,29 @@ impl RowGroup { self.rows.iter().any(|row| row.is_clamp()) } - pub fn last_op(&self, key: &EntityKey) -> Option> { + pub fn last_op(&self, key: &EntityKey, at: BlockNumber) -> Option> { self.rows .iter() - .rfind(|emod| emod.key() == key) - .map(EntityOp::from) + // We are scanning backwards, i.e., in descendng order of + // `emod.block()`. Therefore, the first `emod` we encounter + // whose block is before `at` is the one in effect + .rfind(|emod| emod.key() == key && emod.block() <= at) + .map(|emod| emod.as_entity_op(at)) } - pub fn effective_ops(&self) -> impl Iterator> { + pub fn effective_ops(&self, at: BlockNumber) -> impl Iterator> { let mut seen = HashSet::new(); self.rows .iter() .rev() - .filter(move |emod| seen.insert(emod.id())) - .map(EntityOp::from) + .filter(move |emod| { + if emod.block() <= at { + seen.insert(emod.id()) + } else { + false + } + }) + .map(move |emod| emod.as_entity_op(at)) } /// Find the most recent entry for `id` @@ -526,39 +575,13 @@ pub enum EntityOp<'a> { Remove { key: &'a EntityKey }, } -impl<'a> From<&'a EntityMod> for EntityOp<'a> { - fn from(emod: &'a EntityMod) -> Self { - use EntityMod::*; - - match emod { - Insert { - data, - key, - end: None, - .. - } - | Overwrite { - data, - key, - end: None, - .. - } => EntityOp::Write { key, entity: data }, - Insert { - key, end: Some(_), .. - } - | Overwrite { - key, end: Some(_), .. - } - | Remove { key, .. } => EntityOp::Remove { key }, - } - } -} - /// A write batch. This data structure encapsulates all the things that need /// to be changed to persist the output of mappings up to a certain block. 
pub struct Batch { /// The last block for which this batch contains changes pub block_ptr: BlockPtr, + /// The first block for which this batch contains changes + pub first_block: BlockNumber, /// The firehose cursor corresponding to `block_ptr` pub firehose_cursor: FirehoseCursor, mods: RowGroups, @@ -599,8 +622,10 @@ impl Batch { let data_sources = DataSources::new(block_ptr.cheap_clone(), data_sources); let offchain_to_remove = DataSources::new(block_ptr.cheap_clone(), offchain_to_remove); + let first_block = block_ptr.number; Ok(Self { block_ptr, + first_block, firehose_cursor, mods, data_sources, @@ -648,22 +673,30 @@ impl Batch { /// `entity_type` and `id` is going to write that entity, i.e., insert /// or overwrite it, or if it is going to remove it. If no change will /// be made to the entity, return `None` - pub fn last_op(&self, key: &EntityKey) -> Option> { - self.mods.group(&key.entity_type)?.last_op(key) + pub fn last_op(&self, key: &EntityKey, block: BlockNumber) -> Option> { + self.mods.group(&key.entity_type)?.last_op(key, block) } - pub fn effective_ops(&self, entity_type: &EntityType) -> impl Iterator { + pub fn effective_ops( + &self, + entity_type: &EntityType, + at: BlockNumber, + ) -> impl Iterator { self.mods .group(entity_type) - .map(|group| group.effective_ops()) + .map(|group| group.effective_ops(at)) .into_iter() .flatten() } - pub fn new_data_sources(&self) -> impl Iterator { + pub fn new_data_sources( + &self, + at: BlockNumber, + ) -> impl Iterator { self.data_sources .entries .iter() + .filter(move |(ptr, _)| ptr.number <= at) .map(|(_, ds)| ds) .flatten() .filter(|ds| { diff --git a/graph/src/util/bounded_queue.rs b/graph/src/util/bounded_queue.rs index 5e0a666d260..f618c7eca7d 100644 --- a/graph/src/util/bounded_queue.rs +++ b/graph/src/util/bounded_queue.rs @@ -86,6 +86,19 @@ impl BoundedQueue { item.clone() } + /// Same as `peek`, but also call `f` while the queue is still locked + /// and safe from modification + pub async fn 
peek_with(&self, f: F) -> T + where + F: FnOnce(&T), + { + let _permit = self.pop_semaphore.acquire().await.unwrap(); + let queue = self.queue.lock().unwrap(); + let item = queue.front().expect("the queue is not empty"); + f(item); + item.clone() + } + /// Push an item into the queue. If the queue is currently full this method /// blocks until an item is available pub async fn push(&self, item: T) { @@ -129,6 +142,17 @@ impl BoundedQueue { queue.iter().rev().find_map(f) } + /// Execute `f` on the newest entry in the queue atomically, i.e., while + /// the queue is locked. The function `f` should therefore not do any + /// slow work + pub fn map_newest(&self, f: F) -> R + where + F: FnOnce(Option<&T>) -> R, + { + let queue = self.queue.lock().unwrap(); + f(queue.back()) + } + /// Iterate over the entries in the queue from newest to oldest entry /// atomically, applying `f` to each entry and returning the result of /// the last invocation of `f`. diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 7ee0b59ec22..bada947c4f8 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,6 +1,8 @@ use std::collections::BTreeSet; +use std::ops::Deref; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; +use std::sync::{Mutex, RwLock, TryLockError as RwLockError}; +use std::time::{Duration, Instant}; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; @@ -375,8 +377,10 @@ impl SyncStore { /// /// The best way to use the trtacker is to use the `fold_map` and `find` /// methods. +#[derive(Debug)] struct BlockTracker { - /// The smallest block number that has been reverted to + /// The smallest block number that has been reverted to. 
Only writes + /// before this block will be visible revert: BlockNumber, /// The largest block number that is not affected by entries in the /// queue @@ -391,19 +395,15 @@ impl BlockTracker { } } - fn update(&mut self, req: &Request) { - match req { - Request::Write { batch, .. } => { - self.block = self.block.min(batch.block_ptr.number - 1); - } - Request::RevertTo { block_ptr, .. } => { - // `block_ptr` is the block pointer we are reverting _to_, - // and is not affected by the revert - self.revert = self.revert.min(block_ptr.number); - self.block = self.block.min(block_ptr.number); - } - Request::Stop => { /* do nothing */ } - } + fn write(&mut self, block_ptr: &BlockPtr) { + self.block = self.block.min(block_ptr.number - 1); + } + + fn revert(&mut self, block_ptr: &BlockPtr) { + // `block_ptr` is the block pointer we are reverting _to_, + // and is not affected by the revert + self.revert = self.revert.min(block_ptr.number); + self.block = self.block.min(block_ptr.number); } /// The block at which a query should run so it does not see the result @@ -413,12 +413,6 @@ impl BlockTracker { self.block } - /// Return `true` if a write at this block will be visible, i.e., not - /// reverted by a previous queue entry - fn visible(&self, block_ptr: &BlockPtr) -> bool { - block_ptr.number <= self.revert - } - /// Iterate over all batches currently in the queue, from newest to /// oldest, and call `f` for each batch whose changes will actually be /// visible in the database once the entire queue has been processed. @@ -432,7 +426,7 @@ impl BlockTracker { /// changes have been applied. 
fn find_map(queue: &BoundedQueue>, f: F) -> (Option, BlockNumber) where - F: Fn(&Batch) -> Option, + F: Fn(&Batch, BlockNumber) -> Option, { let mut tracker = BlockTracker::new(); // Going from newest to oldest entry in the queue as `find_map` does @@ -441,18 +435,23 @@ impl BlockTracker { // a number strictly higher than the revert with the smallest block // number, as all such writes will be undone once the revert is // processed. - let res = queue.find_map(|req| { - tracker.update(req.as_ref()); - match req.as_ref() { - Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - f(batch) - } else { - None + let res = queue.find_map(|req| match req.as_ref() { + Request::Write { batch, .. } => { + let batch = batch.read().unwrap(); + tracker.write(&batch.block_ptr); + if batch.first_block <= tracker.revert { + let res = f(batch.deref(), tracker.revert); + if res.is_some() { + return res; } } - Request::RevertTo { .. } | Request::Stop => None, + None + } + Request::RevertTo { block_ptr, .. } => { + tracker.revert(block_ptr); + None } + Request::Stop => None, }); (res, tracker.query_block()) } @@ -470,21 +469,26 @@ impl BlockTracker { /// any slow work. fn fold(queue: &BoundedQueue>, init: B, mut f: F) -> (B, BlockNumber) where - F: FnMut(B, &Batch) -> B, + F: FnMut(B, &Batch, BlockNumber) -> B, { let mut tracker = BlockTracker::new(); let accum = queue.fold(init, |accum, req| { - tracker.update(req.as_ref()); match req.as_ref() { Request::Write { batch, .. } => { - if tracker.visible(&batch.block_ptr) { - f(accum, batch) - } else { - accum + let batch = batch.read().unwrap(); + let mut accum = accum; + tracker.write(&batch.block_ptr); + if batch.first_block <= tracker.revert { + accum = f(accum, batch.deref(), tracker.revert); } + accum + } + Request::RevertTo { block_ptr, .. } => { + tracker.revert(block_ptr); + accum } - Request::RevertTo { .. 
} | Request::Stop => { + Request::Stop => { /* nothing to do */ accum } @@ -496,17 +500,24 @@ impl BlockTracker { /// A write request received from the `WritableStore` frontend that gets /// queued +/// +/// The `processed` flag is set to true as soon as the background writer is +/// working on that request. Once it has been set, no changes can be made to +/// the request enum Request { Write { + queued: Instant, store: Arc, stopwatch: StopwatchMetrics, - batch: Batch, + batch: RwLock, + processed: AtomicBool, }, RevertTo { store: Arc, /// The subgraph head will be at this block pointer after the revert block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, + processed: AtomicBool, }, Stop, } @@ -514,13 +525,16 @@ enum Request { impl std::fmt::Debug for Request { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::Write { batch, store, .. } => write!( - f, - "write[{}, {:p}, {} entities]", - batch.block_ptr.number, - store.as_ref(), - batch.entity_count() - ), + Self::Write { batch, store, .. } => { + let batch = batch.read().unwrap(); + write!( + f, + "write[{}, {:p}, {} entities]", + batch.block_ptr.number, + store.as_ref(), + batch.entity_count() + ) + } Self::RevertTo { block_ptr, store, .. } => write!(f, "revert[{}, {:p}]", block_ptr.number, store.as_ref()), @@ -535,19 +549,75 @@ enum ExecResult { } impl Request { + fn write(store: Arc, stopwatch: StopwatchMetrics, batch: Batch) -> Self { + Self::Write { + queued: Instant::now(), + store, + stopwatch, + batch: RwLock::new(batch), + processed: AtomicBool::new(false), + } + } + + fn revert(store: Arc, block_ptr: BlockPtr, firehose_cursor: FirehoseCursor) -> Self { + Self::RevertTo { + store, + block_ptr, + firehose_cursor, + processed: AtomicBool::new(false), + } + } + + fn start_process(&self) { + match self { + Request::Write { processed, .. } | Request::RevertTo { processed, .. 
} => { + processed.store(true, Ordering::SeqCst) + } + Request::Stop => { /* nothing to do */ } + } + } + + fn processed(&self) -> bool { + match self { + Request::Write { processed, .. } | Request::RevertTo { processed, .. } => { + processed.load(Ordering::SeqCst) + } + Request::Stop => false, + } + } + fn execute(&self) -> Result { match self { Request::Write { batch, store, stopwatch, - } => store - .transact_block_operations(batch, stopwatch) - .map(|()| ExecResult::Continue), + queued: _, + processed: _, + } => { + let start = Instant::now(); + let batch = batch.read().unwrap(); + if let Some(err) = &batch.error { + // This can happen when appending to the batch failed + // because of a constraint violation. Returning an `Err` + // here will poison and shut down the queue + return Err(err.clone()); + } + let res = store + .transact_block_operations(batch.deref(), stopwatch) + .map(|()| ExecResult::Continue); + info!(store.logger, "Committed write batch"; + "block" => batch.block_ptr.number, + "block_count" => batch.block_ptr.number - batch.first_block + 1, + "entities" => batch.entity_count(), + "time_ms" => start.elapsed().as_millis()); + res + } Request::RevertTo { store, block_ptr, firehose_cursor, + processed: _, } => store .revert_block_operations(block_ptr.clone(), firehose_cursor) .map(|()| ExecResult::Continue), @@ -578,6 +648,11 @@ struct Queue { poisoned: AtomicBool, stopwatch: StopwatchMetrics, + + /// Wether we should attempt to combine writes into large batches + /// spanning multiple blocks. This is initially `true` and gets set to + /// `false` when the subgraph is marked as synced. + batch_writes: AtomicBool, } /// Support for controlling the background writer (pause/resume) only for @@ -660,7 +735,7 @@ impl Queue { // incorrect results. 
let req = { let _section = queue.stopwatch.start_section("queue_wait"); - queue.queue.peek().await + queue.queue.peek_with(|req| req.start_process()).await }; let res = { let _section = queue.stopwatch.start_section("queue_execute"); @@ -714,6 +789,7 @@ impl Queue { write_err, poisoned: AtomicBool::new(false), stopwatch, + batch_writes: AtomicBool::new(true), }; let queue = Arc::new(queue); @@ -729,6 +805,84 @@ impl Queue { Ok(()) } + /// Try to append the `batch` to an existing write request. We will only + /// append if several conditions are true: + /// + /// 1. The subgraph is not synced + /// 2. The newest request (back of the queue) is not already being + /// processed by the writing thread + /// 3. The newest write request is not older than `MAX_BATCH_TIME` + /// + /// In all other cases, we queue a new write request. + /// + /// This strategy has the downside that if writes happen very fast and + /// the queue never has more than one entry, we do never append to an + /// existing batch and always queue new requests. But in that case, + /// nothing has to ever wait for the database, and the gains from + /// batching would not be noticable. 
+ async fn push_write(&self, batch: Batch) -> Result<(), StoreError> { + const MAX_BATCH_TIME: Duration = Duration::from_secs(30); + + let batch = if !self.batch_writes.load(Ordering::SeqCst) { + Some(batch) + } else { + self.queue.map_newest(move |newest| { + let newest = match newest { + Some(newest) => newest, + None => { + return Ok(Some(batch)); + } + }; + // This check at first seems redundant with getting the lock + // on the batch in the request below, but is very important + // for correctness: if the writer has finished processing + // the request and released its lock on the batch, without + // this check, we would modify a request that has already + // been written, and our changes would therefore never be + // written + if newest.processed() { + return Ok(Some(batch)); + } + match newest.as_ref() { + Request::Write { + batch: existing, + queued, + .. + } => { + if queued.elapsed() < MAX_BATCH_TIME { + // We are being very defensive here: if anything + // is holding the lock on the batch, do not + // modify it. We create a new request instead of + // waiting for the lock to avoid slowing writes + // down because of other activity. It would + // probably be fine to wait for the lock. + match existing.try_write() { + Ok(mut existing) => existing.append(batch).map(|()| None), + Err(RwLockError::WouldBlock) => return Ok(Some(batch)), + Err(RwLockError::Poisoned(e)) => { + panic!("rwlock on batch was poisoned {:?}", e); + } + } + } else { + Ok(Some(batch)) + } + } + Request::RevertTo { .. } | Request::Stop => Ok(Some(batch)), + } + })? 
+ }; + + if let Some(batch) = batch { + let req = Request::write( + self.store.cheap_clone(), + self.stopwatch.cheap_clone(), + batch, + ); + self.push(req).await?; + } + Ok(()) + } + /// Wait for the background writer to finish processing queued entries async fn flush(&self) -> Result<(), StoreError> { self.check_err()?; @@ -775,8 +929,9 @@ impl Queue { } } - let (op, query_block) = - BlockTracker::find_map(&self.queue, |batch| batch.last_op(key).map(Op::from)); + let (op, query_block) = BlockTracker::find_map(&self.queue, |batch, at| { + batch.last_op(key, at).map(Op::from) + }); match op { Some(Op::Write(entity)) => Ok(Some(entity)), @@ -793,10 +948,10 @@ impl Queue { let (entities_in_queue, query_block) = BlockTracker::fold( &self.queue, BTreeMap::new(), - |mut map: BTreeMap>, batch| { + |mut map: BTreeMap>, batch, at| { // See if we have changes for any of the keys. for key in &keys { - match batch.last_op(key) { + match batch.last_op(key, at) { Some(EntityOp::Write { key: _, entity }) => { map.insert(key.clone(), Some(entity.clone())); } @@ -839,9 +994,10 @@ impl Queue { fn effective_ops<'a>( batch: &'a Batch, derived_query: &'a DerivedEntityQuery, + at: BlockNumber, ) -> impl Iterator)> + 'a { batch - .effective_ops(&derived_query.entity_type) + .effective_ops(&derived_query.entity_type, at) .filter_map(|op| match op { EntityOp::Write { key, entity } if is_related(derived_query, entity) => { Some((key.clone(), Some(entity.clone()))) @@ -855,8 +1011,8 @@ impl Queue { let (entities_in_queue, query_block) = BlockTracker::fold( &self.queue, BTreeMap::new(), - |mut map: BTreeMap>, batch| { - map.extend(effective_ops(batch, derived_query)); + |mut map: BTreeMap>, batch, at| { + map.extend(effective_ops(batch, derived_query, at)); map }, ); @@ -891,8 +1047,8 @@ impl Queue { // in the queue. The overall list of dds is the list of dds from the // store plus the ones still in memory sorted by their block number. 
let (mut queue_dds, query_block) = - BlockTracker::fold(&self.queue, Vec::new(), |mut dds, batch| { - dds.extend(batch.new_data_sources().cloned()); + BlockTracker::fold(&self.queue, Vec::new(), |mut dds, batch, at| { + dds.extend(batch.new_data_sources(at).cloned()); dds }); // Using a stable sort is important here so that dds created at the @@ -915,6 +1071,7 @@ impl Queue { } fn deployment_synced(&self) { + self.batch_writes.store(false, Ordering::SeqCst); self.stopwatch.disable() } } @@ -963,16 +1120,13 @@ impl Writer { } async fn write(&self, batch: Batch, stopwatch: &StopwatchMetrics) -> Result<(), StoreError> { + const MAX_BATCH_TIME: Duration = Duration::from_secs(30); + match self { Writer::Sync(store) => store.transact_block_operations(&batch, stopwatch), Writer::Async { queue, .. } => { self.check_queue_running()?; - let req = Request::Write { - store: queue.store.cheap_clone(), - stopwatch: queue.stopwatch.cheap_clone(), - batch, - }; - queue.push(req).await + queue.push_write(batch).await } } } @@ -986,11 +1140,7 @@ impl Writer { Writer::Sync(store) => store.revert_block_operations(block_ptr_to, &firehose_cursor), Writer::Async { queue, .. 
} => { self.check_queue_running()?; - let req = Request::RevertTo { - store: queue.store.cheap_clone(), - block_ptr: block_ptr_to, - firehose_cursor, - }; + let req = Request::revert(queue.store.cheap_clone(), block_ptr_to, firehose_cursor); queue.push(req).await } } From e24f285381342ebde0abe83a1382d26c4990b957 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 18:33:57 -0700 Subject: [PATCH 0241/2104] graph, store: Limit the size of a write batch by cache weight --- graph/src/components/store/write.rs | 30 +++++++++++++++++++++++++++++ store/postgres/src/writable.rs | 19 +++++++++++++----- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index ed8088b4e46..e8a365a1f23 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -9,6 +9,7 @@ use crate::{ data::{subgraph::schema::SubgraphError, value::Word}, data_source::CausalityRegion, prelude::DeploymentHash, + util::cache_weight::CacheWeight, }; use super::{ @@ -724,6 +725,35 @@ impl Batch { } } +impl CacheWeight for Batch { + fn indirect_weight(&self) -> usize { + self.mods.indirect_weight() + } +} + +impl CacheWeight for RowGroups { + fn indirect_weight(&self) -> usize { + self.groups.indirect_weight() + } +} + +impl CacheWeight for RowGroup { + fn indirect_weight(&self) -> usize { + self.rows.indirect_weight() + } +} + +impl CacheWeight for EntityMod { + fn indirect_weight(&self) -> usize { + match self { + EntityMod::Insert { key, data, .. } | EntityMod::Overwrite { key, data, .. } => { + key.indirect_weight() + data.indirect_weight() + } + EntityMod::Remove { key, .. 
} => key.indirect_weight(), + } + } +} + pub struct WriteChunker<'a> { group: &'a RowGroup, chunk_size: usize, diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index bada947c4f8..b05e5cd152e 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -13,8 +13,8 @@ use graph::constraint_violation; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ - BlockNumber, Entity, MetricsRegistry, SubgraphDeploymentEntity, SubgraphStore as _, - BLOCK_NUMBER_MAX, + BlockNumber, CacheWeight, Entity, MetricsRegistry, SubgraphDeploymentEntity, + SubgraphStore as _, BLOCK_NUMBER_MAX, }; use graph::schema::InputSchema; use graph::slog::{info, warn}; @@ -607,9 +607,10 @@ impl Request { .transact_block_operations(batch.deref(), stopwatch) .map(|()| ExecResult::Continue); info!(store.logger, "Committed write batch"; - "block" => batch.block_ptr.number, + "block_number" => batch.block_ptr.number, "block_count" => batch.block_ptr.number - batch.first_block + 1, "entities" => batch.entity_count(), + "weight" => batch.weight(), "time_ms" => start.elapsed().as_millis()); res } @@ -812,16 +813,18 @@ impl Queue { /// 2. The newest request (back of the queue) is not already being /// processed by the writing thread /// 3. The newest write request is not older than `MAX_BATCH_TIME` + /// 4. The newest write request is not bigger than `MAX_BATCH_WEIGHT` /// /// In all other cases, we queue a new write request. /// - /// This strategy has the downside that if writes happen very fast and + /// This strategy has the downside that if writes happen very fast and /// the queue never has more than one entry, we do never append to an /// existing batch and always queue new requests. But in that case, /// nothing has to ever wait for the database, and the gains from /// batching would not be noticable. 
async fn push_write(&self, batch: Batch) -> Result<(), StoreError> { const MAX_BATCH_TIME: Duration = Duration::from_secs(30); + const MAX_BATCH_WEIGHT: usize = 10_000 * 1000; let batch = if !self.batch_writes.load(Ordering::SeqCst) { Some(batch) @@ -857,7 +860,13 @@ impl Queue { // down because of other activity. It would // probably be fine to wait for the lock. match existing.try_write() { - Ok(mut existing) => existing.append(batch).map(|()| None), + Ok(mut existing) => { + if existing.weight() < MAX_BATCH_WEIGHT { + existing.append(batch).map(|()| None) + } else { + Ok(Some(batch)) + } + } Err(RwLockError::WouldBlock) => return Ok(Some(batch)), Err(RwLockError::Poisoned(e)) => { panic!("rwlock on batch was poisoned {:?}", e); From d0cdd43d22ccb30999b24ba4c24b4d8d6e211a64 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 27 Apr 2023 18:45:59 -0700 Subject: [PATCH 0242/2104] docs, graph, store: Make write batching configurable --- docs/environment-variables.md | 6 ++++++ graph/src/env/store.rs | 17 +++++++++++++++++ store/postgres/src/writable.rs | 18 ++++++++++-------- 3 files changed, 33 insertions(+), 8 deletions(-) diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 83ee938f059..31063738242 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -238,3 +238,9 @@ those. to 0.5 for the `REBUILD_THRESHOLD` and 0.05 for the `DELETE_THRESHOLD`; they must be between 0 and 1, and `REBUILD_THRESHOLD` must be bigger than `DELETE_THRESHOLD`. +- `GRAPH_STORE_WRITE_BATCH_DURATION`: how long to accumulate changes during + syncing into a batch before a write has to happen in seconds. The default + is 300s. Setting this to 0 disables write batching. +- `GRAPH_STORE_WRITE_BATCH_SIZE`: how many changes to accumulate during + syncing in kilobytes before a write has to happen. The default is 10_000 + which corresponds to 10MB. Setting this to 0 disables write batching. 
diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 8492b0e1b49..48150df9f4c 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -98,6 +98,17 @@ pub struct EnvVarsStore { /// blocks) than its history limit. The default value is 1.2 and the /// value must be at least 1.01 pub history_slack_factor: f64, + /// How long to accumulate changes into a batch before a write has to + /// happen. Set by the environment variable + /// `GRAPH_STORE_WRITE_BATCH_DURATION` in seconds. The default is 300s. + /// Setting this to 0 disables write batching. + pub write_batch_duration: Duration, + /// How many changes to accumulate in bytes before a write has to + /// happen. Set by the environment variable + /// `GRAPH_STORE_WRITE_BATCH_SIZE`, which is in kilobytes. The default + /// is 10_000 which corresponds to 10MB. Setting this to 0 disables + /// write batching. + pub write_batch_size: usize, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -137,6 +148,8 @@ impl From for EnvVarsStore { rebuild_threshold: x.rebuild_threshold.0, delete_threshold: x.delete_threshold.0, history_slack_factor: x.history_slack_factor.0, + write_batch_duration: Duration::from_secs(x.write_batch_duration_in_secs), + write_batch_size: x.write_batch_size * 1_000, } } } @@ -186,6 +199,10 @@ pub struct InnerStore { delete_threshold: ZeroToOneF64, #[envconfig(from = "GRAPH_STORE_HISTORY_SLACK_FACTOR", default = "1.2")] history_slack_factor: HistorySlackF64, + #[envconfig(from = "GRAPH_STORE_WRITE_BATCH_DURATION", default = "300")] + write_batch_duration_in_secs: u64, + #[envconfig(from = "GRAPH_STORE_WRITE_BATCH_SIZE", default = "10000")] + write_batch_size: usize, } #[derive(Clone, Copy, Debug)] diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index b05e5cd152e..7d9bb91d72f 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -812,8 +812,10 @@ impl Queue { /// 1. 
The subgraph is not synced /// 2. The newest request (back of the queue) is not already being /// processed by the writing thread - /// 3. The newest write request is not older than `MAX_BATCH_TIME` - /// 4. The newest write request is not bigger than `MAX_BATCH_WEIGHT` + /// 3. The newest write request is not older than + /// `GRAPH_STORE_WRITE_BATCH_DURATION` + /// 4. The newest write request is not bigger than + /// `GRAPH_STORE_WRITE_BATCH_SIZE` /// /// In all other cases, we queue a new write request. /// @@ -823,10 +825,10 @@ impl Queue { /// nothing has to ever wait for the database, and the gains from /// batching would not be noticable. async fn push_write(&self, batch: Batch) -> Result<(), StoreError> { - const MAX_BATCH_TIME: Duration = Duration::from_secs(30); - const MAX_BATCH_WEIGHT: usize = 10_000 * 1000; - - let batch = if !self.batch_writes.load(Ordering::SeqCst) { + let batch = if ENV_VARS.store.write_batch_size == 0 + || ENV_VARS.store.write_batch_duration.is_zero() + || !self.batch_writes.load(Ordering::SeqCst) + { Some(batch) } else { self.queue.map_newest(move |newest| { @@ -852,7 +854,7 @@ impl Queue { queued, .. } => { - if queued.elapsed() < MAX_BATCH_TIME { + if queued.elapsed() < ENV_VARS.store.write_batch_duration { // We are being very defensive here: if anything // is holding the lock on the batch, do not // modify it. We create a new request instead of @@ -861,7 +863,7 @@ impl Queue { // probably be fine to wait for the lock. 
match existing.try_write() { Ok(mut existing) => { - if existing.weight() < MAX_BATCH_WEIGHT { + if existing.weight() < ENV_VARS.store.write_batch_size { existing.append(batch).map(|()| None) } else { Ok(Some(batch)) From 4f5b61c12dd3c90a7a7394c44d1048f4aad8b9bc Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 28 Apr 2023 19:09:32 -0700 Subject: [PATCH 0243/2104] store: Append to existing writes more aggressively So far, new write requests were appended to existing ones only opportunistically; this commit makes it so that we actually hold back writing to allow processing to append to an existing write request. Only write a batch if its size is beyond WRITE_BATCH_SIZE or if it is older than WRITE_BATCH_DURATION. Since we don't have a way to wait on the batch growing big enough, poll for that every 2s This also requires that stopping or flushing the WritableStore sets `batch_writes` to `false`, mostly for tests, to ensure that all pending writes get written out before the background writer shuts down. --- graph/src/components/store/write.rs | 3 + store/postgres/src/writable.rs | 153 +++++++++++++++++++---- store/test-store/tests/postgres/store.rs | 4 +- tests/src/fixture/mod.rs | 25 +++- 4 files changed, 159 insertions(+), 26 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index e8a365a1f23..848cea7879a 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -495,6 +495,7 @@ impl<'a> Iterator for ClampsByBlockIterator<'a> { } /// A list of entity changes with one group per entity type +#[derive(Debug)] pub struct RowGroups { pub groups: Vec, } @@ -540,6 +541,7 @@ impl RowGroups { } /// Data sources data grouped by block +#[derive(Debug)] pub struct DataSources { pub entries: Vec<(BlockPtr, Vec)>, } @@ -578,6 +580,7 @@ pub enum EntityOp<'a> { /// A write batch. 
This data structure encapsulates all the things that need /// to be changed to persist the output of mappings up to a certain block. +#[derive(Debug)] pub struct Batch { /// The last block for which this batch contains changes pub block_ptr: BlockPtr, diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 7d9bb91d72f..20dd09c1507 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -18,6 +18,8 @@ use graph::prelude::{ }; use graph::schema::InputSchema; use graph::slog::{info, warn}; +use graph::tokio::select; +use graph::tokio::sync::Notify; use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ @@ -509,6 +511,11 @@ enum Request { queued: Instant, store: Arc, stopwatch: StopwatchMetrics, + // The batch is in a `RwLock` because `push_write` will try to add + // to the batch under the right conditions, and other operations + // will try to read the batch. The batch only becomes truly readonly + // when we decide to process it at which point we set `processed` to + // `true` batch: RwLock, processed: AtomicBool, }, @@ -625,6 +632,26 @@ impl Request { Request::Stop => Ok(ExecResult::Stop), } } + + /// Return `true` if we should process this request right away. Return + /// `false` if we should wait for a little longer with processing the + /// request + fn should_process(&self) -> bool { + match self { + Request::Write { queued, batch, .. } => { + batch.read().unwrap().weight() >= ENV_VARS.store.write_batch_size + || queued.elapsed() >= ENV_VARS.store.write_batch_duration + } + Request::RevertTo { .. } | Request::Stop => true, + } + } + + fn is_write(&self) -> bool { + match self { + Request::Write { .. } => true, + Request::RevertTo { .. } | Request::Stop => false, + } + } } /// A queue that asynchronously writes requests queued with `push` to the @@ -654,6 +681,10 @@ struct Queue { /// spanning multiple blocks. 
This is initially `true` and gets set to /// `false` when the subgraph is marked as synced. batch_writes: AtomicBool, + + /// Notify the background writer as soon as we are told to stop + /// batching or there is a batch that is big enough to proceed. + batch_ready_notify: Arc, } /// Support for controlling the background writer (pause/resume) only for @@ -723,11 +754,44 @@ impl Queue { capacity: usize, registry: Arc, ) -> (Arc, JoinHandle<()>) { - async fn start_writer(queue: Arc, logger: Logger) { + async fn start_writer(queue: Arc, logger: Logger, batch_stop_notify: Arc) { loop { #[cfg(debug_assertions)] test_support::take_step(&queue.store.site.as_ref().into()).await; + // If batching is enabled, hold off on writing a batch for a + // little bit to give processing a chance to add more + // changes. We start processing a batch if it is big enough + // or old enough, or if there is more than one request in + // the queue. The latter condition makes sure that we do not + // wait for a batch to grow when `push_write` would never + // add to it again. + if queue.batch_writes() && queue.queue.len() <= 1 { + loop { + let _section = queue.stopwatch.start_section("queue_wait"); + let req = queue.queue.peek().await; + + // When this is true, push_write would never add to + // `req`, and we therefore execute the request as + // waiting for more changes to it would be pointless + if !queue.batch_writes() || queue.queue.len() > 1 || req.should_process() { + break; + } + + // Wait until something has changed before checking + // again, either because we were notified that the + // batch should be processed or after some time + // passed. The latter is just for safety in case + // there is a mistake with notifications. 
+ let sleep = graph::tokio::time::sleep(Duration::from_secs(2)); + let notify = batch_stop_notify.notified(); + select!( + () = sleep => (), + () = notify => (), + ); + } + } + // We peek at the front of the queue, rather than pop it // right away, so that query methods like `get` have access // to the data while it is being written. If we popped here, @@ -736,6 +800,9 @@ impl Queue { // incorrect results. let req = { let _section = queue.stopwatch.start_section("queue_wait"); + // Mark the request as being processed so push_write + // will not modify it again, even after we are done with + // it here queue.queue.peek_with(|req| req.start_process()).await }; let res = { @@ -784,6 +851,7 @@ impl Queue { registry, ); + let batch_ready_notify = Arc::new(Notify::new()); let queue = Self { store, queue, @@ -791,10 +859,15 @@ impl Queue { poisoned: AtomicBool::new(false), stopwatch, batch_writes: AtomicBool::new(true), + batch_ready_notify: batch_ready_notify.clone(), }; let queue = Arc::new(queue); - let handle = graph::spawn(start_writer(queue.cheap_clone(), logger)); + let handle = graph::spawn(start_writer( + queue.cheap_clone(), + logger, + batch_ready_notify, + )); (queue, handle) } @@ -802,32 +875,42 @@ impl Queue { /// Add a write request to the queue async fn push(&self, req: Request) -> Result<(), StoreError> { self.check_err()?; + // If we see anything but a write we have to turn off batching as + // that would risk adding changes from after a revert into a batch + // that gets processed before the revert + if !req.is_write() { + self.stop_batching(); + } self.queue.push(Arc::new(req)).await; Ok(()) } - /// Try to append the `batch` to an existing write request. We will only - /// append if several conditions are true: + /// Try to append the `batch` to the newest request in the queue if that + /// is a write request. We will only append if several conditions are + /// true: /// /// 1. The subgraph is not synced - /// 2. 
The newest request (back of the queue) is not already being - /// processed by the writing thread - /// 3. The newest write request is not older than + /// 2. The newest request (back of the queue) is a write + /// 3. The newest request is not already being processed by the + /// writing thread + /// 4. The newest write request is not older than /// `GRAPH_STORE_WRITE_BATCH_DURATION` - /// 4. The newest write request is not bigger than + /// 5. The newest write request is not bigger than /// `GRAPH_STORE_WRITE_BATCH_SIZE` /// - /// In all other cases, we queue a new write request. + /// In all other cases, we queue a new write request. Note that (3) + /// means that the oldest request (front of the queue) does not + /// necessarily fulfill (4) and (5) even if it is a write and the + /// subgraph is not synced yet. /// - /// This strategy has the downside that if writes happen very fast and - /// the queue never has more than one entry, we do never append to an - /// existing batch and always queue new requests. But in that case, - /// nothing has to ever wait for the database, and the gains from - /// batching would not be noticable. + /// This strategy is closely tied to how start_writer waits for writes + /// to fill up before writing them to maximize the chances that we build + /// a 'full' write batch, i.e., one that is either big enough or old + /// enough async fn push_write(&self, batch: Batch) -> Result<(), StoreError> { let batch = if ENV_VARS.store.write_batch_size == 0 || ENV_VARS.store.write_batch_duration.is_zero() - || !self.batch_writes.load(Ordering::SeqCst) + || !self.batch_writes() { Some(batch) } else { @@ -858,18 +941,29 @@ impl Queue { // We are being very defensive here: if anything // is holding the lock on the batch, do not // modify it. We create a new request instead of - // waiting for the lock to avoid slowing writes - // down because of other activity. It would - // probably be fine to wait for the lock. 
+ // waiting for the lock since writing a batch + // holds a read lock on the batch for the + // duration of the write, and we do not want to + // slow down queueing requests unnecessarily match existing.try_write() { Ok(mut existing) => { if existing.weight() < ENV_VARS.store.write_batch_size { - existing.append(batch).map(|()| None) + let res = existing.append(batch).map(|()| None); + if existing.weight() >= ENV_VARS.store.write_batch_size { + self.batch_ready_notify.notify_one(); + } + res } else { Ok(Some(batch)) } } - Err(RwLockError::WouldBlock) => return Ok(Some(batch)), + Err(RwLockError::WouldBlock) => { + // This branch can cause batches that + // are not 'full' at the head of the + // queue, something that start_writer + // has to take into account + return Ok(Some(batch)); + } Err(RwLockError::Poisoned(e)) => { panic!("rwlock on batch was poisoned {:?}", e); } @@ -897,11 +991,19 @@ impl Queue { /// Wait for the background writer to finish processing queued entries async fn flush(&self) -> Result<(), StoreError> { self.check_err()?; + // Turn off batching so the queue doesn't wait for a batch to become + // full, but restore the old behavior once the queue is empty. 
+ let batching = self.batch_writes.load(Ordering::SeqCst); + self.stop_batching(); + self.queue.wait_empty().await; + + self.batch_writes.store(batching, Ordering::SeqCst); self.check_err() } async fn stop(&self) -> Result<(), StoreError> { + self.stop_batching(); self.push(Request::Stop).await } @@ -1082,9 +1184,18 @@ impl Queue { } fn deployment_synced(&self) { - self.batch_writes.store(false, Ordering::SeqCst); + self.stop_batching(); self.stopwatch.disable() } + + fn batch_writes(&self) -> bool { + self.batch_writes.load(Ordering::SeqCst) + } + + fn stop_batching(&self) { + self.batch_writes.store(false, Ordering::SeqCst); + self.batch_ready_notify.notify_one(); + } } /// A shim to allow bypassing any pipelined store handling if need be diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index d6231bf58f8..68b44c7958f 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -1286,7 +1286,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { ), ("2".to_owned(), entity! 
{ schema => id: "2", name: "Tessa" }), ]; - transact_entity_operations( + transact_and_wait( &store.subgraph_store(), &deployment, TEST_BLOCK_1_PTR.clone(), @@ -1314,7 +1314,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { }; // Commit update & delete ops - transact_entity_operations( + transact_and_wait( &store.subgraph_store(), &deployment, TEST_BLOCK_2_PTR.clone(), diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index e31d02dbf9d..3dc053aab73 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -171,7 +171,7 @@ impl TestContext { wait_for_sync( &self.logger, - &self.store, + self.store.clone(), &self.deployment.clone(), stop_block, ) @@ -190,7 +190,7 @@ impl TestContext { wait_for_sync( &self.logger, - &self.store, + self.store.clone(), &self.deployment.clone(), stop_block, ) @@ -457,13 +457,32 @@ pub fn cleanup( pub async fn wait_for_sync( logger: &Logger, - store: &SubgraphStore, + store: Arc, deployment: &DeploymentLocator, stop_block: BlockPtr, ) -> Result<(), SubgraphError> { + /// We flush here to speed up how long the write queue waits before it + /// considers a batch complete and writable. 
Without flushing, we would + /// have to wait for `GRAPH_STORE_WRITE_BATCH_DURATION` before all + /// changes have been written to the database + async fn flush(logger: &Logger, store: &Arc, deployment: &DeploymentLocator) { + store + .clone() + .writable(logger.clone(), deployment.id, Arc::new(vec![])) + .await + .unwrap() + .flush() + .await + .unwrap(); + } + let mut err_count = 0; + + flush(logger, &store, deployment).await; + while err_count < 10 { tokio::time::sleep(Duration::from_millis(1000)).await; + flush(logger, &store, deployment).await; let block_ptr = match store.least_block_ptr(&deployment.hash).await { Ok(Some(ptr)) => ptr, From 743e1896b43da5ae7519efa454be82e4334a1017 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 29 Apr 2023 21:26:41 -0700 Subject: [PATCH 0244/2104] graph, store: Make RowGroup.rows private --- graph/src/components/store/write.rs | 2 +- store/postgres/src/relational.rs | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 848cea7879a..c57c0709b79 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -294,7 +294,7 @@ pub struct RowGroup { /// All changes for this entity type, ordered by block; i.e., if `i < j` /// then `rows[i].block() <= rows[j].block()`. Several methods on this /// struct rely on the fact that this ordering is observed. 
- pub rows: Vec, + rows: Vec, } impl RowGroup { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index bfcc52b5e27..cad6e6b319a 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -805,12 +805,7 @@ impl Layout { ) -> Result { let table = self.table_for_entity(&group.entity_type)?; if table.immutable && group.has_clamps() { - let ids = group - .rows - .iter() - .map(|row| row.id().as_str()) - .collect::>() - .join(", "); + let ids = group.ids().collect::>().join(", "); return Err(constraint_violation!( "entities of type `{}` can not be updated since they are immutable. Entity ids are [{}]", group.entity_type, @@ -854,7 +849,7 @@ impl Layout { if table.immutable { return Err(constraint_violation!( "entities of type `{}` can not be deleted since they are immutable. Entity ids are [{}]", - table.object, group.rows.iter().map(|eref| eref.id()).join(", ") + table.object, group.ids().join(", ") )); } From e4d7563f846e90a3f5a1575e226ded06e67da6a9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 9 May 2023 16:57:42 +0200 Subject: [PATCH 0245/2104] ci: Reduce batch duration to avoid timeouts --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd8dead4c92..c0847d9d898 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -126,6 +126,8 @@ jobs: env: N_CONCURRENT_TESTS: "4" TESTS_GANACHE_HARD_WAIT_SECONDS: "30" + # Reduce how long a batch will stick around to avoid timeouts + GRAPH_STORE_WRITE_BATCH_DURATION: 30 with: command: test args: --verbose --package graph-tests parallel_integration_tests -- --nocapture @@ -137,6 +139,8 @@ jobs: env: N_CONCURRENT_TESTS: "4" TESTS_GANACHE_HARD_WAIT_SECONDS: "30" + # Reduce how long a batch will stick around to avoid timeouts + GRAPH_STORE_WRITE_BATCH_DURATION: 30 with: command: test args: --verbose --package graph-tests parallel_integration_tests -- 
--nocapture From f5b7cb5d38aa9744db3e7989c87a205be36925a1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 17 May 2023 09:21:15 -0700 Subject: [PATCH 0246/2104] graph: Always check that RowGroup::push does not go backwards --- graph/src/components/store/write.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index c57c0709b79..7bc7f4a0d3e 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -306,11 +306,21 @@ impl RowGroup { } pub fn push(&mut self, emod: EntityModification, block: BlockNumber) -> Result<(), StoreError> { - debug_assert!(self + let is_forward = self .rows .last() .map(|emod| emod.block() <= block) - .unwrap_or(true)); + .unwrap_or(true); + if !is_forward { + // unwrap: we only get here when `last()` is `Some` + let last_block = self.rows.last().map(|emod| emod.block()).unwrap(); + return Err(constraint_violation!( + "we already have a modification for block {}, can not append {:?}", + last_block, + emod + )); + } + let row = EntityMod::new(emod, block); self.append_row(row) } From 1df5a6687089b9be60435e15b9a8595b8f554d4e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 9 May 2023 16:08:41 +0200 Subject: [PATCH 0247/2104] graph: Add unit tests for some RowGroup functionality --- graph/src/components/store/write.rs | 183 +++++++++++++++++++++++++++- 1 file changed, 181 insertions(+), 2 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 7bc7f4a0d3e..df3864079f8 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -39,7 +39,7 @@ use super::{ /// `append_row`, eliminates an update in the database which would otherwise /// be needed to clamp the open block range of the entity to the block /// contained in `end` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub enum EntityMod { /// Insert 
the entity Insert { @@ -578,6 +578,7 @@ impl DataSources { /// Indicate to code that looks up entities from the in-memory batch whether /// the entity in question will be written or removed at the block of the /// lookup +#[derive(Debug, PartialEq)] pub enum EntityOp<'a> { /// There is a new version of the entity that will be written Write { @@ -873,7 +874,15 @@ impl<'a> Iterator for WriteChunkIter<'a> { #[cfg(test)] mod test { - use crate::components::store::{write::EntityMod, BlockNumber, EntityKey, EntityType}; + use crate::{ + components::store::{ + write::EntityMod, write::EntityOp, BlockNumber, EntityKey, EntityType, StoreError, + }, + entity, + prelude::DeploymentHash, + schema::InputSchema, + }; + use lazy_static::lazy_static; use super::RowGroup; @@ -925,4 +934,174 @@ mod test { let exp: RunList<'_> = &[(1, &[10]), (2, &[20]), (1, &[11])]; check_runs(&[10, 20, 11], &[1, 2, 1], exp); } + + const GQL: &str = "type Thing @entity { id: ID!, count: Int! }"; + lazy_static! { + static ref DEPLOYMENT: DeploymentHash = DeploymentHash::new("batchAppend").unwrap(); + static ref SCHEMA: InputSchema = InputSchema::parse(GQL, DEPLOYMENT.clone()).unwrap(); + static ref ENTITY_TYPE: EntityType = EntityType::new("Thing".to_string()); + } + + /// Convenient notation for changes to a fixed entity + #[derive(Clone, Debug)] + enum Mod { + Ins(BlockNumber), + Ovw(BlockNumber), + Rem(BlockNumber), + // clamped insert + InsC(BlockNumber, BlockNumber), + // clamped overwrite + OvwC(BlockNumber, BlockNumber), + } + + impl From<&Mod> for EntityMod { + fn from(value: &Mod) -> Self { + use Mod::*; + + let value = value.clone(); + let key = EntityKey::data("Thing", "one"); + match value { + Ins(block) => EntityMod::Insert { + key, + data: entity! { SCHEMA => id: "one", count: block }, + block, + end: None, + }, + Ovw(block) => EntityMod::Overwrite { + key, + data: entity! 
{ SCHEMA => id: "one", count: block }, + block, + end: None, + }, + Rem(block) => EntityMod::Remove { key, block }, + InsC(block, end) => EntityMod::Insert { + key, + data: entity! { SCHEMA => id: "one", count: block }, + block, + end: Some(end), + }, + OvwC(block, end) => EntityMod::Overwrite { + key, + data: entity! { SCHEMA => id: "one", count: block }, + block, + end: Some(end), + }, + } + } + } + + /// Helper to construct a `RowGroup` + #[derive(Debug)] + struct Group { + group: RowGroup, + } + + impl Group { + fn new() -> Self { + Self { + group: RowGroup::new(ENTITY_TYPE.clone()), + } + } + + fn append(&mut self, mods: &[Mod]) -> Result<(), StoreError> { + for m in mods { + self.group.append_row(EntityMod::from(m))? + } + Ok(()) + } + + fn with(mods: &[Mod]) -> Result { + let mut group = Self::new(); + group.append(mods)?; + Ok(group) + } + } + + impl PartialEq<&[Mod]> for Group { + fn eq(&self, mods: &&[Mod]) -> bool { + let mods: Vec<_> = mods.iter().map(|m| EntityMod::from(m)).collect(); + self.group.rows == mods + } + } + + #[test] + fn append() { + use Mod::*; + + let res = Group::with(&[Ins(1), Ins(2)]); + assert!(res.is_err()); + + let res = Group::with(&[Ovw(1), Ins(2)]); + assert!(res.is_err()); + + let res = Group::with(&[Ins(1), Rem(2), Rem(3)]); + assert!(res.is_err()); + + let res = Group::with(&[Ovw(1), Rem(2), Rem(3)]); + assert!(res.is_err()); + + let res = Group::with(&[Ovw(1), Rem(2), Ovw(3)]); + assert!(res.is_err()); + + let group = Group::with(&[Ins(1), Ovw(2), Rem(3)]).unwrap(); + assert_eq!(group, &[InsC(1, 2), InsC(2, 3)]); + + let group = Group::with(&[Ovw(1), Rem(4)]).unwrap(); + assert_eq!(group, &[OvwC(1, 4)]); + + let group = Group::with(&[Ins(1), Rem(4)]).unwrap(); + assert_eq!(group, &[InsC(1, 4)]); + + let group = Group::with(&[Ins(1), Rem(2), Ins(3)]).unwrap(); + assert_eq!(group, &[InsC(1, 2), Ins(3)]); + + let group = Group::with(&[Ovw(1), Rem(2), Ins(3)]).unwrap(); + assert_eq!(group, &[OvwC(1, 2), Ins(3)]); + } + + 
#[test] + fn last_op() { + #[track_caller] + fn is_remove(group: &RowGroup, at: BlockNumber) { + let key = EntityKey::data("Thing", "one"); + let op = group.last_op(&key, at).unwrap(); + + assert!( + matches!(op, EntityOp::Remove { .. }), + "op must be a remove at {} but is {:?}", + at, + op + ); + } + #[track_caller] + fn is_write(group: &RowGroup, at: BlockNumber) { + let key = EntityKey::data("Thing", "one"); + let op = group.last_op(&key, at).unwrap(); + + assert!( + matches!(op, EntityOp::Write { .. }), + "op must be a write at {} but is {:?}", + at, + op + ); + } + + use Mod::*; + + let key = EntityKey::data("Thing", "one"); + + // This will result in two mods in the group: + // [ InsC(1,2), InsC(2,3) ] + let group = Group::with(&[Ins(1), Ovw(2), Rem(3)]).unwrap().group; + + is_remove(&group, 5); + is_remove(&group, 4); + is_remove(&group, 3); + + is_write(&group, 2); + is_write(&group, 1); + + let op = group.last_op(&key, 0); + assert_eq!(None, op); + } } From 76619a60a63d0a451fb4dbf9c4ef2a00cd386d3c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 17 May 2023 10:44:14 -0700 Subject: [PATCH 0248/2104] store: Fix typo in comment --- store/postgres/src/writable.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 20dd09c1507..b29173206db 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -377,7 +377,7 @@ impl SyncStore { /// number at which queries should run so that they only consider data that /// is not affected by any requests currently queued. /// -/// The best way to use the trtacker is to use the `fold_map` and `find` +/// The best way to use the tracker is to use the `fold_map` and `find` /// methods.
#[derive(Debug)] struct BlockTracker { From 75f39611e7e087a2b137ec52e52de9bb658c69af Mon Sep 17 00:00:00 2001 From: Jacob T Firek <106350168+jtfirek@users.noreply.github.com> Date: Sun, 21 May 2023 00:11:14 -0500 Subject: [PATCH 0249/2104] fix: old link in `getting-started.md` (#4634) --- docs/getting-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting-started.md b/docs/getting-started.md index 52f7ea190a1..b504f357bdb 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -44,7 +44,7 @@ The high-level dataflow for a dApp using The Graph is as follows: Three repositories are relevant to building on The Graph: 1. [Graph Node](../README.md) – A server implementation for indexing, caching, and serving queries against data from Ethereum. 2. [Graph CLI](https://github.com/graphprotocol/graph-cli) – A CLI for building and compiling projects that are deployed to the Graph Node. -3. [Graph TypeScript Library](https://github.com/graphprotocol/graph-ts) – TypeScript/AssemblyScript library for writing subgraph mappings to be deployed to The Graph. +3. [Graph TypeScript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) – TypeScript/AssemblyScript library for writing subgraph mappings to be deployed to The Graph. ### 0.4 Getting Started Overview Below, we outline the required steps to build a subgraph from scratch, which will serve queries from a GraphQL endpoint. 
The three major steps are: From 92d4938464b23dce174c38933826fb06fe409235 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Mon, 22 May 2023 12:10:50 +0530 Subject: [PATCH 0250/2104] graph,chain : create and implement MappingTriggerTrait with error context for all chains --- chain/arweave/src/trigger.rs | 39 +++++++----- chain/cosmos/src/trigger.rs | 101 +++++++++++++++++--------------- chain/ethereum/src/trigger.rs | 16 +++++ chain/near/src/trigger.rs | 39 +++++++----- chain/substreams/src/trigger.rs | 10 +++- graph/src/blockchain/mock.rs | 8 ++- graph/src/blockchain/mod.rs | 8 ++- graph/src/data_source/mod.rs | 13 +++- 8 files changed, 155 insertions(+), 79 deletions(-) diff --git a/chain/arweave/src/trigger.rs b/chain/arweave/src/trigger.rs index dc5fbbcad39..7652c03e9a5 100644 --- a/chain/arweave/src/trigger.rs +++ b/chain/arweave/src/trigger.rs @@ -1,4 +1,5 @@ use graph::blockchain::Block; +use graph::blockchain::MappingTriggerTrait; use graph::blockchain::TriggerData; use graph::cheap_clone::CheapClone; use graph::prelude::web3::types::H256; @@ -87,6 +88,22 @@ impl ArweaveTrigger { ArweaveTrigger::Transaction(tx) => tx.block.ptr().hash_as_h256(), } } + + fn error_context(&self) -> std::string::String { + match self { + ArweaveTrigger::Block(..) => { + format!("Block #{} ({})", self.block_number(), self.block_hash()) + } + ArweaveTrigger::Transaction(tx) => { + format!( + "Tx #{}, block #{}({})", + base64_url::encode(&tx.tx.id), + self.block_number(), + self.block_hash() + ) + } + } + } } impl Ord for ArweaveTrigger { @@ -113,20 +130,14 @@ impl PartialOrd for ArweaveTrigger { } impl TriggerData for ArweaveTrigger { - fn error_context(&self) -> std::string::String { - match self { - ArweaveTrigger::Block(..) 
=> { - format!("Block #{} ({})", self.block_number(), self.block_hash()) - } - ArweaveTrigger::Transaction(tx) => { - format!( - "Tx #{}, block #{}({})", - base64_url::encode(&tx.tx.id), - self.block_number(), - self.block_hash() - ) - } - } + fn error_context(&self) -> String { + self.error_context() + } +} + +impl MappingTriggerTrait for ArweaveTrigger { + fn error_context(&self) -> String { + self.error_context() } } diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs index cecf6edebd3..794f006665c 100644 --- a/chain/cosmos/src/trigger.rs +++ b/chain/cosmos/src/trigger.rs @@ -1,6 +1,6 @@ use std::{cmp::Ordering, sync::Arc}; -use graph::blockchain::{Block, BlockHash, TriggerData}; +use graph::blockchain::{Block, BlockHash, MappingTriggerTrait, TriggerData}; use graph::cheap_clone::CheapClone; use graph::prelude::{BlockNumber, Error}; use graph::runtime::HostExportError; @@ -161,52 +161,7 @@ impl CosmosTrigger { CosmosTrigger::Message(message_data) => message_data.block().map(|b| b.hash()), } } -} - -impl Ord for CosmosTrigger { - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - // Events have no intrinsic ordering information, so we keep the order in - // which they are included in the `events` field - (Self::Event { .. }, Self::Event { .. }) => Ordering::Equal, - - // Keep the order when comparing two message triggers - (Self::Message(..), Self::Message(..)) => Ordering::Equal, - - // Transactions are ordered by their index inside the block - (Self::Transaction(a), Self::Transaction(b)) => { - if let (Ok(a_tx_result), Ok(b_tx_result)) = (a.tx_result(), b.tx_result()) { - a_tx_result.index.cmp(&b_tx_result.index) - } else { - Ordering::Equal - } - } - - // Keep the order when comparing two block triggers - (Self::Block(..), Self::Block(..)) => Ordering::Equal, - // Event triggers always come first - (Self::Event { .. }, _) => Ordering::Greater, - (_, Self::Event { .. 
}) => Ordering::Less, - - // Block triggers always come last - (Self::Block(..), _) => Ordering::Less, - (_, Self::Block(..)) => Ordering::Greater, - - // Message triggers before Transaction triggers - (Self::Message(..), Self::Transaction(..)) => Ordering::Greater, - (Self::Transaction(..), Self::Message(..)) => Ordering::Less, - } - } -} - -impl PartialOrd for CosmosTrigger { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl TriggerData for CosmosTrigger { fn error_context(&self) -> std::string::String { match self { CosmosTrigger::Block(..) => { @@ -261,6 +216,60 @@ impl TriggerData for CosmosTrigger { } } +impl Ord for CosmosTrigger { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + // Events have no intrinsic ordering information, so we keep the order in + // which they are included in the `events` field + (Self::Event { .. }, Self::Event { .. }) => Ordering::Equal, + + // Keep the order when comparing two message triggers + (Self::Message(..), Self::Message(..)) => Ordering::Equal, + + // Transactions are ordered by their index inside the block + (Self::Transaction(a), Self::Transaction(b)) => { + if let (Ok(a_tx_result), Ok(b_tx_result)) = (a.tx_result(), b.tx_result()) { + a_tx_result.index.cmp(&b_tx_result.index) + } else { + Ordering::Equal + } + } + + // Keep the order when comparing two block triggers + (Self::Block(..), Self::Block(..)) => Ordering::Equal, + + // Event triggers always come first + (Self::Event { .. }, _) => Ordering::Greater, + (_, Self::Event { .. 
}) => Ordering::Less, + + // Block triggers always come last + (Self::Block(..), _) => Ordering::Less, + (_, Self::Block(..)) => Ordering::Greater, + + // Message triggers before Transaction triggers + (Self::Message(..), Self::Transaction(..)) => Ordering::Greater, + (Self::Transaction(..), Self::Message(..)) => Ordering::Less, + } + } +} + +impl PartialOrd for CosmosTrigger { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl TriggerData for CosmosTrigger { + fn error_context(&self) -> String { + self.error_context() + } +} + +impl MappingTriggerTrait for CosmosTrigger { + fn error_context(&self) -> String { + self.error_context() + } +} #[cfg(test)] mod tests { use crate::codec::TxResult; diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index fcf0c6e4bc4..0f0a2b51557 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -1,3 +1,4 @@ +use graph::blockchain::MappingTriggerTrait; use graph::blockchain::TriggerData; use graph::data::subgraph::API_VERSION_0_0_2; use graph::data::subgraph::API_VERSION_0_0_6; @@ -61,6 +62,21 @@ pub enum MappingTrigger { }, } +impl MappingTriggerTrait for MappingTrigger { + fn error_context(&self) -> std::string::String { + let transaction_id = match self { + MappingTrigger::Log { log, .. } => log.transaction_hash, + MappingTrigger::Call { call, .. } => call.transaction_hash, + MappingTrigger::Block { .. } => None, + }; + + match transaction_id { + Some(tx_hash) => format!("transaction {:x}", tx_hash), + None => String::new(), + } + } +} + // Logging the block is too verbose, so this strips the block from the trigger for Debug. 
impl std::fmt::Debug for MappingTrigger { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index 0fad89b2d63..58ed08a3c93 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -1,4 +1,5 @@ use graph::blockchain::Block; +use graph::blockchain::MappingTriggerTrait; use graph::blockchain::TriggerData; use graph::cheap_clone::CheapClone; use graph::prelude::hex; @@ -91,6 +92,22 @@ impl NearTrigger { NearTrigger::Receipt(receipt) => receipt.block.ptr().hash_as_h256(), } } + + fn error_context(&self) -> std::string::String { + match self { + NearTrigger::Block(..) => { + format!("Block #{} ({})", self.block_number(), self.block_hash()) + } + NearTrigger::Receipt(receipt) => { + format!( + "receipt id {}, block #{} ({})", + hex::encode(&receipt.receipt.receipt_id.as_ref().unwrap().bytes), + self.block_number(), + self.block_hash() + ) + } + } + } } impl Ord for NearTrigger { @@ -117,20 +134,14 @@ impl PartialOrd for NearTrigger { } impl TriggerData for NearTrigger { - fn error_context(&self) -> std::string::String { - match self { - NearTrigger::Block(..) 
=> { - format!("Block #{} ({})", self.block_number(), self.block_hash()) - } - NearTrigger::Receipt(receipt) => { - format!( - "receipt id {}, block #{} ({})", - hex::encode(&receipt.receipt.receipt_id.as_ref().unwrap().bytes), - self.block_number(), - self.block_hash() - ) - } - } + fn error_context(&self) -> String { + self.error_context() + } +} + +impl MappingTriggerTrait for NearTrigger { + fn error_context(&self) -> String { + self.error_context() } } diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 1df42a49bd2..ecd3d3d2e8a 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -2,7 +2,9 @@ use std::{collections::HashMap, str::FromStr, sync::Arc}; use anyhow::Error; use graph::{ - blockchain::{self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities}, + blockchain::{ + self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities, MappingTriggerTrait, + }, components::{ store::{DeploymentLocator, EntityKey, EntityType, SubgraphFork}, subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, @@ -25,6 +27,12 @@ use crate::{codec::entity_change::Operation, Block, Chain, NoopDataSourceTemplat #[derive(Eq, PartialEq, PartialOrd, Ord, Debug)] pub struct TriggerData {} +impl MappingTriggerTrait for TriggerData { + fn error_context(&self) -> String { + "Failed to process substreams block".to_string() + } +} + impl blockchain::TriggerData for TriggerData { // TODO(filipe): Can this be improved with some data from the block? 
fn error_context(&self) -> String { diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index f6f04b00adc..fedab33c4fa 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -14,7 +14,8 @@ use std::{convert::TryFrom, sync::Arc}; use super::{ block_stream::{self, BlockStream, FirehoseCursor}, client::ChainClient, - BlockIngestor, EmptyNodeCapabilities, HostFn, IngestorError, TriggerWithHandler, + BlockIngestor, EmptyNodeCapabilities, HostFn, IngestorError, MappingTriggerTrait, + TriggerWithHandler, }; use super::{ @@ -224,6 +225,11 @@ impl TriggerData for MockTriggerData { #[derive(Debug)] pub struct MockMappingTrigger {} +impl MappingTriggerTrait for MockMappingTrigger { + fn error_context(&self) -> String { + todo!() + } +} #[derive(Clone, Default)] pub struct MockTriggerFilter; diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 0a694644e63..a3e7a91e378 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -167,7 +167,7 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { /// Decoded trigger ready to be processed by the mapping. /// New implementations should have this be the same as `TriggerData`. - type MappingTrigger: Send + Sync + Debug; + type MappingTrigger: MappingTriggerTrait + Send + Sync + Debug; /// Trigger filter used as input to the triggers adapter. type TriggerFilter: TriggerFilter; @@ -335,6 +335,12 @@ pub trait TriggerData { fn error_context(&self) -> String; } +pub trait MappingTriggerTrait { + /// If there is an error when processing this trigger, this will be called to add relevant context. + /// For example a useful return is: `"block #<number> (<hash>), transaction <tx_hash>"`.
+ fn error_context(&self) -> String; +} + pub struct HostFnCtx<'a> { pub logger: Logger, pub block_ptr: BlockPtr, diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 29d07c99605..e7307d68ac1 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -8,8 +8,8 @@ mod tests; use crate::{ blockchain::{ - BlockPtr, Blockchain, DataSource as _, DataSourceTemplate as _, TriggerData as _, - UnresolvedDataSource as _, UnresolvedDataSourceTemplate as _, + BlockPtr, Blockchain, DataSource as _, DataSourceTemplate as _, MappingTriggerTrait, + TriggerData as _, UnresolvedDataSource as _, UnresolvedDataSourceTemplate as _, }, components::{ link_resolver::LinkResolver, @@ -415,6 +415,15 @@ pub enum MappingTrigger { Offchain(offchain::TriggerData), } +impl MappingTrigger { + pub fn error_context(&self) -> String { + match self { + Self::Onchain(trigger) => trigger.error_context(), + Self::Offchain(trigger) => format!("{:?}", trigger.source), + } + } +} + macro_rules! clone_data_source { ($t:ident) => { impl Clone for $t { From c4983a6ff5ac72229474dc80faf084883eeedb04 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Mon, 22 May 2023 12:12:12 +0530 Subject: [PATCH 0251/2104] runtime: include error_context from MappingTrigger in error messages. 
--- runtime/wasm/src/module/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 8f053536c4a..44aa40b76a5 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -207,8 +207,10 @@ impl WasmInstance { let handler_name = trigger.handler_name().to_owned(); let gas = self.gas.clone(); let logging_extras = trigger.logging_extras().cheap_clone(); + let error_context = trigger.trigger.error_context(); let asc_trigger = trigger.to_asc_ptr(self.instance_ctx_mut().deref_mut(), &gas)?; - self.invoke_handler(&handler_name, asc_trigger, logging_extras) + + self.invoke_handler(&handler_name, asc_trigger, logging_extras, error_context) } pub fn take_ctx(&mut self) -> WasmInstanceContext { @@ -238,6 +240,7 @@ impl WasmInstance { handler: &str, arg: AscPtr, logging_extras: Arc, + error_context: String, ) -> Result<(BlockState, Gas), MappingError> { let func = self .instance @@ -287,7 +290,8 @@ impl WasmInstance { }; if let Some(deterministic_error) = deterministic_error { - let message = format!("{:#}", deterministic_error).replace('\n', "\t"); + let message = + format!("{:#}, {}", deterministic_error, error_context).replace('\n', "\t"); // Log the error and restore the updates snapshot, effectively reverting the handler. 
error!(&self.instance_ctx().ctx.logger, From f20eaeec48865b1ffe08a27413d1aa8088bd8e6a Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Mon, 22 May 2023 12:43:04 +0530 Subject: [PATCH 0252/2104] runtime: add error_context to context of deterministic_error in invoke_handler --- runtime/wasm/src/module/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 44aa40b76a5..d591a818552 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -290,8 +290,8 @@ impl WasmInstance { }; if let Some(deterministic_error) = deterministic_error { - let message = - format!("{:#}, {}", deterministic_error, error_context).replace('\n', "\t"); + let deterministic_error = deterministic_error.context(error_context); + let message = format!("{:#}", deterministic_error).replace('\n', "\t"); // Log the error and restore the updates snapshot, effectively reverting the handler. error!(&self.instance_ctx().ctx.logger, From 3f0084a5cd07c661a27623a82600920f376bb0b1 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Mon, 22 May 2023 13:42:57 +0530 Subject: [PATCH 0253/2104] runtime: ignore error_context for offchain triggers --- graph/src/data_source/mod.rs | 6 +++--- runtime/wasm/src/module/mod.rs | 7 +++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index e7307d68ac1..19e20b28d22 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -416,10 +416,10 @@ pub enum MappingTrigger { } impl MappingTrigger { - pub fn error_context(&self) -> String { + pub fn error_context(&self) -> Option { match self { - Self::Onchain(trigger) => trigger.error_context(), - Self::Offchain(trigger) => format!("{:?}", trigger.source), + Self::Onchain(trigger) => Some(trigger.error_context()), + Self::Offchain(_) => None, // TODO: Add error context for offchain triggers } } } diff --git 
a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index d591a818552..fe05cf923be 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -240,7 +240,7 @@ impl WasmInstance { handler: &str, arg: AscPtr, logging_extras: Arc, - error_context: String, + error_context: Option, ) -> Result<(BlockState, Gas), MappingError> { let func = self .instance @@ -290,7 +290,10 @@ impl WasmInstance { }; if let Some(deterministic_error) = deterministic_error { - let deterministic_error = deterministic_error.context(error_context); + let deterministic_error = match error_context { + Some(error_context) => deterministic_error.context(error_context), + None => deterministic_error, + }; let message = format!("{:#}", deterministic_error).replace('\n', "\t"); // Log the error and restore the updates snapshot, effectively reverting the handler. From 9ec9447a1afffc626fe702740d7f78d08b2b34e5 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Thu, 25 May 2023 17:51:01 +0100 Subject: [PATCH 0254/2104] Optimize match and decode, turn on static filters (#4631) * perf: Optimize trigger matching * env: default GRAPH_STATIC_FILTERS_THRESHOLD to 10000 * perf: Better optimization for trigger matching --- chain/arweave/src/trigger.rs | 4 + chain/cosmos/src/trigger.rs | 4 + chain/ethereum/src/data_source.rs | 15 +-- chain/ethereum/src/trigger.rs | 18 +++ chain/near/src/trigger.rs | 4 + chain/substreams/src/trigger.rs | 6 +- core/src/subgraph/context.rs | 4 +- core/src/subgraph/context/instance.rs | 130 ++++++++++++++++++++-- core/src/subgraph/runner.rs | 2 +- core/src/subgraph/trigger_processor.rs | 6 +- graph/src/blockchain/mock.rs | 4 + graph/src/blockchain/mod.rs | 8 ++ graph/src/components/trigger_processor.rs | 6 +- graph/src/data_source/mod.rs | 7 ++ graph/src/data_source/offchain.rs | 20 +++- graph/src/env/mod.rs | 3 +- 16 files changed, 202 insertions(+), 39 deletions(-) diff --git a/chain/arweave/src/trigger.rs b/chain/arweave/src/trigger.rs index 
7652c03e9a5..25ce9c1eb7d 100644 --- a/chain/arweave/src/trigger.rs +++ b/chain/arweave/src/trigger.rs @@ -139,6 +139,10 @@ impl MappingTriggerTrait for ArweaveTrigger { fn error_context(&self) -> String { self.error_context() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } pub struct TransactionWithBlockPtr { diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs index 794f006665c..6bd6be949ee 100644 --- a/chain/cosmos/src/trigger.rs +++ b/chain/cosmos/src/trigger.rs @@ -214,6 +214,10 @@ impl CosmosTrigger { } } } + + fn address_match(&self) -> Option<&[u8]> { + None + } } impl Ord for CosmosTrigger { diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index 4e84c8c8be6..72c60cd86c8 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -504,20 +504,13 @@ impl DataSource { } fn matches_trigger_address(&self, trigger: &EthereumTrigger) -> bool { - let ds_address = match self.address { - Some(addr) => addr, - + let Some(ds_address) = self.address else { // 'wildcard' data sources match any trigger address. - None => return true, + return true }; - let trigger_address = match trigger { - EthereumTrigger::Block(_, EthereumBlockTriggerType::WithCallTo(address)) => address, - EthereumTrigger::Call(call) => &call.to, - EthereumTrigger::Log(log, _) => &log.address, - - // Unfiltered block triggers match any data source address. - EthereumTrigger::Block(_, EthereumBlockTriggerType::Every) => return true, + let Some(trigger_address) = trigger.address() else { + return true }; ds_address == *trigger_address diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index 0f0a2b51557..6f5c71d5f1f 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -282,6 +282,20 @@ impl EthereumTrigger { EthereumTrigger::Log(log, _) => log.block_hash.unwrap(), } } + + /// `None` means the trigger matches any address. 
+ pub fn address(&self) -> Option<&Address> { + match self { + EthereumTrigger::Block(_, EthereumBlockTriggerType::WithCallTo(address)) => { + Some(address) + } + EthereumTrigger::Call(call) => Some(&call.to), + EthereumTrigger::Log(log, _) => Some(&log.address), + + // Unfiltered block triggers match any data source address. + EthereumTrigger::Block(_, EthereumBlockTriggerType::Every) => None, + } + } } impl Ord for EthereumTrigger { @@ -348,6 +362,10 @@ impl TriggerData for EthereumTrigger { None => String::new(), } } + + fn address_match(&self) -> Option<&[u8]> { + self.address().map(|address| address.as_bytes()) + } } /// Ethereum block data. diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index 58ed08a3c93..e24ca0cdf77 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -143,6 +143,10 @@ impl MappingTriggerTrait for NearTrigger { fn error_context(&self) -> String { self.error_context() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } pub struct ReceiptWithOutcome { diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index ecd3d3d2e8a..a382b37e5fa 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -38,6 +38,10 @@ impl blockchain::TriggerData for TriggerData { fn error_context(&self) -> String { "Failed to process substreams block".to_string() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } impl ToAscPtr for TriggerData { @@ -172,7 +176,7 @@ where async fn process_trigger( &self, logger: &Logger, - _hosts: &[Arc], + _: Box + Send + '_>, block: &Arc, _trigger: &data_source::TriggerData, mut state: BlockState, diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index 6dd8c196a93..547a83f7611 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -97,7 +97,7 @@ impl> IndexingContext { ) -> Result, MappingError> { self.process_trigger_in_hosts( logger, - self.instance.hosts(), + 
self.instance.hosts_for_trigger(trigger), block, trigger, state, @@ -113,7 +113,7 @@ impl> IndexingContext { pub async fn process_trigger_in_hosts( &self, logger: &Logger, - hosts: &[Arc], + hosts: Box + Send + '_>, block: &Arc, trigger: &TriggerData, state: BlockState, diff --git a/core/src/subgraph/context/instance.rs b/core/src/subgraph/context/instance.rs index d760dad1386..d566bd8081d 100644 --- a/core/src/subgraph/context/instance.rs +++ b/core/src/subgraph/context/instance.rs @@ -3,7 +3,7 @@ use graph::{ blockchain::Blockchain, data_source::{ causality_region::CausalityRegionSeq, offchain, CausalityRegion, DataSource, - DataSourceTemplate, + DataSourceTemplate, TriggerData, }, prelude::*, }; @@ -18,12 +18,8 @@ pub struct SubgraphInstance> { templates: Arc>>, host_metrics: Arc, - /// Runtime hosts, one for each data source mapping. - /// - /// The runtime hosts are created and added in the same order the - /// data sources appear in the subgraph manifest. Incoming block - /// stream events are processed by the mappings in this same order. - hosts: Vec>, + /// The hosts represent the data sources in the subgraph. There is one host per data source. + hosts: Hosts, /// Maps the hash of a module to a channel to the thread in which the module is instantiated. 
module_cache: HashMap<[u8; 32], Sender>, @@ -53,7 +49,7 @@ where host_builder, subgraph_id, network, - hosts: Vec::new(), + hosts: Hosts::new(), module_cache: HashMap::new(), templates, host_metrics, @@ -111,6 +107,7 @@ where sender } }; + self.host_builder.build( self.network.clone(), self.subgraph_id.clone(), @@ -148,7 +145,7 @@ where let host = Arc::new(self.new_host(logger.clone(), data_source, &module_bytes)?); - Ok(if self.hosts.contains(&host) { + Ok(if self.hosts.hosts().contains(&host) { None } else { self.hosts.push(host.clone()); @@ -180,6 +177,7 @@ where } self.hosts + .hosts() .iter() .filter(|host| matches!(host.done_at(), Some(done_at) if done_at >= reverted_block)) .map(|host| { @@ -204,11 +202,121 @@ where } } - pub fn hosts(&self) -> &[Arc] { - &self.hosts + /// Returns all hosts which match the trigger's address. + /// This is a performance optimization to reduce the number of calls to `match_and_decode`. + pub fn hosts_for_trigger( + &self, + trigger: &TriggerData, + ) -> Box + Send + '_> { + self.hosts.iter_by_address(trigger.address_match()) } pub(super) fn causality_region_next_value(&mut self) -> CausalityRegion { self.causality_region_seq.next_val() } + + #[cfg(debug_assertions)] + pub fn hosts(&self) -> &[Arc] { + &self.hosts.hosts() + } +} + +/// Runtime hosts, one for each data source mapping. +/// +/// The runtime hosts are created and added to the vec in the same order the data sources appear in +/// the subgraph manifest. Incoming block stream events are processed by the mappings in this same +/// order. +/// +/// This structure also maintains a partition of the hosts by address, for faster trigger matching. +/// This partition uses the host's index in the main vec, to maintain the correct ordering. +struct Hosts> { + hosts: Vec>, + + // The `usize` is the index of the host in `hosts`. 
+ hosts_by_address: HashMap, Vec>, + hosts_without_address: Vec, +} + +impl> Hosts { + fn new() -> Self { + Self { + hosts: Vec::new(), + hosts_by_address: HashMap::new(), + hosts_without_address: Vec::new(), + } + } + + fn hosts(&self) -> &[Arc] { + &self.hosts + } + + fn last(&self) -> Option<&Arc> { + self.hosts.last() + } + + fn len(&self) -> usize { + self.hosts.len() + } + + fn push(&mut self, host: Arc) { + self.hosts.push(host.cheap_clone()); + let idx = self.hosts.len() - 1; + let address = host.data_source().address(); + match address { + Some(address) => { + self.hosts_by_address + .entry(address.into()) + .or_default() + .push(idx); + } + None => { + self.hosts_without_address.push(idx); + } + } + } + + fn pop(&mut self) { + let Some(host) = self.hosts.pop() else { return }; + let address = host.data_source().address(); + match address { + Some(address) => { + // Unwrap and assert: The same host we just popped must be the last one in `hosts_by_address`. + let hosts = self.hosts_by_address.get_mut(address.as_slice()).unwrap(); + let idx = hosts.pop().unwrap(); + assert_eq!(idx, self.hosts.len()); + } + None => { + // Unwrap and assert: The same host we just popped must be the last one in `hosts_without_address`. + let idx = self.hosts_without_address.pop().unwrap(); + assert_eq!(idx, self.hosts.len()); + } + } + } + + /// Returns an iterator over all hosts that match the given address, in the order they were inserted in `hosts`. + /// Note that this always includes the hosts without an address, since they match all addresses. + /// If no address is provided, returns an iterator over all hosts. + fn iter_by_address( + &self, + address: Option>, + ) -> Box + Send + '_> { + let Some(address) = address else { + return Box::new(self.hosts.iter().map(|host| host.as_ref())); + }; + + let mut matching_hosts: Vec = self + .hosts_by_address + .get(address.as_slice()) + .into_iter() + .flatten() // Flatten non-existing `address` into empty. 
+ .copied() + .chain(self.hosts_without_address.iter().copied()) + .collect(); + matching_hosts.sort(); + Box::new( + matching_hosts + .into_iter() + .map(move |idx| self.hosts[idx].as_ref()), + ) + } } diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 34601dbdc75..627ed8219cf 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -343,7 +343,7 @@ where .ctx .process_trigger_in_hosts( &logger, - &runtime_hosts, + Box::new(runtime_hosts.iter().map(|host| host.as_ref())), &block, &TriggerData::Onchain(trigger), block_state, diff --git a/core/src/subgraph/trigger_processor.rs b/core/src/subgraph/trigger_processor.rs index 02002a32c88..87d3a004e15 100644 --- a/core/src/subgraph/trigger_processor.rs +++ b/core/src/subgraph/trigger_processor.rs @@ -19,10 +19,10 @@ where C: Blockchain, T: RuntimeHostBuilder, { - async fn process_trigger( - &self, + async fn process_trigger<'a>( + &'a self, logger: &Logger, - hosts: &[Arc], + hosts: Box + Send + 'a>, block: &Arc, trigger: &TriggerData, mut state: BlockState, diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index fedab33c4fa..057178adf7d 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -220,6 +220,10 @@ impl TriggerData for MockTriggerData { fn error_context(&self) -> String { todo!() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } #[derive(Debug)] diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index a3e7a91e378..05db64be34e 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -333,6 +333,14 @@ pub trait TriggerData { /// If there is an error when processing this trigger, this will called to add relevant context. /// For example an useful return is: `"block # (), transaction ". 
fn error_context(&self) -> String; + + /// If this trigger can only possibly match data sources with a specific address, then it can be + /// returned here for improved trigger matching performance, which helps subgraphs with many + /// data sources. But this optimization is not required, so returning `None` is always correct. + /// + /// When this does return `Some`, make sure that the `DataSource::address` of matching data + /// sources is equal to the addresssed returned here. + fn address_match(&self) -> Option<&[u8]>; } pub trait MappingTriggerTrait { diff --git a/graph/src/components/trigger_processor.rs b/graph/src/components/trigger_processor.rs index ce45caaac59..ae58b9cd683 100644 --- a/graph/src/components/trigger_processor.rs +++ b/graph/src/components/trigger_processor.rs @@ -16,10 +16,10 @@ where C: Blockchain, T: RuntimeHostBuilder, { - async fn process_trigger( - &self, + async fn process_trigger<'a>( + &'a self, logger: &Logger, - hosts: &[Arc], + hosts: Box + Send + 'a>, block: &Arc, trigger: &TriggerData, mut state: BlockState, diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 19e20b28d22..c929ba83530 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -407,6 +407,13 @@ impl TriggerData { Self::Offchain(trigger) => format!("{:?}", trigger.source), } } + + pub fn address_match(&self) -> Option> { + match self { + Self::Onchain(trigger) => trigger.address_match().map(|address| address.to_owned()), + Self::Offchain(trigger) => trigger.source.address(), + } + } } #[derive(Debug)] diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index efd4aa051f7..f634782e2c3 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -198,12 +198,8 @@ impl DataSource { }) } - /// The concept of an address may or not make sense for an offchain data source, but this is - /// used as the value to be returned to mappings from the `dataSource.address()` 
host function. pub fn address(&self) -> Option> { - match self.source { - Source::Ipfs(ref cid) => Some(cid.to_bytes()), - } + self.source.address() } pub(super) fn is_duplicate_of(&self, b: &DataSource) -> bool { @@ -241,6 +237,20 @@ pub enum Source { Ipfs(CidFile), } +impl Source { + /// The concept of an address may or not make sense for an offchain data source, but graph node + /// will use this in a few places where some sort of not necessarily unique id is useful: + /// 1. This is used as the value to be returned to mappings from the `dataSource.address()` host + /// function, so changing this is a breaking change. + /// 2. This is used to match with triggers with hosts in `fn hosts_for_trigger`, so make sure + /// the `source` of the data source is equal the `source` of the `TriggerData`. + pub fn address(&self) -> Option> { + match self { + Source::Ipfs(ref cid) => Some(cid.to_bytes()), + } + } +} + #[derive(Clone, Debug)] pub struct Mapping { pub language: String, diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index f02c35aa3b2..7e7f071e144 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -342,8 +342,7 @@ struct Inner { external_http_base_url: Option, #[envconfig(from = "EXTERNAL_WS_BASE_URL")] external_ws_base_url: Option, - // Setting this to be unrealistically high so it doesn't get triggered. - #[envconfig(from = "GRAPH_STATIC_FILTERS_THRESHOLD", default = "100000000")] + #[envconfig(from = "GRAPH_STATIC_FILTERS_THRESHOLD", default = "10000")] static_filters_threshold: usize, // JSON-RPC specific. 
#[envconfig(from = "ETHEREUM_REORG_THRESHOLD", default = "250")] From cb0471e566c02e83bd984c95507bedf55921a6ce Mon Sep 17 00:00:00 2001 From: Jacob T Firek <106350168+jtfirek@users.noreply.github.com> Date: Fri, 26 May 2023 05:17:53 -0400 Subject: [PATCH 0255/2104] CONTRIBUTING.md: Updating deprecated rustfmt-preview (#4645) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index aa67ac5d73d..9fe5c464bd3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,7 +15,7 @@ Install development helpers: ```sh cargo install cargo-watch -rustup component add rustfmt-preview +rustup component add rustfmt ``` Set environment variables: From 15941da4fc98693e5558afbf303e150a2adb2970 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Fri, 26 May 2023 10:20:53 +0100 Subject: [PATCH 0256/2104] fix: fix compilation errors (#4648) --- chain/arweave/src/trigger.rs | 8 ++++---- chain/cosmos/src/trigger.rs | 8 ++++---- chain/near/src/trigger.rs | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/chain/arweave/src/trigger.rs b/chain/arweave/src/trigger.rs index 25ce9c1eb7d..871dd5e11dd 100644 --- a/chain/arweave/src/trigger.rs +++ b/chain/arweave/src/trigger.rs @@ -133,16 +133,16 @@ impl TriggerData for ArweaveTrigger { fn error_context(&self) -> String { self.error_context() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } impl MappingTriggerTrait for ArweaveTrigger { fn error_context(&self) -> String { self.error_context() } - - fn address_match(&self) -> Option<&[u8]> { - None - } } pub struct TransactionWithBlockPtr { diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs index 6bd6be949ee..f4601d5e678 100644 --- a/chain/cosmos/src/trigger.rs +++ b/chain/cosmos/src/trigger.rs @@ -214,10 +214,6 @@ impl CosmosTrigger { } } } - - fn address_match(&self) -> Option<&[u8]> { - None - } } impl Ord for CosmosTrigger { @@ -267,6 +263,10 @@ impl TriggerData for 
CosmosTrigger { fn error_context(&self) -> String { self.error_context() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } impl MappingTriggerTrait for CosmosTrigger { diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index e24ca0cdf77..4164f4302e6 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -137,16 +137,16 @@ impl TriggerData for NearTrigger { fn error_context(&self) -> String { self.error_context() } + + fn address_match(&self) -> Option<&[u8]> { + None + } } impl MappingTriggerTrait for NearTrigger { fn error_context(&self) -> String { self.error_context() } - - fn address_match(&self) -> Option<&[u8]> { - None - } } pub struct ReceiptWithOutcome { From fa0ed65c6821790c52985ba9ffe3e4fa49dfb149 Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Fri, 26 May 2023 14:37:50 +0100 Subject: [PATCH 0257/2104] fix(offchain): Avoid starvation in the offchain monitor (#4649) This addresses both sides of the issue, by making sure the task holding the `CallAll` doesn't hang, and by removing the concurrency control done by `Buffer`, which may be the reason why PR #4570 didn't fully work. --- core/src/polling_monitor/ipfs_service.rs | 5 +++-- core/src/polling_monitor/mod.rs | 13 ++++++++----- core/src/subgraph/context.rs | 6 ++++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/core/src/polling_monitor/ipfs_service.rs b/core/src/polling_monitor/ipfs_service.rs index f9436b851ca..d4fdeb7964a 100644 --- a/core/src/polling_monitor/ipfs_service.rs +++ b/core/src/polling_monitor/ipfs_service.rs @@ -30,8 +30,9 @@ pub fn ipfs_service( .service_fn(move |req| ipfs.cheap_clone().call_inner(req)) .boxed(); - // The `Buffer` makes it so the rate and concurrency limit are shared among clones. - Buffer::new(svc, 1) + // The `Buffer` makes it so the rate limit is shared among clones. + // Make it unbounded to avoid any risk of starvation. 
+ Buffer::new(svc, u32::MAX as usize) } #[derive(Clone)] diff --git a/core/src/polling_monitor/mod.rs b/core/src/polling_monitor/mod.rs index 19f30f28cda..7ead8e57ebc 100644 --- a/core/src/polling_monitor/mod.rs +++ b/core/src/polling_monitor/mod.rs @@ -98,7 +98,7 @@ impl Queue { /// `Option`, to represent the object not being found. pub fn spawn_monitor( service: S, - response_sender: mpsc::Sender<(ID, Res)>, + response_sender: mpsc::UnboundedSender<(ID, Res)>, logger: Logger, metrics: PollingMonitorMetrics, ) -> PollingMonitor @@ -149,10 +149,13 @@ where let mut backoffs = Backoffs::new(); let mut responses = service.call_all(queue_to_stream).unordered().boxed(); while let Some(response) = responses.next().await { + // Note: Be careful not to `await` within this loop, as that could block requests in + // the `CallAll` from being polled. This can cause starvation as those requests may + // be holding on to resources such as slots for concurrent calls. match response { Ok((id, Some(response))) => { backoffs.remove(&id); - let send_result = response_sender.send((id, response)).await; + let send_result = response_sender.send((id, response)); if send_result.is_err() { // The receiver has been dropped, cancel this task. 
break; @@ -250,10 +253,10 @@ mod tests { fn setup() -> ( mock::Handle<&'static str, Option<&'static str>>, PollingMonitor<&'static str>, - mpsc::Receiver<(&'static str, &'static str)>, + mpsc::UnboundedReceiver<(&'static str, &'static str)>, ) { let (svc, handle) = mock::pair(); - let (tx, rx) = mpsc::channel(10); + let (tx, rx) = mpsc::unbounded_channel(); let monitor = spawn_monitor(svc, tx, log::discard(), PollingMonitorMetrics::mock()); (handle, monitor, rx) } @@ -263,7 +266,7 @@ mod tests { let (svc, mut handle) = mock::pair(); let shared_svc = tower::buffer::Buffer::new(tower::limit::ConcurrencyLimit::new(svc, 1), 1); let make_monitor = |svc| { - let (tx, rx) = mpsc::channel(10); + let (tx, rx) = mpsc::unbounded_channel(); let metrics = PollingMonitorMetrics::mock(); let monitor = spawn_monitor(svc, tx, log::discard(), metrics); (monitor, rx) diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index 547a83f7611..c87f03d2faf 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -185,7 +185,7 @@ impl> IndexingContext { pub struct OffchainMonitor { ipfs_monitor: PollingMonitor, - ipfs_monitor_rx: mpsc::Receiver<(CidFile, Bytes)>, + ipfs_monitor_rx: mpsc::UnboundedReceiver<(CidFile, Bytes)>, } impl OffchainMonitor { @@ -195,7 +195,9 @@ impl OffchainMonitor { subgraph_hash: &DeploymentHash, ipfs_service: IpfsService, ) -> Self { - let (ipfs_monitor_tx, ipfs_monitor_rx) = mpsc::channel(10); + // The channel is unbounded, as it is expected that `fn ready_offchain_events` is called + // frequently, or at least with the same frequency that requests are sent. 
+ let (ipfs_monitor_tx, ipfs_monitor_rx) = mpsc::unbounded_channel(); let ipfs_monitor = spawn_monitor( ipfs_service, ipfs_monitor_tx, From 4ae0eff45e2e738d6b400e053ee842cc5ab224da Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 2 May 2023 14:45:33 -0700 Subject: [PATCH 0258/2104] all: Make write::EntityMod and EntityModification the same --- core/src/subgraph/runner.rs | 9 +++- graph/src/components/store/entity_cache.rs | 30 ++++++++++--- graph/src/components/store/mod.rs | 42 ++++++++++++++++--- graph/src/components/store/write.rs | 28 +++++++++---- runtime/test/src/test.rs | 8 ++-- store/test-store/src/store.rs | 2 +- store/test-store/tests/graph/entity_cache.rs | 37 ++++++---------- .../tests/postgres/relational_bytes.rs | 6 +-- store/test-store/tests/postgres/store.rs | 28 +++++++++---- 9 files changed, 128 insertions(+), 62 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 627ed8219cf..6af6f069d12 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -403,7 +403,7 @@ where evict_stats, } = block_state .entity_cache - .as_modifications() + .as_modifications(block.number()) .map_err(|e| BlockProcessingError::Unknown(e.into()))?; section.end(); @@ -738,7 +738,12 @@ where return Err(anyhow!("{}", err.to_string())); } - mods.extend(block_state.entity_cache.as_modifications()?.modifications); + mods.extend( + block_state + .entity_cache + .as_modifications(block.number())? 
+ .modifications, + ); processed_data_sources.extend(block_state.processed_data_sources); } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 9dee1b84756..324551d0809 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -11,7 +11,7 @@ use crate::prelude::ENV_VARS; use crate::schema::InputSchema; use crate::util::lfu_cache::{EvictStats, LfuCache}; -use super::{DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; +use super::{BlockNumber, DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; /// The scope in which the `EntityCache` should perform a `get` operation pub enum GetScope { @@ -251,7 +251,10 @@ impl EntityCache { /// to the current state is actually needed. /// /// Also returns the updated `LfuCache`. - pub fn as_modifications(mut self) -> Result { + pub fn as_modifications( + mut self, + block: BlockNumber, + ) -> Result { assert!(!self.in_handler); // The first step is to make sure all entities being set are in `self.current`. 
@@ -285,7 +288,12 @@ impl EntityCache { | (None, EntityOp::Overwrite(mut updates)) => { updates.remove_null_fields(); self.current.insert(key.clone(), Some(updates.clone())); - Some(Insert { key, data: updates }) + Some(Insert { + key, + data: updates, + block, + end: None, + }) } // Entity may have been changed (Some(current), EntityOp::Update(updates)) => { @@ -294,7 +302,12 @@ impl EntityCache { .map_err(|e| key.unknown_attribute(e))?; self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { key, data }) + Some(Overwrite { + key, + data, + block, + end: None, + }) } else { None } @@ -303,7 +316,12 @@ impl EntityCache { (Some(current), EntityOp::Overwrite(data)) => { self.current.insert(key.clone(), Some(data.clone())); if current != data { - Some(Overwrite { key, data }) + Some(Overwrite { + key, + data, + block, + end: None, + }) } else { None } @@ -311,7 +329,7 @@ impl EntityCache { // Existing entity was deleted (Some(_), EntityOp::Remove) => { self.current.insert(key.clone(), None); - Some(Remove { key }) + Some(Remove { key, block }) } // Entity was deleted, but it doesn't exist in the store (None, EntityOp::Remove) => None, diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 1a6fd3e88fa..802e1318563 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -725,7 +725,7 @@ impl StoreEvent { .map(|op| { use self::EntityModification::*; match op { - Insert { key, .. } | Overwrite { key, .. } | Remove { key } => { + Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. 
} => { EntityChange::for_data(subgraph_id.clone(), key.clone()) } } @@ -1008,18 +1008,50 @@ pub type PoolWaitStats = Arc>; #[derive(Clone, Debug, PartialEq, Eq)] pub enum EntityModification { /// Insert the entity - Insert { key: EntityKey, data: Entity }, + Insert { + key: EntityKey, + data: Entity, + block: BlockNumber, + end: Option, + }, /// Update the entity by overwriting it - Overwrite { key: EntityKey, data: Entity }, + Overwrite { + key: EntityKey, + data: Entity, + block: BlockNumber, + end: Option, + }, /// Remove the entity - Remove { key: EntityKey }, + Remove { key: EntityKey, block: BlockNumber }, } impl EntityModification { + pub fn insert(key: EntityKey, data: Entity, block: BlockNumber) -> Self { + EntityModification::Insert { + key, + data, + block, + end: None, + } + } + + pub fn overwrite(key: EntityKey, data: Entity, block: BlockNumber) -> Self { + EntityModification::Overwrite { + key, + data, + block, + end: None, + } + } + + pub fn remove(key: EntityKey, block: BlockNumber) -> Self { + EntityModification::Remove { key, block } + } + pub fn entity_ref(&self) -> &EntityKey { use EntityModification::*; match self { - Insert { key, .. } | Overwrite { key, .. } | Remove { key } => key, + Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. 
} => key, } } diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index df3864079f8..72e065c4a1d 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -101,27 +101,37 @@ impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { } impl EntityMod { - fn new(m: EntityModification, block: BlockNumber) -> Self { + fn new(m: EntityModification) -> Self { match m { - EntityModification::Insert { key, data } => Self::Insert { + EntityModification::Insert { key, data, block, - end: None, + end, + } => Self::Insert { + key, + data, + block, + end, }, - EntityModification::Overwrite { key, data } => Self::Overwrite { + EntityModification::Overwrite { key, data, block, - end: None, + end, + } => Self::Overwrite { + key, + data, + block, + end, }, - EntityModification::Remove { key } => Self::Remove { key, block }, + EntityModification::Remove { key, block } => Self::Remove { key, block }, } } #[cfg(debug_assertions)] - pub fn new_test(m: EntityModification, block: BlockNumber) -> Self { - Self::new(m, block) + pub fn new_test(m: EntityModification) -> Self { + Self::new(m) } pub fn id(&self) -> &Word { @@ -321,7 +331,7 @@ impl RowGroup { )); } - let row = EntityMod::new(emod, block); + let row = EntityMod::new(emod); self.append_row(row) } diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index ee92d3055e3..91b12e870e8 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -441,7 +441,7 @@ fn make_thing(id: &str, value: &str) -> (String, EntityModification) { let key = EntityKey::data("Thing".to_string(), id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), - EntityModification::Insert { key, data }, + EntityModification::insert(key, data, 0), ) } @@ -485,7 +485,7 @@ async fn run_ipfs_map( .ctx .state .entity_cache - .as_modifications()? + .as_modifications(0)? 
.modifications; // Bring the modifications into a predictable order (by entity_id) @@ -1017,7 +1017,7 @@ async fn test_entity_store(api_version: Version) { &mut module.instance_ctx_mut().ctx.state.entity_cache, EntityCache::new(Arc::new(writable.clone())), ); - let mut mods = cache.as_modifications().unwrap().modifications; + let mut mods = cache.as_modifications(0).unwrap().modifications; assert_eq!(1, mods.len()); match mods.pop().unwrap() { EntityModification::Overwrite { data, .. } => { @@ -1038,7 +1038,7 @@ async fn test_entity_store(api_version: Version) { .ctx .state .entity_cache - .as_modifications() + .as_modifications(0) .unwrap() .modifications; assert_eq!(1, mods.len()); diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 1a1737f9eb1..e7896040628 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -290,7 +290,7 @@ pub async fn transact_entities_and_dynamic_data_sources( let mut entity_cache = EntityCache::new(Arc::new(store.clone())); entity_cache.append(ops); let mods = entity_cache - .as_modifications() + .as_modifications(block_ptr_to.number) .expect("failed to convert to modifications") .modifications; let metrics_registry = Arc::new(MetricsRegistry::mock()); diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 12a05e1caba..76f46703b05 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -191,7 +191,7 @@ fn sort_by_entity_key(mut mods: Vec) -> Vec id: "mogwai", name: "Mogwai", founded: 1995 } - }]) + sort_by_entity_key(vec![EntityModification::overwrite( + update_key, + entity! 
{ SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 }, + 0 + )]) ); } diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 559114113d1..52dd14dbc96 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -87,7 +87,7 @@ pub fn row_group_update( let mut group = RowGroup::new(entity_type.clone()); for (key, data) in data { group - .push(EntityModification::Overwrite { key, data }, block) + .push(EntityModification::overwrite(key, data, block), block) .unwrap(); } group @@ -101,7 +101,7 @@ pub fn row_group_insert( let mut group = RowGroup::new(entity_type.clone()); for (key, data) in data { group - .push(EntityModification::Insert { key, data }, block) + .push(EntityModification::insert(key, data, block), block) .unwrap(); } group @@ -115,7 +115,7 @@ pub fn row_group_delete( let mut group = RowGroup::new(entity_type.clone()); for key in data { group - .push(EntityModification::Remove { key }, block) + .push(EntityModification::remove(key, block), block) .unwrap(); } group diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 68b44c7958f..0fff7701de1 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -1492,12 +1492,17 @@ fn handle_large_string_with_index() { const ONE: &str = "large_string_one"; const TWO: &str = "large_string_two"; - fn make_insert_op(id: &str, name: &str, schema: &InputSchema) -> EntityModification { + fn make_insert_op( + id: &str, + name: &str, + schema: &InputSchema, + block: BlockNumber, + ) -> EntityModification { let data = entity! 
{ schema => id: id, name: name }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); - EntityModification::Insert { key, data } + EntityModification::insert(key, data, block) } run_test(|store, writable, deployment| async move { @@ -1518,13 +1523,14 @@ fn handle_large_string_with_index() { metrics_registry.clone(), ); + let block = TEST_BLOCK_3_PTR.number; writable .transact_block_operations( TEST_BLOCK_3_PTR.clone(), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_text, &schema), - make_insert_op(TWO, &other_text, &schema), + make_insert_op(ONE, &long_text, &schema, block), + make_insert_op(TWO, &other_text, &schema, block), ], &stopwatch_metrics, Vec::new(), @@ -1580,12 +1586,17 @@ fn handle_large_bytea_with_index() { const ONE: &str = "large_string_one"; const TWO: &str = "large_string_two"; - fn make_insert_op(id: &str, name: &[u8], schema: &InputSchema) -> EntityModification { + fn make_insert_op( + id: &str, + name: &[u8], + schema: &InputSchema, + block: BlockNumber, + ) -> EntityModification { let data = entity! 
{ schema => id: id, bin_name: scalar::Bytes::from(name) }; let key = EntityKey::data(USER.to_owned(), id.to_owned()); - EntityModification::Insert { key, data } + EntityModification::insert(key, data, block) } run_test(|store, writable, deployment| async move { @@ -1611,13 +1622,14 @@ fn handle_large_bytea_with_index() { metrics_registry.clone(), ); + let block = TEST_BLOCK_3_PTR.number; writable .transact_block_operations( TEST_BLOCK_3_PTR.clone(), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_bytea, &schema), - make_insert_op(TWO, &other_bytea, &schema), + make_insert_op(ONE, &long_bytea, &schema, block), + make_insert_op(TWO, &other_bytea, &schema, block), ], &stopwatch_metrics, Vec::new(), From 4264b47c4ec2c7c177cc550f4905461013fbacfd Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 15:43:41 +0100 Subject: [PATCH 0259/2104] graph: Rename EntityMod to EntityModification Also, remove original EntityModification. This avoids a conversion from EntityModification to EntityMod when creating a batch. 
--- graph/src/components/store/entity_cache.rs | 3 +- graph/src/components/store/mod.rs | 72 +------- graph/src/components/store/write.rs | 192 +++++++++++---------- graph/src/lib.rs | 14 +- 4 files changed, 112 insertions(+), 169 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 324551d0809..1db072fb828 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -5,6 +5,7 @@ use std::fmt::{self, Debug}; use std::sync::Arc; use crate::cheap_clone::CheapClone; +use crate::components::store::write::EntityModification; use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; use crate::data::store::IntoEntityIterator; use crate::prelude::ENV_VARS; @@ -279,7 +280,7 @@ impl EntityCache { let mut mods = Vec::new(); for (key, update) in self.updates { - use s::EntityModification::*; + use EntityModification::*; let current = self.current.remove(&key).and_then(|entity| entity); let modification = match (current, update) { diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 802e1318563..fcbb0e53b7a 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -26,6 +26,7 @@ use std::time::Duration; use std::{fmt, io}; use crate::blockchain::Block; +use crate::components::store::write::EntityModification; use crate::data::store::scalar::Bytes; use crate::data::store::*; use crate::data::value::Word; @@ -723,7 +724,7 @@ impl StoreEvent { let changes: Vec<_> = mods .into_iter() .map(|op| { - use self::EntityModification::*; + use EntityModification::*; match op { Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. 
} => { EntityChange::for_data(subgraph_id.clone(), key.clone()) @@ -1002,75 +1003,6 @@ impl Display for DeploymentLocator { // connection checkouts pub type PoolWaitStats = Arc>; -/// An entity operation that can be transacted into the store; as opposed to -/// `EntityOperation`, we already know whether a `Set` should be an `Insert` -/// or `Update` -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum EntityModification { - /// Insert the entity - Insert { - key: EntityKey, - data: Entity, - block: BlockNumber, - end: Option, - }, - /// Update the entity by overwriting it - Overwrite { - key: EntityKey, - data: Entity, - block: BlockNumber, - end: Option, - }, - /// Remove the entity - Remove { key: EntityKey, block: BlockNumber }, -} - -impl EntityModification { - pub fn insert(key: EntityKey, data: Entity, block: BlockNumber) -> Self { - EntityModification::Insert { - key, - data, - block, - end: None, - } - } - - pub fn overwrite(key: EntityKey, data: Entity, block: BlockNumber) -> Self { - EntityModification::Overwrite { - key, - data, - block, - end: None, - } - } - - pub fn remove(key: EntityKey, block: BlockNumber) -> Self { - EntityModification::Remove { key, block } - } - - pub fn entity_ref(&self) -> &EntityKey { - use EntityModification::*; - match self { - Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. } => key, - } - } - - pub fn entity(&self) -> Option<&Entity> { - match self { - EntityModification::Insert { data, .. } - | EntityModification::Overwrite { data, .. } => Some(data), - EntityModification::Remove { .. } => None, - } - } - - pub fn is_remove(&self) -> bool { - match self { - EntityModification::Remove { .. } => true, - _ => false, - } - } -} - /// A representation of entity operations that can be accumulated. 
#[derive(Debug, Clone)] enum EntityOp { diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 72e065c4a1d..62e26bdbf21 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -12,10 +12,7 @@ use crate::{ util::cache_weight::CacheWeight, }; -use super::{ - BlockNumber, EntityKey, EntityModification, EntityType, StoreError, StoreEvent, - StoredDynamicDataSource, -}; +use super::{BlockNumber, EntityKey, EntityType, StoreError, StoreEvent, StoredDynamicDataSource}; /// A data structure similar to `EntityModification`, but tagged with a /// block. We might eventually replace `EntityModification` with this, but @@ -39,8 +36,8 @@ use super::{ /// `append_row`, eliminates an update in the database which would otherwise /// be needed to clamp the open block range of the entity to the block /// contained in `end` -#[derive(Debug, PartialEq, Eq)] -pub enum EntityMod { +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum EntityModification { /// Insert the entity Insert { key: EntityKey, @@ -71,18 +68,18 @@ pub struct EntityWrite<'a> { pub end: Option, } -impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { +impl<'a> TryFrom<&'a EntityModification> for EntityWrite<'a> { type Error = (); - fn try_from(emod: &'a EntityMod) -> Result { + fn try_from(emod: &'a EntityModification) -> Result { match emod { - EntityMod::Insert { + EntityModification::Insert { key, data, block, end, } - | EntityMod::Overwrite { + | EntityModification::Overwrite { key, data, block, @@ -95,58 +92,25 @@ impl<'a> TryFrom<&'a EntityMod> for EntityWrite<'a> { end: *end, }), - EntityMod::Remove { .. } => Err(()), + EntityModification::Remove { .. 
} => Err(()), } } } -impl EntityMod { - fn new(m: EntityModification) -> Self { - match m { - EntityModification::Insert { - key, - data, - block, - end, - } => Self::Insert { - key, - data, - block, - end, - }, - EntityModification::Overwrite { - key, - data, - block, - end, - } => Self::Overwrite { - key, - data, - block, - end, - }, - EntityModification::Remove { key, block } => Self::Remove { key, block }, - } - } - - #[cfg(debug_assertions)] - pub fn new_test(m: EntityModification) -> Self { - Self::new(m) - } - +impl EntityModification { pub fn id(&self) -> &Word { match self { - EntityMod::Insert { key, .. } - | EntityMod::Overwrite { key, .. } - | EntityMod::Remove { key, .. } => &key.entity_id, + EntityModification::Insert { key, .. } + | EntityModification::Overwrite { key, .. } + | EntityModification::Remove { key, .. } => &key.entity_id, } } fn block(&self) -> BlockNumber { match self { - EntityMod::Insert { block, .. } - | EntityMod::Overwrite { block, .. } - | EntityMod::Remove { block, .. } => *block, + EntityModification::Insert { block, .. } + | EntityModification::Overwrite { block, .. } + | EntityModification::Remove { block, .. } => *block, } } @@ -154,8 +118,8 @@ impl EntityMod { /// new row, for either a new or an existing entity fn is_write(&self) -> bool { match self { - EntityMod::Insert { .. } | EntityMod::Overwrite { .. } => true, - EntityMod::Remove { .. } => false, + EntityModification::Insert { .. } | EntityModification::Overwrite { .. } => true, + EntityModification::Remove { .. } => false, } } @@ -168,44 +132,43 @@ impl EntityMod { /// Return `true` if `self` requires clamping of an existing version fn is_clamp(&self) -> bool { match self { - EntityMod::Insert { .. } => false, - EntityMod::Overwrite { .. } | EntityMod::Remove { .. } => true, + EntityModification::Insert { .. } => false, + EntityModification::Overwrite { .. } | EntityModification::Remove { .. 
} => true, } } pub fn creates_entity(&self) -> bool { match self { - EntityMod::Insert { .. } => true, - EntityMod::Overwrite { .. } | EntityMod::Remove { .. } => false, + EntityModification::Insert { .. } => true, + EntityModification::Overwrite { .. } | EntityModification::Remove { .. } => false, } } fn key(&self) -> &EntityKey { + use EntityModification::*; match self { - EntityMod::Insert { key, .. } - | EntityMod::Overwrite { key, .. } - | EntityMod::Remove { key, .. } => key, + Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. } => key, } } fn entity_count_change(&self) -> i32 { match self { - EntityMod::Insert { end: None, .. } => 1, - EntityMod::Insert { end: Some(_), .. } => { + EntityModification::Insert { end: None, .. } => 1, + EntityModification::Insert { end: Some(_), .. } => { // Insert followed by a remove 0 } - EntityMod::Overwrite { end: None, .. } => 0, - EntityMod::Overwrite { end: Some(_), .. } => { + EntityModification::Overwrite { end: None, .. } => 0, + EntityModification::Overwrite { end: Some(_), .. } => { // Overwrite followed by a remove -1 } - EntityMod::Remove { .. } => -1, + EntityModification::Remove { .. } => -1, } } fn clamp(&mut self, block: BlockNumber) -> Result<(), StoreError> { - use EntityMod::*; + use EntityModification::*; match self { Insert { end, .. } | Overwrite { end, .. } => { @@ -231,7 +194,7 @@ impl EntityMod { /// Turn an `Overwrite` into an `Insert`, return an error if this is a `Remove` fn as_insert(self, entity_type: &EntityType) -> Result { - use EntityMod::*; + use EntityModification::*; match self { Insert { .. 
} => Ok(self), @@ -259,7 +222,7 @@ impl EntityMod { fn as_entity_op(&self, at: BlockNumber) -> EntityOp<'_> { debug_assert!(self.block() <= at); - use EntityMod::*; + use EntityModification::*; match self { Insert { @@ -297,6 +260,52 @@ impl EntityMod { } } +impl EntityModification { + pub fn insert(key: EntityKey, data: Entity, block: BlockNumber) -> Self { + EntityModification::Insert { + key, + data, + block, + end: None, + } + } + + pub fn overwrite(key: EntityKey, data: Entity, block: BlockNumber) -> Self { + EntityModification::Overwrite { + key, + data, + block, + end: None, + } + } + + pub fn remove(key: EntityKey, block: BlockNumber) -> Self { + EntityModification::Remove { key, block } + } + + pub fn entity_ref(&self) -> &EntityKey { + use EntityModification::*; + match self { + Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. } => key, + } + } + + pub fn entity(&self) -> Option<&Entity> { + match self { + EntityModification::Insert { data, .. } + | EntityModification::Overwrite { data, .. } => Some(data), + EntityModification::Remove { .. } => None, + } + } + + pub fn is_remove(&self) -> bool { + match self { + EntityModification::Remove { .. } => true, + _ => false, + } + } +} + /// A list of entity changes grouped by the entity type #[derive(Debug)] pub struct RowGroup { @@ -304,7 +313,7 @@ pub struct RowGroup { /// All changes for this entity type, ordered by block; i.e., if `i < j` /// then `rows[i].block() <= rows[j].block()`. Several methods on this /// struct rely on the fact that this ordering is observed. 
- rows: Vec, + rows: Vec, } impl RowGroup { @@ -331,8 +340,7 @@ impl RowGroup { )); } - let row = EntityMod::new(emod); - self.append_row(row) + self.append_row(emod) } fn row_count(&self) -> usize { @@ -347,12 +355,12 @@ impl RowGroup { /// Iterate over all changes that need clamping of the block range of an /// existing entity version - pub fn clamps_by_block(&self) -> impl Iterator { + pub fn clamps_by_block(&self) -> impl Iterator { ClampsByBlockIterator::new(self) } /// Iterate over all changes that require writing a new entity version - pub fn writes(&self) -> impl Iterator { + pub fn writes(&self) -> impl Iterator { self.rows.iter().filter(|row| row.is_write()) } @@ -394,15 +402,15 @@ impl RowGroup { } /// Find the most recent entry for `id` - fn prev_row_mut(&mut self, id: &Word) -> Option<&mut EntityMod> { + fn prev_row_mut(&mut self, id: &Word) -> Option<&mut EntityModification> { self.rows.iter_mut().rfind(|emod| emod.id() == id) } /// Append `row` to `self.rows` by combining it with a previously /// existing row, if that is possible - fn append_row(&mut self, row: EntityMod) -> Result<(), StoreError> { + fn append_row(&mut self, row: EntityModification) -> Result<(), StoreError> { if let Some(prev_row) = self.prev_row_mut(row.id()) { - use EntityMod::*; + use EntityModification::*; if row.block() <= prev_row.block() { return Err(constraint_violation!( @@ -476,7 +484,7 @@ impl RowGroup { struct ClampsByBlockIterator<'a> { position: usize, - rows: &'a [EntityMod], + rows: &'a [EntityModification], } impl<'a> ClampsByBlockIterator<'a> { @@ -489,7 +497,7 @@ impl<'a> ClampsByBlockIterator<'a> { } impl<'a> Iterator for ClampsByBlockIterator<'a> { - type Item = (BlockNumber, &'a [EntityMod]); + type Item = (BlockNumber, &'a [EntityModification]); fn next(&mut self) -> Option { // Make sure we start on a clamp @@ -767,13 +775,14 @@ impl CacheWeight for RowGroup { } } -impl CacheWeight for EntityMod { +impl CacheWeight for EntityModification { fn 
indirect_weight(&self) -> usize { match self { - EntityMod::Insert { key, data, .. } | EntityMod::Overwrite { key, data, .. } => { + EntityModification::Insert { key, data, .. } + | EntityModification::Overwrite { key, data, .. } => { key.indirect_weight() + data.indirect_weight() } - EntityMod::Remove { key, .. } => key.indirect_weight(), + EntityModification::Remove { key, .. } => key.indirect_weight(), } } } @@ -886,7 +895,8 @@ impl<'a> Iterator for WriteChunkIter<'a> { mod test { use crate::{ components::store::{ - write::EntityMod, write::EntityOp, BlockNumber, EntityKey, EntityType, StoreError, + write::EntityModification, write::EntityOp, BlockNumber, EntityKey, EntityType, + StoreError, }, entity, prelude::DeploymentHash, @@ -903,7 +913,7 @@ mod test { let rows = values .iter() .zip(blocks.iter()) - .map(|(value, block)| EntityMod::Remove { + .map(|(value, block)| EntityModification::Remove { key: EntityKey::data("RowGroup".to_string(), value.to_string()), block: *block, }) @@ -964,33 +974,33 @@ mod test { OvwC(BlockNumber, BlockNumber), } - impl From<&Mod> for EntityMod { + impl From<&Mod> for EntityModification { fn from(value: &Mod) -> Self { use Mod::*; let value = value.clone(); let key = EntityKey::data("Thing", "one"); match value { - Ins(block) => EntityMod::Insert { + Ins(block) => EntityModification::Insert { key, data: entity! { SCHEMA => id: "one", count: block }, block, end: None, }, - Ovw(block) => EntityMod::Overwrite { + Ovw(block) => EntityModification::Overwrite { key, data: entity! { SCHEMA => id: "one", count: block }, block, end: None, }, - Rem(block) => EntityMod::Remove { key, block }, - InsC(block, end) => EntityMod::Insert { + Rem(block) => EntityModification::Remove { key, block }, + InsC(block, end) => EntityModification::Insert { key, data: entity! { SCHEMA => id: "one", count: block }, block, end: Some(end), }, - OvwC(block, end) => EntityMod::Overwrite { + OvwC(block, end) => EntityModification::Overwrite { key, data: entity! 
{ SCHEMA => id: "one", count: block }, block, @@ -1015,7 +1025,7 @@ mod test { fn append(&mut self, mods: &[Mod]) -> Result<(), StoreError> { for m in mods { - self.group.append_row(EntityMod::from(m))? + self.group.append_row(EntityModification::from(m))? } Ok(()) } @@ -1029,7 +1039,7 @@ mod test { impl PartialEq<&[Mod]> for Group { fn eq(&self, mods: &&[Mod]) -> bool { - let mods: Vec<_> = mods.iter().map(|m| EntityMod::from(m)).collect(); + let mods: Vec<_> = mods.iter().map(|m| EntityModification::from(m)).collect(); self.group.rows == mods } } diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 5acfd460984..ad123b7100c 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -124,13 +124,13 @@ pub mod prelude { pub use crate::components::server::query::GraphQLServer; pub use crate::components::server::subscription::SubscriptionServer; pub use crate::components::store::{ - AttributeNames, BlockNumber, CachedEthereumCall, ChainStore, Child, ChildMultiplicity, - EntityCache, EntityChange, EntityChangeOperation, EntityCollection, EntityFilter, - EntityLink, EntityModification, EntityOperation, EntityOrder, EntityOrderByChild, - EntityOrderByChildInfo, EntityQuery, EntityRange, EntityWindow, EthereumCallCache, - ParentLink, PartialBlockPtr, PoolWaitStats, QueryStore, QueryStoreManager, StoreError, - StoreEvent, StoreEventStream, StoreEventStreamBox, SubgraphStore, UnfailOutcome, - WindowAttribute, BLOCK_NUMBER_MAX, + write::EntityModification, AttributeNames, BlockNumber, CachedEthereumCall, ChainStore, + Child, ChildMultiplicity, EntityCache, EntityChange, EntityChangeOperation, + EntityCollection, EntityFilter, EntityLink, EntityOperation, EntityOrder, + EntityOrderByChild, EntityOrderByChildInfo, EntityQuery, EntityRange, EntityWindow, + EthereumCallCache, ParentLink, PartialBlockPtr, PoolWaitStats, QueryStore, + QueryStoreManager, StoreError, StoreEvent, StoreEventStream, StoreEventStreamBox, + SubgraphStore, UnfailOutcome, WindowAttribute, BLOCK_NUMBER_MAX, 
}; pub use crate::components::subgraph::{ BlockState, DataSourceTemplateInfo, HostMetrics, RuntimeHost, RuntimeHostBuilder, From a75b9cbaffca6a58ead1de24467f04f04cf3eac6 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 15:51:10 +0100 Subject: [PATCH 0260/2104] all: Rename EntityModification::entity_ref() to key() --- core/src/subgraph/runner.rs | 2 +- graph/src/components/store/write.rs | 5 ++--- runtime/test/src/test.rs | 7 +------ store/test-store/tests/graph/entity_cache.rs | 2 +- 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 6af6f069d12..56a6186340e 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -452,7 +452,7 @@ where // If a deterministic error has happened, make the PoI to be the only entity that'll be stored. if has_errors && !is_non_fatal_errors_active { let is_poi_entity = - |entity_mod: &EntityModification| entity_mod.entity_ref().entity_type.is_poi(); + |entity_mod: &EntityModification| entity_mod.key().entity_type.is_poi(); mods.retain(is_poi_entity); // Confidence check assert!( diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 62e26bdbf21..3b7441d5b86 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -283,7 +283,7 @@ impl EntityModification { EntityModification::Remove { key, block } } - pub fn entity_ref(&self) -> &EntityKey { + pub fn key(&self) -> &EntityKey { use EntityModification::*; match self { Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. 
} => key, @@ -649,8 +649,7 @@ impl Batch { let mut mods = RowGroups::new(); for m in raw_mods { - mods.group_entry(&m.entity_ref().entity_type) - .push(m, block)?; + mods.group_entry(&m.key().entity_type).push(m, block)?; } let data_sources = DataSources::new(block_ptr.cheap_clone(), data_sources); diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 91b12e870e8..751266f3416 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -489,12 +489,7 @@ async fn run_ipfs_map( .modifications; // Bring the modifications into a predictable order (by entity_id) - mods.sort_by(|a, b| { - a.entity_ref() - .entity_id - .partial_cmp(&b.entity_ref().entity_id) - .unwrap() - }); + mods.sort_by(|a, b| a.key().entity_id.partial_cmp(&b.key().entity_id).unwrap()); Ok(mods) }) .join() diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 76f46703b05..75a591a556d 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -183,7 +183,7 @@ fn make_band_key(id: &'static str) -> EntityKey { } fn sort_by_entity_key(mut mods: Vec) -> Vec { - mods.sort_by_key(|m| m.entity_ref().clone()); + mods.sort_by_key(|m| m.key().clone()); mods } From b76b1701448f0d88867783f05b65dec338c5a581 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 15:51:34 +0100 Subject: [PATCH 0261/2104] graph: Remove unused EntityModification::is_remove() and entity() --- graph/src/components/store/write.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 3b7441d5b86..5f8e9f5c178 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -289,21 +289,6 @@ impl EntityModification { Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. 
} => key, } } - - pub fn entity(&self) -> Option<&Entity> { - match self { - EntityModification::Insert { data, .. } - | EntityModification::Overwrite { data, .. } => Some(data), - EntityModification::Remove { .. } => None, - } - } - - pub fn is_remove(&self) -> bool { - match self { - EntityModification::Remove { .. } => true, - _ => false, - } - } } /// A list of entity changes grouped by the entity type From 8eb46acf9f5a05dbabca21312455723dc62b4c59 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 15:56:07 +0100 Subject: [PATCH 0262/2104] graph: Move EntityOp to EntityCache That's the only place where it is needed --- graph/src/components/store/entity_cache.rs | 42 ++++++++++++++++++++- graph/src/components/store/mod.rs | 43 +--------------------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 1db072fb828..0ce8eb4f5d3 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -6,10 +6,11 @@ use std::sync::Arc; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; -use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; +use crate::components::store::{self as s, Entity, EntityKey, EntityOperation}; use crate::data::store::IntoEntityIterator; use crate::prelude::ENV_VARS; use crate::schema::InputSchema; +use crate::util::intern::Error as InternError; use crate::util::lfu_cache::{EvictStats, LfuCache}; use super::{BlockNumber, DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; @@ -22,6 +23,45 @@ pub enum GetScope { InBlock, } +/// A representation of entity operations that can be accumulated.
+#[derive(Debug, Clone)] +enum EntityOp { + Remove, + Update(Entity), + Overwrite(Entity), +} + +impl EntityOp { + fn apply_to(self, entity: &mut Option>) -> Result<(), InternError> { + use EntityOp::*; + match (self, entity) { + (Remove, e @ _) => *e = None, + (Overwrite(new), e @ _) | (Update(new), e @ None) => *e = Some(Cow::Owned(new)), + (Update(updates), Some(entity)) => entity.to_mut().merge_remove_null_fields(updates)?, + } + Ok(()) + } + + fn accumulate(&mut self, next: EntityOp) { + use EntityOp::*; + let update = match next { + // Remove and Overwrite ignore the current value. + Remove | Overwrite(_) => { + *self = next; + return; + } + Update(update) => update, + }; + + // We have an update, apply it. + match self { + // This is how `Overwrite` is constructed, by accumulating `Update` onto `Remove`. + Remove => *self = Overwrite(update), + Update(current) | Overwrite(current) => current.merge(update), + } + } +} + /// A cache for entities from the store that provides the basic functionality /// needed for the store interactions in the host exports. 
This struct tracks /// how entities are modified, and caches all entities looked up from the diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index fcbb0e53b7a..f22f79beaa9 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -16,7 +16,7 @@ use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; use graphql_parser::schema as s; use serde::{Deserialize, Serialize}; -use std::borrow::{Borrow, Cow}; +use std::borrow::Borrow; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::fmt::Display; @@ -32,7 +32,7 @@ use crate::data::store::*; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::schema::InputSchema; -use crate::util::intern::{self, Error as InternError}; +use crate::util::intern; use crate::{constraint_violation, prelude::*}; /// The type name of an entity. This is the string that is used in the @@ -1003,45 +1003,6 @@ impl Display for DeploymentLocator { // connection checkouts pub type PoolWaitStats = Arc>; -/// A representation of entity operations that can be accumulated. -#[derive(Debug, Clone)] -enum EntityOp { - Remove, - Update(Entity), - Overwrite(Entity), -} - -impl EntityOp { - fn apply_to(self, entity: &mut Option>) -> Result<(), InternError> { - use EntityOp::*; - match (self, entity) { - (Remove, e @ _) => *e = None, - (Overwrite(new), e @ _) | (Update(new), e @ None) => *e = Some(Cow::Owned(new)), - (Update(updates), Some(entity)) => entity.to_mut().merge_remove_null_fields(updates)?, - } - Ok(()) - } - - fn accumulate(&mut self, next: EntityOp) { - use EntityOp::*; - let update = match next { - // Remove and Overwrite ignore the current value. - Remove | Overwrite(_) => { - *self = next; - return; - } - Update(update) => update, - }; - - // We have an update, apply it. - match self { - // This is how `Overwrite` is constructed, by accumulating `Update` onto `Remove`. 
- Remove => *self = Overwrite(update), - Update(current) | Overwrite(current) => current.merge(update), - } - } -} - /// Determines which columns should be selected in a table. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum AttributeNames { From ed1472c02c2daef5b759ea607bebd251188d977a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 16:08:59 +0100 Subject: [PATCH 0263/2104] graph, store: Simplify adding modifications for immutable entities --- graph/src/components/store/write.rs | 49 +++++++++++++------ store/postgres/src/writable.rs | 1 + .../tests/postgres/relational_bytes.rs | 6 +-- 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 5f8e9f5c178..dd9746545c6 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -1,5 +1,5 @@ //! Data structures and helpers for writing subgraph changes to the store -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use crate::{ blockchain::{block_stream::FirehoseCursor, BlockPtr}, @@ -9,6 +9,7 @@ use crate::{ data::{subgraph::schema::SubgraphError, value::Word}, data_source::CausalityRegion, prelude::DeploymentHash, + schema::InputSchema, util::cache_weight::CacheWeight, }; @@ -144,13 +145,6 @@ impl EntityModification { } } - fn key(&self) -> &EntityKey { - use EntityModification::*; - match self { - Insert { key, .. } | Overwrite { key, .. } | Remove { key, .. } => key, - } - } - fn entity_count_change(&self) -> i32 { match self { EntityModification::Insert { end: None, .. } => 1, @@ -299,13 +293,16 @@ pub struct RowGroup { /// then `rows[i].block() <= rows[j].block()`. Several methods on this /// struct rely on the fact that this ordering is observed. 
rows: Vec, + + immutable: bool, } impl RowGroup { - pub fn new(entity_type: EntityType) -> Self { + pub fn new(entity_type: EntityType, immutable: bool) -> Self { Self { entity_type, rows: Vec::new(), + immutable, } } @@ -394,6 +391,22 @@ impl RowGroup { /// Append `row` to `self.rows` by combining it with a previously /// existing row, if that is possible fn append_row(&mut self, row: EntityModification) -> Result<(), StoreError> { + if self.immutable { + match row { + EntityModification::Insert { .. } => { + self.rows.push(row); + } + EntityModification::Overwrite { .. } | EntityModification::Remove { .. } => { + return Err(constraint_violation!( + "immutable entity type {} only allows inserts, not {:?}", + self.entity_type, + row + )); + } + } + return Ok(()); + } + if let Some(prev_row) = self.prev_row_mut(row.id()) { use EntityModification::*; @@ -510,12 +523,16 @@ impl<'a> Iterator for ClampsByBlockIterator<'a> { /// A list of entity changes with one group per entity type #[derive(Debug)] pub struct RowGroups { + schema: Arc, pub groups: Vec, } impl RowGroups { - fn new() -> Self { - Self { groups: Vec::new() } + fn new(schema: Arc) -> Self { + Self { + schema, + groups: Vec::new(), + } } fn group(&self, entity_type: &EntityType) -> Option<&RowGroup> { @@ -534,7 +551,9 @@ impl RowGroups { match pos { Some(pos) => &mut self.groups[pos], None => { - self.groups.push(RowGroup::new(entity_type.clone())); + let immutable = self.schema.is_immutable(entity_type); + self.groups + .push(RowGroup::new(entity_type.clone(), immutable)); // unwrap: we just pushed an entry self.groups.last_mut().unwrap() } @@ -612,6 +631,7 @@ pub struct Batch { impl Batch { pub fn new( + schema: Arc, block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, mut raw_mods: Vec, @@ -631,7 +651,7 @@ impl Batch { EntityModification::Remove { .. 
} => 0, }); - let mut mods = RowGroups::new(); + let mut mods = RowGroups::new(schema); for m in raw_mods { mods.group_entry(&m.key().entity_type).push(m, block)?; @@ -905,6 +925,7 @@ mod test { let group = RowGroup { entity_type: EntityType::new("Entry".to_string()), rows, + immutable: false, }; let act = group .clamps_by_block() @@ -1003,7 +1024,7 @@ mod test { impl Group { fn new() -> Self { Self { - group: RowGroup::new(ENTITY_TYPE.clone()), + group: RowGroup::new(ENTITY_TYPE.clone(), false), } } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index b29173206db..6cc644f950f 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1497,6 +1497,7 @@ impl WritableStoreTrait for WritableStore { processed_data_sources: Vec, ) -> Result<(), StoreError> { let batch = Batch::new( + self.store.input_schema.cheap_clone(), block_ptr_to.clone(), firehose_cursor.clone(), mods, diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 52dd14dbc96..554abbd591b 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -84,7 +84,7 @@ pub fn row_group_update( block: BlockNumber, data: impl IntoIterator, ) -> RowGroup { - let mut group = RowGroup::new(entity_type.clone()); + let mut group = RowGroup::new(entity_type.clone(), false); for (key, data) in data { group .push(EntityModification::overwrite(key, data, block), block) @@ -98,7 +98,7 @@ pub fn row_group_insert( block: BlockNumber, data: impl IntoIterator, ) -> RowGroup { - let mut group = RowGroup::new(entity_type.clone()); + let mut group = RowGroup::new(entity_type.clone(), false); for (key, data) in data { group .push(EntityModification::insert(key, data, block), block) @@ -112,7 +112,7 @@ pub fn row_group_delete( block: BlockNumber, data: impl IntoIterator, ) -> RowGroup { - let mut group = RowGroup::new(entity_type.clone()); + let 
mut group = RowGroup::new(entity_type.clone(), false); for key in data { group .push(EntityModification::remove(key, block), block) From 6120a53319fce0e25827f5b7ab1052baa4d12def Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 13 May 2023 16:11:26 +0100 Subject: [PATCH 0264/2104] graph: Reserve extra capacity upfront when appending RowGroups --- graph/src/components/store/write.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index dd9746545c6..b14035272bb 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -468,6 +468,7 @@ impl RowGroup { )); } + self.rows.reserve(group.rows.len()); for row in group.rows { self.append_row(row)?; } From 08da7cb46ddc8c09f448c5ea4b210c9021ea05ad Mon Sep 17 00:00:00 2001 From: Saihajpreet Singh Date: Fri, 26 May 2023 13:43:23 -0400 Subject: [PATCH 0265/2104] docs: update links for graph tooling repo (#4303) docs: update links for graph tooling repo dependabot: weekly yarn.lock upgrade rebuild lockfile update lockfiles fixes --- .github/dependabot.yml | 12 + .github/workflows/ci.yml | 4 +- docker/README.md | 2 +- docs/getting-started.md | 12 +- docs/implementation/add-chain.md | 12 +- docs/subgraph-manifest.md | 2 +- runtime/test/README.md | 2 +- .../api-version-v0-0-4/package.json | 1 + .../ganache-reverts/package.json | 4 +- .../ganache-reverts/subgraph.yaml | 4 +- .../host-exports/package.json | 4 +- .../host-exports/src/mapping.ts | 6 +- .../host-exports/subgraph.yaml | 4 +- .../non-fatal-errors/package.json | 4 +- .../non-fatal-errors/subgraph.yaml | 8 +- .../package.json | 4 +- .../subgraph.yaml | 4 +- .../poi-for-failed-subgraph/package.json | 4 +- .../poi-for-failed-subgraph/subgraph.yaml | 4 +- .../remove-then-update/package.json | 4 +- .../remove-then-update/subgraph.yaml | 4 +- .../value-roundtrip/package.json | 4 +- .../value-roundtrip/subgraph.yaml | 4 +- tests/integration-tests/yarn.lock | 1442 
+++++++++-- .../data-source-revert/grafted.yaml | 2 +- .../data-source-revert/package.json | 4 +- .../data-source-revert2/package.json | 4 +- .../dynamic-data-source/package.json | 4 +- tests/runner-tests/fatal-error/package.json | 4 +- .../file-data-sources/package.json | 4 +- tests/runner-tests/typename/package.json | 4 +- tests/runner-tests/yarn.lock | 2160 ++++++++++------- tests/tests/runner_tests.rs | 6 +- 33 files changed, 2655 insertions(+), 1092 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 44043c3072e..977a3b8fc50 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,5 +1,17 @@ version: 2 updates: + +- package-ecosystem: npm + directory: tests/integration-tests + schedule: + interval: weekly + open-pull-requests-limit: 10 + allow: + # We always want to test against the latest Graph CLI tooling: `graph-cli`, + # `graph-ts`. + - dependency-name: "@graphprotocol/graph-*" + versioning-strategy: lockfile-only + - package-ecosystem: cargo directory: "/" schedule: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0847d9d898..08d5d3c4295 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,10 +102,10 @@ jobs: uses: actions/checkout@v2 - uses: Swatinem/rust-cache@v2 - - name: Install Node 14 + - name: Install Node 16 uses: actions/setup-node@v3 with: - node-version: "14" + node-version: "16" cache: yarn cache-dependency-path: "tests/integration-tests/yarn.lock" diff --git a/docker/README.md b/docker/README.md index 326a3535e9f..af95f4bbd52 100644 --- a/docker/README.md +++ b/docker/README.md @@ -59,7 +59,7 @@ can access these via: - `postgresql://graph-node:let-me-in@localhost:5432/graph-node` Once this is up and running, you can use -[`graph-cli`](https://github.com/graphprotocol/graph-cli) to create and +[`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) to create and deploy your subgraph to the running Graph Node. 
### Running Graph Node on an Macbook M1 diff --git a/docs/getting-started.md b/docs/getting-started.md index b504f357bdb..13c2c8786d4 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -43,7 +43,7 @@ The high-level dataflow for a dApp using The Graph is as follows: ### 0.3 What's Needed to Build a Graph Node? Three repositories are relevant to building on The Graph: 1. [Graph Node](../README.md) – A server implementation for indexing, caching, and serving queries against data from Ethereum. -2. [Graph CLI](https://github.com/graphprotocol/graph-cli) – A CLI for building and compiling projects that are deployed to the Graph Node. +2. [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) – A CLI for building and compiling projects that are deployed to the Graph Node. 3. [Graph TypeScript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) – TypeScript/AssemblyScript library for writing subgraph mappings to be deployed to The Graph. ### 0.4 Getting Started Overview @@ -182,9 +182,9 @@ schema: ``` ### 1.3 Create a Subgraph Project and Generate Types -Once you have the `subgraph.yaml` manifest and the `./schema.graphql` file, you are ready to use the Graph CLI to set up the subgraph directory. The Graph CLI is a command-line tool that contains helpful commands for deploying the subgraphs. Before continuing with this guide, please go to the [Graph CLI README](https://github.com/graphprotocol/graph-cli/) and follow the instructions up to Step 7 for setting up the subgraph directory. +Once you have the `subgraph.yaml` manifest and the `./schema.graphql` file, you are ready to use the Graph CLI to set up the subgraph directory. The Graph CLI is a command-line tool that contains helpful commands for deploying the subgraphs. 
Before continuing with this guide, please go to the [Graph CLI README](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) and follow the instructions up to Step 7 for setting up the subgraph directory. -Once you run `yarn codegen` as outlined in the [Graph CLI README](https://github.com/graphprotocol/graph-cli/), you are ready to create the mappings. +Once you run `yarn codegen` as outlined in the [Graph CLI README](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), you are ready to create the mappings. `yarn codegen` looks at the contract ABIs defined in the subgraph manifest and generates TypeScript classes for the smart contracts the mappings script will interface with, which includes the types of public methods and events. In reality, the classes are AssemblyScript but more on that later. @@ -228,7 +228,7 @@ export function handleTransfer(event: Transfer): void { ``` A few things to note from this code: * We create a new entity named `token`, which is stored in the Graph Node database. -* We create an ID for that token, which must be unique, and then create an entity with `new Token(tokenID)`. We get the token ID from the event emitted by Ethereum, which was turned into an AssemblyScript type by the [Graph TypeScript Library](https://github.com/graphprotocol/graph-ts). We access it at `event.params.tokenId`. Note that you must set `ID` as a string and call `toHex()` on the `tokenID` to turn it into a hex string. +* We create an ID for that token, which must be unique, and then create an entity with `new Token(tokenID)`. We get the token ID from the event emitted by Ethereum, which was turned into an AssemblyScript type by the [Graph TypeScript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts). We access it at `event.params.tokenId`. Note that you must set `ID` as a string and call `toHex()` on the `tokenID` to turn it into a hex string. 
* This entity is updated by the `Transfer` event emitted by the ERC721 contract. * The current owner is gathered from the event with `event.params.to`. It is set as an Address by the Token class. * Event handlers functions always return `void`. @@ -252,7 +252,7 @@ The definition for `.load()` is: entity.load() // Entity is representative of the entity type being updated. In our example above, it is Token. ``` -Once again, all these functions come from the [Graph TypeScript Library](https://github.com/graphprotocol/graph-ts). +Once again, all these functions come from the [Graph TypeScript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts). Let's look at the ERC721 token as an example for using `token.load()`. Above, we showed how to use `token.save()`. Now, let's consider that you have another event handler that needs to retrieve the currentOwner of an ERC721 token. To do this within an event handler, you would write the following: @@ -407,7 +407,7 @@ If you want to sync using a public testnet such as Kovan, Rinkeby, or Ropsten, j When you deploy the subgraph to the Graph Node, it will start ingesting all the subgraph events from the blockchain, transforming that data with the subgraph mappings and storing it in the Graph Node. Note that a running subgraph can safely be stopped and restarted, picking up where it left off. -Now that the infrastructure is set up, you can run `yarn create-subgraph` and then `yarn deploy` in the subgraph directory. These commands should have been added to `package.json` in section 1.3 when we took a moment to go through the set up for [Graph CLI documentation](https://github.com/graphprotocol/graph-cli). This builds the subgraph and creates the WASM files in the `dist/` folder. Next, it uploads the `dist/ +Now that the infrastructure is set up, you can run `yarn create-subgraph` and then `yarn deploy` in the subgraph directory. 
These commands should have been added to `package.json` in section 1.3 when we took a moment to go through the set up for [Graph CLI documentation](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). This builds the subgraph and creates the WASM files in the `dist/` folder. Next, it uploads the `dist/ ` files to IPFS and deploys it to the Graph Node. The subgraph is now fully running. The `watch` flag allows the subgraph to continually restart every time you save an update to the `manifest`, `schema`, or `mappings`. If you are making many edits or have a subgraph that has been syncing for a few hours, leave this flag off. diff --git a/docs/implementation/add-chain.md b/docs/implementation/add-chain.md index eea61687910..f4af3371f25 100644 --- a/docs/implementation/add-chain.md +++ b/docs/implementation/add-chain.md @@ -256,22 +256,22 @@ Just like in the `server` crate, you'll just have to handle the new `BlockchainK ## What else? -Besides making `graph-node` support the new chain, [graph-cli](https://github.com/graphprotocol/graph-cli) and [graph-ts](https://github.com/graphprotocol/graph-ts) should also include the new types and enable the new functionality so that subgraph developers can use it. +Besides making `graph-node` support the new chain, [graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) and [graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) should also include the new types and enable the new functionality so that subgraph developers can use it. 
For now this document doesn't include how to do that integration, here are a few PRs that might help you with that: - NEAR - `graph-cli` - - https://github.com/graphprotocol/graph-cli/pull/760 - - https://github.com/graphprotocol/graph-cli/pull/783 + - https://github.com/graphprotocol/graph-tooling/pull/760 + - https://github.com/graphprotocol/graph-tooling/pull/783 - `graph-ts` - https://github.com/graphprotocol/graph-ts/pull/210 - https://github.com/graphprotocol/graph-ts/pull/217 - Cosmos - `graph-cli` - - https://github.com/graphprotocol/graph-cli/pull/827 - - https://github.com/graphprotocol/graph-cli/pull/851 - - https://github.com/graphprotocol/graph-cli/pull/888 + - https://github.com/graphprotocol/graph-tooling/pull/827 + - https://github.com/graphprotocol/graph-tooling/pull/851 + - https://github.com/graphprotocol/graph-tooling/pull/888 - `graph-ts` - https://github.com/graphprotocol/graph-ts/pull/250 - https://github.com/graphprotocol/graph-ts/pull/273 diff --git a/docs/subgraph-manifest.md b/docs/subgraph-manifest.md index 14b47b059dc..e595690e9f3 100644 --- a/docs/subgraph-manifest.md +++ b/docs/subgraph-manifest.md @@ -34,7 +34,7 @@ Any data format that has a well-defined 1:1 mapping with the [IPLD Canonical For | --- | --- | --- | | **kind** | *String | The type of data source. Possible values: *ethereum/contract*.| | **name** | *String* | The name of the source data. Will be used to generate APIs in the mapping and also for self-documentation purposes. | -| **network** | *String* | For blockchains, this describes which network the subgraph targets. For Ethereum, this can be any of "mainnet", "rinkeby", "kovan", "ropsten", "goerli", "poa-core", "poa-sokol", "xdai", "matic", "mumbai", "fantom", "bsc" or "clover". 
Developers could look for an up to date list in the graph-cli [*code*](https://github.com/graphprotocol/graph-cli/blob/main/packages/cli/src/protocols/index.js#L70-L107).| +| **network** | *String* | For blockchains, this describes which network the subgraph targets. For Ethereum, this can be any of "mainnet", "rinkeby", "kovan", "ropsten", "goerli", "poa-core", "poa-sokol", "xdai", "matic", "mumbai", "fantom", "bsc" or "clover". Developers could look for an up to date list in the graph-cli [*code*](https://github.com/graphprotocol/graph-tooling/blob/main/packages/cli/src/protocols/index.ts#L76-L117).| | **source** | [*EthereumContractSource*](#151-ethereumcontractsource) | The source data on a blockchain such as Ethereum. | | **mapping** | [*Mapping*](#152-mapping) | The transformation logic applied to the data prior to being indexed. | diff --git a/runtime/test/README.md b/runtime/test/README.md index c55561780f3..7beeb342351 100644 --- a/runtime/test/README.md +++ b/runtime/test/README.md @@ -1,6 +1,6 @@ # Runtime tests -These are the unit tests that check if the WASM runtime code is working. For now we only run code compiled from the [`AssemblyScript`](https://www.assemblyscript.org/) language, which is done by [`asc`](https://github.com/AssemblyScript/assemblyscript) (the AssemblyScript Compiler) in our [`CLI`](https://github.com/graphprotocol/graph-cli). +These are the unit tests that check if the WASM runtime code is working. For now we only run code compiled from the [`AssemblyScript`](https://www.assemblyscript.org/) language, which is done by [`asc`](https://github.com/AssemblyScript/assemblyscript) (the AssemblyScript Compiler) in our [`CLI`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 
We support two versions of their compiler/language for now: diff --git a/tests/integration-tests/api-version-v0-0-4/package.json b/tests/integration-tests/api-version-v0-0-4/package.json index 4fae4144853..7a8849872b7 100644 --- a/tests/integration-tests/api-version-v0-0-4/package.json +++ b/tests/integration-tests/api-version-v0-0-4/package.json @@ -8,6 +8,7 @@ "create:test": "graph create test/api-version-v0-0-4 --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/api-version-v0-0-4 --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, + "note": "Do not update the dependencies below - we want to make sure it's backward compatible, so we are using an old CLI version on purpose.", "devDependencies": { "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#v0.21.1", "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#v0.21.1", diff --git a/tests/integration-tests/ganache-reverts/package.json b/tests/integration-tests/ganache-reverts/package.json index fdc5f9119d5..94a4704fcc7 100644 --- a/tests/integration-tests/ganache-reverts/package.json +++ b/tests/integration-tests/ganache-reverts/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/ganache-reverts --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/ganache-reverts/subgraph.yaml b/tests/integration-tests/ganache-reverts/subgraph.yaml index 2b3cbfa922f..ff808ecfc0a 100644 --- a/tests/integration-tests/ganache-reverts/subgraph.yaml +++ b/tests/integration-tests/ganache-reverts/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: 
@@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/host-exports/package.json b/tests/integration-tests/host-exports/package.json index 038d6f2cac2..68cb9ac40cc 100644 --- a/tests/integration-tests/host-exports/package.json +++ b/tests/integration-tests/host-exports/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/host-exports --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/host-exports/src/mapping.ts b/tests/integration-tests/host-exports/src/mapping.ts index 1e02d849132..65587293870 100644 --- a/tests/integration-tests/host-exports/src/mapping.ts +++ b/tests/integration-tests/host-exports/src/mapping.ts @@ -62,10 +62,10 @@ function testBigDecimal(): void { assert((bigInt & BigInt.fromI32(42)) == BigInt.fromI32(40)); // Test big int left shift - assert(bigInt << 6 == BigInt.fromString("568888888888888832")); + assert(bigInt.leftShift(6) == BigInt.fromString("568888888888888832")); // Test big int right shift - assert(bigInt >> 6 == BigInt.fromString("138888888888888")); + assert(bigInt.rightShift(6) == BigInt.fromString("138888888888888")); } function testEthereumAbi(): void { @@ -121,4 +121,4 @@ function ethereumAbiComplexCase(): void { assert(bigInt1.toBigInt() == decodedBigInt1, "uint256[0] ethereum encoded does not equal the decoded value"); assert(bigInt2.toBigInt() == decodedBigInt2, "uint256[1] ethereum encoded does not equal the decoded value"); assert(bool.toBoolean() == decodedBool, "boolean ethereum encoded does not equal the decoded 
value"); -} +} \ No newline at end of file diff --git a/tests/integration-tests/host-exports/subgraph.yaml b/tests/integration-tests/host-exports/subgraph.yaml index 09e3122864c..0ab4801ce44 100644 --- a/tests/integration-tests/host-exports/subgraph.yaml +++ b/tests/integration-tests/host-exports/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/non-fatal-errors/package.json b/tests/integration-tests/non-fatal-errors/package.json index 162a4dbd213..431b77a04c8 100644 --- a/tests/integration-tests/non-fatal-errors/package.json +++ b/tests/integration-tests/non-fatal-errors/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/non-fatal-errors --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/non-fatal-errors/subgraph.yaml b/tests/integration-tests/non-fatal-errors/subgraph.yaml index 1039ea79ae3..8a5e91c23ed 100644 --- a/tests/integration-tests/non-fatal-errors/subgraph.yaml +++ b/tests/integration-tests/non-fatal-errors/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql features: @@ -12,7 +12,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract @@ -30,7 +30,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript 
abis: - name: Contract @@ -48,7 +48,7 @@ templates: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/overloaded-contract-functions/package.json b/tests/integration-tests/overloaded-contract-functions/package.json index ac6c59b16db..4d1bf025013 100644 --- a/tests/integration-tests/overloaded-contract-functions/package.json +++ b/tests/integration-tests/overloaded-contract-functions/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/overloaded-contract-functions --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/overloaded-contract-functions/subgraph.yaml b/tests/integration-tests/overloaded-contract-functions/subgraph.yaml index 2b3cbfa922f..ff808ecfc0a 100644 --- a/tests/integration-tests/overloaded-contract-functions/subgraph.yaml +++ b/tests/integration-tests/overloaded-contract-functions/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/poi-for-failed-subgraph/package.json b/tests/integration-tests/poi-for-failed-subgraph/package.json index 665bf0ace52..524d2c2df25 100644 --- a/tests/integration-tests/poi-for-failed-subgraph/package.json +++ b/tests/integration-tests/poi-for-failed-subgraph/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/poi-for-failed-subgraph --version-label v0.0.1 --ipfs 
$IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/poi-for-failed-subgraph/subgraph.yaml b/tests/integration-tests/poi-for-failed-subgraph/subgraph.yaml index 09e3122864c..0ab4801ce44 100644 --- a/tests/integration-tests/poi-for-failed-subgraph/subgraph.yaml +++ b/tests/integration-tests/poi-for-failed-subgraph/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/remove-then-update/package.json b/tests/integration-tests/remove-then-update/package.json index 91eea8a5417..3c5dde77937 100644 --- a/tests/integration-tests/remove-then-update/package.json +++ b/tests/integration-tests/remove-then-update/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/remove-then-update --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/remove-then-update/subgraph.yaml b/tests/integration-tests/remove-then-update/subgraph.yaml index 09e3122864c..0ab4801ce44 100644 --- a/tests/integration-tests/remove-then-update/subgraph.yaml +++ b/tests/integration-tests/remove-then-update/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 
0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/value-roundtrip/package.json b/tests/integration-tests/value-roundtrip/package.json index cf177d5c862..ceea4439afd 100644 --- a/tests/integration-tests/value-roundtrip/package.json +++ b/tests/integration-tests/value-roundtrip/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/value-roundtrip --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/integration-tests/value-roundtrip/subgraph.yaml b/tests/integration-tests/value-roundtrip/subgraph.yaml index 09e3122864c..0ab4801ce44 100644 --- a/tests/integration-tests/value-roundtrip/subgraph.yaml +++ b/tests/integration-tests/value-roundtrip/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,7 +10,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract diff --git a/tests/integration-tests/yarn.lock b/tests/integration-tests/yarn.lock index bf8ff17f87e..0f10ad49261 100644 --- a/tests/integration-tests/yarn.lock +++ b/tests/integration-tests/yarn.lock @@ -513,13 +513,6 @@ dependencies: regenerator-runtime "^0.13.4" -"@babel/runtime@^7.9.2": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.18.9.tgz#b4fcfce55db3d2e5e080d2490f608a3b9f407f4a" - integrity 
sha512-lkqXDcvlFT5rvEjiu6+QYO+1GXrEHRo2LOtS7E4GtX5ESIZOgepqsZBVIj6Pv+a6zqsya9VCgiK1KAK4BvJDAw== - dependencies: - regenerator-runtime "^0.13.4" - "@babel/template@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.12.13.tgz#530265be8a2589dbb37523844c5bcb55947fb327" @@ -577,6 +570,13 @@ lodash "^4.17.19" to-fast-properties "^2.0.0" +"@cspotcode/source-map-support@^0.8.0": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" + integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== + dependencies: + "@jridgewell/trace-mapping" "0.3.9" + "@ethersproject/abi@5.0.0-beta.153": version "5.0.0-beta.153" resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.0.0-beta.153.tgz#43a37172b33794e4562999f6e2d555b7599a8eee" @@ -861,35 +861,47 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" -"@graphprotocol/graph-cli@https://github.com/graphprotocol/graph-cli#main": - version "0.33.0" - resolved "https://github.com/graphprotocol/graph-cli#47e075a9701680580e0e8e09c5444963224dbf5c" - dependencies: - assemblyscript "0.19.10" +"@float-capital/float-subgraph-uncrashable@^0.0.0-alpha.4": + version "0.0.0-internal-testing.5" + resolved "https://registry.yarnpkg.com/@float-capital/float-subgraph-uncrashable/-/float-subgraph-uncrashable-0.0.0-internal-testing.5.tgz#060f98440f6e410812766c5b040952d2d02e2b73" + integrity sha512-yZ0H5e3EpAYKokX/AbtplzlvSxEJY7ZfpvQyDzyODkks0hakAAlDG6fQu1SlDJMWorY7bbq1j7fCiFeTWci6TA== + dependencies: + "@rescript/std" "9.0.0" + graphql "^16.6.0" + graphql-import-node "^0.0.5" + js-yaml "^4.1.0" + +"@graphprotocol/graph-cli@0.50.0": + version "0.50.0" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.50.0.tgz#1ffef3834cc8376e64a05d6ee327c9d4eb269231" + integrity 
sha512-Fw46oN06ec1pf//vTPFzmyL0LRD9ed/XXfibQQClyMLfNlYAATZvz930RH3SHb2N4ZLdfKDDkY1SLgtDghtrow== + dependencies: + "@float-capital/float-subgraph-uncrashable" "^0.0.0-alpha.4" + "@oclif/core" "2.8.4" + "@whatwg-node/fetch" "^0.8.4" + assemblyscript "0.19.23" binary-install-raw "0.0.13" chalk "3.0.0" - chokidar "3.5.1" - debug "4.3.1" - docker-compose "0.23.4" + chokidar "3.5.3" + debug "4.3.4" + docker-compose "0.23.19" dockerode "2.5.8" - fs-extra "9.0.0" - glob "7.1.6" - gluegun "https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep" + fs-extra "9.1.0" + glob "9.3.5" + gluegun "5.1.2" graphql "15.5.0" - immutable "3.8.2" - ipfs-http-client "34.0.0" - jayson "3.6.6" - js-yaml "3.13.1" - node-fetch "2.6.0" - pkginfo "0.4.1" + immutable "4.2.1" + ipfs-http-client "55.0.0" + jayson "4.0.0" + js-yaml "3.14.1" prettier "1.19.1" request "2.88.2" - semver "7.3.5" + semver "7.4.0" sync-request "6.1.0" - tmp-promise "3.0.2" + tmp-promise "3.0.3" web3-eth-abi "1.7.0" which "2.0.2" - yaml "1.9.2" + yaml "1.10.2" "@graphprotocol/graph-cli@https://github.com/graphprotocol/graph-cli#v0.21.1": version "0.21.1" @@ -916,9 +928,10 @@ tmp-promise "^3.0.2" yaml "^1.5.1" -"@graphprotocol/graph-ts@https://github.com/graphprotocol/graph-ts#main": - version "0.28.1" - resolved "https://github.com/graphprotocol/graph-ts#4e91d2c0b695c7689aba205516d3e80fb5588454" +"@graphprotocol/graph-ts@0.30.0": + version "0.30.0" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-ts/-/graph-ts-0.30.0.tgz#591dee3c7d9fc236ad57ce0712779e94aef9a50a" + integrity sha512-h5tJqlsZXglGYM0PcBsBOqof4PT0Fr4Z3QBTYN/IjMF3VvRX2A8/bdpqaAnva+2N0uAfXXwRcwcOcW5O35yzXw== dependencies: assemblyscript "0.19.10" @@ -1223,11 +1236,52 @@ normalize-path "^2.0.1" through2 "^2.0.3" +"@ipld/dag-cbor@^7.0.0": + version "7.0.3" + resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-7.0.3.tgz#aa31b28afb11a807c3d627828a344e5521ac4a1e" + integrity 
sha512-1VVh2huHsuohdXC1bGJNE8WR72slZ9XE2T3wbBBq31dm7ZBatmKLLxrB+XAqafxfRFjv08RZmj/W/ZqaM13AuA== + dependencies: + cborg "^1.6.0" + multiformats "^9.5.4" + +"@ipld/dag-json@^8.0.1": + version "8.0.11" + resolved "https://registry.yarnpkg.com/@ipld/dag-json/-/dag-json-8.0.11.tgz#8d30cc2dfacb0aef04d327465d3df91e79e8b6ce" + integrity sha512-Pea7JXeYHTWXRTIhBqBlhw7G53PJ7yta3G/sizGEZyzdeEwhZRr0od5IQ0r2ZxOt1Do+2czddjeEPp+YTxDwCA== + dependencies: + cborg "^1.5.4" + multiformats "^9.5.4" + +"@ipld/dag-pb@^2.1.3": + version "2.1.18" + resolved "https://registry.yarnpkg.com/@ipld/dag-pb/-/dag-pb-2.1.18.tgz#12d63e21580e87c75fd1a2c62e375a78e355c16f" + integrity sha512-ZBnf2fuX9y3KccADURG5vb9FaOeMjFkCrNysB0PtftME/4iCTjxfaLoNq/IAh5fTqUOMXvryN6Jyka4ZGuMLIg== + dependencies: + multiformats "^9.5.4" + "@josephg/resolvable@^1.0.0": version "1.0.1" resolved "https://registry.yarnpkg.com/@josephg/resolvable/-/resolvable-1.0.1.tgz#69bc4db754d79e1a2f17a650d3466e038d94a5eb" integrity sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg== +"@jridgewell/resolve-uri@^3.0.3": + version "3.1.1" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721" + integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== + +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.4.15" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" + integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== + +"@jridgewell/trace-mapping@0.3.9": + version "0.3.9" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" + integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== + dependencies: + 
"@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@nodelib/fs.scandir@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" @@ -1249,6 +1303,68 @@ "@nodelib/fs.scandir" "2.1.4" fastq "^1.6.0" +"@oclif/core@2.8.4": + version "2.8.4" + resolved "https://registry.yarnpkg.com/@oclif/core/-/core-2.8.4.tgz#7b453be6d4cd060ff4990bc8e31824a1de308354" + integrity sha512-VlFDhoAJ1RDwcpDF46wAlciWTIryapMUViACttY9GwX6Ci6Lud1awe/pC3k4jad5472XshnPQV4bHAl4a/yxpA== + dependencies: + "@types/cli-progress" "^3.11.0" + ansi-escapes "^4.3.2" + ansi-styles "^4.3.0" + cardinal "^2.1.1" + chalk "^4.1.2" + clean-stack "^3.0.1" + cli-progress "^3.12.0" + debug "^4.3.4" + ejs "^3.1.8" + fs-extra "^9.1.0" + get-package-type "^0.1.0" + globby "^11.1.0" + hyperlinker "^1.0.0" + indent-string "^4.0.0" + is-wsl "^2.2.0" + js-yaml "^3.14.1" + natural-orderby "^2.0.3" + object-treeify "^1.1.33" + password-prompt "^1.1.2" + semver "^7.3.7" + string-width "^4.2.3" + strip-ansi "^6.0.1" + supports-color "^8.1.1" + supports-hyperlinks "^2.2.0" + ts-node "^10.9.1" + tslib "^2.5.0" + widest-line "^3.1.0" + wordwrap "^1.0.0" + wrap-ansi "^7.0.0" + +"@peculiar/asn1-schema@^2.3.6": + version "2.3.6" + resolved "https://registry.yarnpkg.com/@peculiar/asn1-schema/-/asn1-schema-2.3.6.tgz#3dd3c2ade7f702a9a94dfb395c192f5fa5d6b922" + integrity sha512-izNRxPoaeJeg/AyH8hER6s+H7p4itk+03QCa4sbxI3lNdseQYCuxzgsuNK8bTXChtLTjpJz6NmXKA73qLa3rCA== + dependencies: + asn1js "^3.0.5" + pvtsutils "^1.3.2" + tslib "^2.4.0" + +"@peculiar/json-schema@^1.1.12": + version "1.1.12" + resolved "https://registry.yarnpkg.com/@peculiar/json-schema/-/json-schema-1.1.12.tgz#fe61e85259e3b5ba5ad566cb62ca75b3d3cd5339" + integrity sha512-coUfuoMeIB7B8/NMekxaDzLhaYmp0HZNPEjYRm9goRou8UZIC3z21s0sL9AWoCw4EG876QyO3kYrc61WNF9B/w== + dependencies: + tslib "^2.0.0" + +"@peculiar/webcrypto@^1.4.0": + version "1.4.3" + resolved 
"https://registry.yarnpkg.com/@peculiar/webcrypto/-/webcrypto-1.4.3.tgz#078b3e8f598e847b78683dc3ba65feb5029b93a7" + integrity sha512-VtaY4spKTdN5LjJ04im/d/joXuvLbQdgy5Z4DXF4MFZhQ+MTrejbNMkfZBp1Bs3O5+bFqnJgyGdPuZQflvIa5A== + dependencies: + "@peculiar/asn1-schema" "^2.3.6" + "@peculiar/json-schema" "^1.1.12" + pvtsutils "^1.3.2" + tslib "^2.5.0" + webcrypto-core "^1.7.7" + "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2": version "1.1.2" resolved "https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf" @@ -1346,6 +1462,11 @@ resolved "https://registry.yarnpkg.com/@redux-saga/types/-/types-1.1.0.tgz#0e81ce56b4883b4b2a3001ebe1ab298b84237204" integrity sha512-afmTuJrylUU/0OtqzaRkbyYFFNgCF73Bvel/sw90pvGrWIZ+vyoIJqA6eMSoA6+nb443kTmulmBtC9NerXboNg== +"@rescript/std@9.0.0": + version "9.0.0" + resolved "https://registry.yarnpkg.com/@rescript/std/-/std-9.0.0.tgz#df53f3fa5911cb4e85bd66b92e9e58ddf3e4a7e1" + integrity sha512-zGzFsgtZ44mgL4Xef2gOy1hrRVdrs9mcxCOOKZrIPsmbZW14yTkaF591GXxpQvjXiHtgZ/iA9qLyWH6oSReIxQ== + "@sindresorhus/is@^0.14.0": version "0.14.0" resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" @@ -1668,6 +1789,26 @@ xhr "^2.2.0" xtend "^4.0.1" +"@tsconfig/node10@^1.0.7": + version "1.0.9" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2" + integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA== + +"@tsconfig/node12@^1.0.7": + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" + integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== + +"@tsconfig/node14@^1.0.0": + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" + integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== + +"@tsconfig/node16@^1.0.2": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" + integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== + "@types/accepts@*", "@types/accepts@^1.3.5": version "1.3.5" resolved "https://registry.yarnpkg.com/@types/accepts/-/accepts-1.3.5.tgz#c34bec115cfc746e04fe5a059df4ce7e7b391575" @@ -1705,6 +1846,13 @@ "@types/connect" "*" "@types/node" "*" +"@types/cli-progress@^3.11.0": + version "3.11.0" + resolved "https://registry.yarnpkg.com/@types/cli-progress/-/cli-progress-3.11.0.tgz#ec79df99b26757c3d1c7170af8422e0fc95eef7e" + integrity sha512-XhXhBv1R/q2ahF3BM7qT5HLzJNlIL0wbcGyZVjqOTqAybAnsLisd7gy1UCyIqpL+5Iv6XhlSyzjLCnI2sIdbCg== + dependencies: + "@types/node" "*" + "@types/concat-stream@^1.6.0": version "1.6.1" resolved "https://registry.yarnpkg.com/@types/concat-stream/-/concat-stream-1.6.1.tgz#24bcfc101ecf68e886aaedce60dfd74b632a1b74" @@ -1827,16 +1975,31 @@ resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.1.tgz#459c65fa1867dafe6a8f322c4c51695663cc55e9" integrity sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w== +"@types/long@^4.0.1": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.2.tgz#b74129719fc8d11c01868010082d483b7545591a" + integrity sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA== + "@types/mime@^1": version "1.3.2" resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== 
+"@types/minimatch@^3.0.4": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.5.tgz#1001cc5e6a3704b83c236027e77f2f58ea010f40" + integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ== + "@types/node@*": version "18.7.11" resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.11.tgz#486e72cfccde88da24e1f23ff1b7d8bfb64e6250" integrity sha512-KZhFpSLlmK/sdocfSAjqPETTMd0ug6HIMIAwkwUpU79olnZdQtMxpQP+G1wDzCH7na+FltSIhbaZuKdwZ8RDrw== +"@types/node@>=13.7.0": + version "20.2.3" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.2.3.tgz#b31eb300610c3835ac008d690de6f87e28f9b878" + integrity sha512-pg9d0yC4rVNWQzX8U7xb4olIOFuuVL9za3bzMT2pu2SU0SNEi66i2qrvhE2qt0HvkhuCaWJu7pLNOt/Pj8BIrw== + "@types/node@^10.0.3", "@types/node@^10.1.0": version "10.17.60" resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.60.tgz#35f3d6213daed95da7f0f73e75bcc6980e90597b" @@ -1928,6 +2091,33 @@ resolved "https://registry.yarnpkg.com/@ungap/global-this/-/global-this-0.4.4.tgz#8a1b2cfcd3e26e079a847daba879308c924dd695" integrity sha512-mHkm6FvepJECMNthFuIgpAEFmPOk71UyXuIxYfjytvFTnSDBIz7jmViO+LfHI/AjrazWije0PnSP3+/NlwzqtA== +"@whatwg-node/events@^0.0.3": + version "0.0.3" + resolved "https://registry.yarnpkg.com/@whatwg-node/events/-/events-0.0.3.tgz#13a65dd4f5893f55280f766e29ae48074927acad" + integrity sha512-IqnKIDWfXBJkvy/k6tzskWTc2NK3LcqHlb+KHGCrjOCH4jfQckRX0NAiIcC/vIqQkzLYw2r2CTSwAxcrtcD6lA== + +"@whatwg-node/fetch@^0.8.4": + version "0.8.8" + resolved "https://registry.yarnpkg.com/@whatwg-node/fetch/-/fetch-0.8.8.tgz#48c6ad0c6b7951a73e812f09dd22d75e9fa18cae" + integrity sha512-CdcjGC2vdKhc13KKxgsc6/616BQ7ooDIgPeTuAiE8qfCnS0mGzcfCOoZXypQSz73nxI+GWc7ZReIAVhxoE1KCg== + dependencies: + "@peculiar/webcrypto" "^1.4.0" + "@whatwg-node/node-fetch" "^0.3.6" + busboy "^1.6.0" + urlpattern-polyfill "^8.0.0" + web-streams-polyfill "^3.2.1" + +"@whatwg-node/node-fetch@^0.3.6": + version 
"0.3.6" + resolved "https://registry.yarnpkg.com/@whatwg-node/node-fetch/-/node-fetch-0.3.6.tgz#e28816955f359916e2d830b68a64493124faa6d0" + integrity sha512-w9wKgDO4C95qnXZRwZTfCmLWqyRnooGjcIwG0wADWjw9/HN0p7dtvtgSvItZtUyNteEvgTrd8QojNEqV6DAGTA== + dependencies: + "@whatwg-node/events" "^0.0.3" + busboy "^1.6.0" + fast-querystring "^1.1.1" + fast-url-parser "^1.1.3" + tslib "^2.3.1" + "@wry/context@^0.5.2": version "0.5.4" resolved "https://registry.yarnpkg.com/@wry/context/-/context-0.5.4.tgz#b6c28038872e0a0e1ff14eb40b5bf4cab2ab4e06" @@ -2048,6 +2238,11 @@ acorn-globals@^1.0.4: dependencies: acorn "^2.1.0" +acorn-walk@^8.1.1: + version "8.2.0" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" + integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== + acorn@4.X: version "4.0.13" resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" @@ -2058,6 +2253,11 @@ acorn@^2.1.0, acorn@^2.4.0: resolved "https://registry.yarnpkg.com/acorn/-/acorn-2.7.0.tgz#ab6e7d9d886aaca8b085bc3312b79a198433f0e7" integrity sha1-q259nYhqrKiwhbwzEreaGYQz8Oc= +acorn@^8.4.1: + version "8.8.2" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.2.tgz#1b2f25db02af965399b9776b0c2c391276d37c4a" + integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw== + aes-js@3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" @@ -2088,6 +2288,23 @@ ansi-colors@^3.2.1: resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== +ansi-colors@^4.1.1: + version "4.1.3" + resolved 
"https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" + integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw== + +ansi-escapes@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" + integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== + +ansi-escapes@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== + dependencies: + type-fest "^0.21.3" + ansi-mark@^1.0.0: version "1.0.4" resolved "https://registry.yarnpkg.com/ansi-mark/-/ansi-mark-1.0.4.tgz#1cd4ba8d57f15f109d6aaf6ec9ca9786c8a4ee6c" @@ -2131,18 +2348,36 @@ ansi-styles@^3.2.0, ansi-styles@^3.2.1: dependencies: color-convert "^1.9.0" -ansi-styles@^4.0.0, ansi-styles@^4.1.0: +ansi-styles@^4.0.0, ansi-styles@^4.1.0, ansi-styles@^4.3.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== dependencies: color-convert "^2.0.1" +ansicolors@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979" + integrity sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg== + any-promise@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" integrity sha1-q8av7tzqUugJzcA3au0845Y10X8= +any-signal@^2.1.2: + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/any-signal/-/any-signal-2.1.2.tgz#8d48270de0605f8b218cf9abe8e9c6a0e7418102" + integrity sha512-B+rDnWasMi/eWcajPcCWSlYc7muXOrcYrqgyzcdKisl2H/WTlQ0gip1KyQfr0ZlxJdsuWCj/LWwQm7fhyhRfIQ== + dependencies: + abort-controller "^3.0.0" + native-abort-controller "^1.0.3" + +any-signal@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-3.0.1.tgz#49cae34368187a3472e31de28fb5cb1430caa9a6" + integrity sha512-xgZgJtKEa9YmDqXodIgl7Fl1C8yNXr8w6gXjqK3LW4GcEiYT+6AQfJSE/8SPsEpLLmcvbv8YU+qet94UewHxqg== + anymatch@~3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" @@ -2151,13 +2386,13 @@ anymatch@~3.1.1: normalize-path "^3.0.0" picomatch "^2.0.4" -apisauce@^1.0.1: - version "1.1.5" - resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-1.1.5.tgz#31d41a5cf805e401266cec67faf1a50f4aeae234" - integrity sha512-gKC8qb/bDJsPsnEXLZnXJ7gVx7dh87CEVNeIwv1dvaffnXoh5GHwac5pWR1P2broLiVj/fqFMQvLDDt/RhjiqA== +anymatch@~3.1.2: + version "3.1.3" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== dependencies: - axios "^0.21.2" - ramda "^0.25.0" + normalize-path "^3.0.0" + picomatch "^2.0.4" apisauce@^2.0.1: version "2.0.1" @@ -2167,6 +2402,13 @@ apisauce@^2.0.1: axios "^0.21.1" ramda "^0.25.0" +apisauce@^2.1.5: + version "2.1.6" + resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-2.1.6.tgz#94887f335bf3d735305fc895c8a191c9c2608a7f" + integrity sha512-MdxR391op/FucS2YQRfB/NMRyCnHEPDd4h17LRIuVYi0BpGmMhpxc0shbOpfs5ahABuBEffNCGal5EcsydbBWg== + dependencies: + axios "^0.21.4" + apollo-cache-control@^0.14.0: version "0.14.0" resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.14.0.tgz#95f20c3e03e7994e0d1bd48c59aeaeb575ed0ce7" @@ -2363,6 +2605,11 
@@ are-we-there-yet@~1.1.2: delegates "^1.0.0" readable-stream "^2.0.6" +arg@^4.1.0: + version "4.1.3" + resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" + integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== + argparse@^1.0.7: version "1.0.10" resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" @@ -2370,6 +2617,11 @@ argparse@^1.0.7: dependencies: sprintf-js "~1.0.2" +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + argsarray@0.0.1, argsarray@^0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/argsarray/-/argsarray-0.0.1.tgz#6e7207b4ecdb39b0af88303fa5ae22bda8df61cb" @@ -2450,6 +2702,15 @@ asn1@~0.2.3: dependencies: safer-buffer "~2.1.0" +asn1js@^3.0.1, asn1js@^3.0.5: + version "3.0.5" + resolved "https://registry.yarnpkg.com/asn1js/-/asn1js-3.0.5.tgz#5ea36820443dbefb51cc7f88a2ebb5b462114f38" + integrity sha512-FVnvrKJwpt9LP2lAMl8qZswRNm3T4q9CON+bxldk2iwk3FFpuwhx2FfinyitizWHsVYyaY+y5JzDR0rCMV5yTQ== + dependencies: + pvtsutils "^1.3.2" + pvutils "^1.1.3" + tslib "^2.4.0" + assemblyscript@0.19.10: version "0.19.10" resolved "https://registry.yarnpkg.com/assemblyscript/-/assemblyscript-0.19.10.tgz#7ede6d99c797a219beb4fa4614c3eab9e6343c8e" @@ -2458,6 +2719,15 @@ assemblyscript@0.19.10: binaryen "101.0.0-nightly.20210723" long "^4.0.0" +assemblyscript@0.19.23: + version "0.19.23" + resolved "https://registry.yarnpkg.com/assemblyscript/-/assemblyscript-0.19.23.tgz#16ece69f7f302161e2e736a0f6a474e6db72134c" + integrity sha512-fwOQNZVTMga5KRsfY80g7cpOl4PsFQczMwHzdtgoqLXaYhkhavufKb0sB0l3T1DUxpAufA0KNhlbpuuhZUwxMA== + dependencies: + binaryen "102.0.0-nightly.20211028" + long "^5.2.0" + 
source-map-support "^0.5.20" + "assemblyscript@git+https://github.com/AssemblyScript/assemblyscript.git#v0.6": version "0.6.0" resolved "git+https://github.com/AssemblyScript/assemblyscript.git#3ed76a97f05335504166fce1653da75f4face28f" @@ -2523,6 +2793,11 @@ async@^2.6.1, async@^2.6.2, async@^2.6.3: dependencies: lodash "^4.17.14" +async@^3.2.3: + version "3.2.4" + resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" + integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== + asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -2560,7 +2835,7 @@ aws4@^1.8.0: resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== -axios@^0.21.1, axios@^0.21.2: +axios@^0.21.1, axios@^0.21.4: version "0.21.4" resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== @@ -2845,6 +3120,11 @@ binaryen@101.0.0-nightly.20210723: resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-101.0.0-nightly.20210723.tgz#b6bb7f3501341727681a03866c0856500eec3740" integrity sha512-eioJNqhHlkguVSbblHOtLqlhtC882SOEPKmNFZaDuz1hzQjolxZ+eu3/kaS10n3sGPONsIZsO7R9fR00UyhEUA== +binaryen@102.0.0-nightly.20211028: + version "102.0.0-nightly.20211028" + resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-102.0.0-nightly.20211028.tgz#8f1efb0920afd34509e342e37f84313ec936afb2" + integrity sha512-GCJBVB5exbxzzvyt8MGDv/MeUjs6gkXDvf4xOIItRBptYl0Tz5sm1o/uG95YK0L0VeG5ajDu3hRtkBP2kzqC5w== + binaryen@77.0.0-nightly.20190407: version "77.0.0-nightly.20190407" resolved 
"https://registry.yarnpkg.com/binaryen/-/binaryen-77.0.0-nightly.20190407.tgz#fbe4f8ba0d6bd0809a84eb519d2d5b5ddff3a7d1" @@ -2893,6 +3173,13 @@ blakejs@^1.1.0: resolved "https://registry.yarnpkg.com/blakejs/-/blakejs-1.2.1.tgz#5057e4206eadb4a97f7c0b6e197a505042fc3814" integrity sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ== +blob-to-it@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/blob-to-it/-/blob-to-it-1.0.4.tgz#f6caf7a4e90b7bb9215fa6a318ed6bd8ad9898cb" + integrity sha512-iCmk0W4NdbrWgRRuxOriU8aM5ijeVLI61Zulsmg/lUHNr7pYjoj+U77opLefNagevtrrbMt3JQ5Qip7ar178kA== + dependencies: + browser-readablestream-to-it "^1.0.3" + bluebird@^3.4.7, bluebird@^3.5.0: version "3.7.2" resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" @@ -2967,6 +3254,13 @@ brace-expansion@^1.1.7: balanced-match "^1.0.0" concat-map "0.0.1" +brace-expansion@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" + integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + dependencies: + balanced-match "^1.0.0" + braces@^1.8.2: version "1.8.5" resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" @@ -2976,7 +3270,7 @@ braces@^1.8.2: preserve "^0.2.0" repeat-element "^1.1.2" -braces@^3.0.1, braces@~3.0.2: +braces@^3.0.1, braces@^3.0.2, braces@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== @@ -2988,6 +3282,11 @@ brorand@^1.0.1, brorand@^1.1.0: resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= 
+browser-readablestream-to-it@^1.0.0, browser-readablestream-to-it@^1.0.1, browser-readablestream-to-it@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/browser-readablestream-to-it/-/browser-readablestream-to-it-1.0.3.tgz#ac3e406c7ee6cdf0a502dd55db33bab97f7fba76" + integrity sha512-+12sHB+Br8HIh6VAMVEG5r3UXCyESIgDW7kzk3BjIXa43DVqVwL7GC5TW3jeh+72dtcH99pPVpw0X8i0jt+/kw== + browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" @@ -3137,7 +3436,7 @@ buffer@^5.0.5, buffer@^5.2.1, buffer@^5.4.2, buffer@^5.4.3, buffer@^5.5.0, buffe base64-js "^1.3.1" ieee754 "^1.1.13" -buffer@^6.0.3: +buffer@^6.0.1, buffer@^6.0.3: version "6.0.3" resolved "https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== @@ -3164,6 +3463,13 @@ busboy@^0.3.1: dependencies: dicer "0.3.0" +busboy@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" + integrity sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA== + dependencies: + streamsearch "^1.1.0" + bytes@3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" @@ -3244,6 +3550,14 @@ caniuse-lite@^1.0.30001181: resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001197.tgz#47ad15b977d2f32b3ec2fe2b087e0c50443771db" integrity sha512-8aE+sqBqtXz4G8g35Eg/XEaFr2N7rd/VQ6eABGBmNtcB8cN6qNJhMi6oSFy4UWWZgqgL3filHT8Nha4meu3tsw== +cardinal@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-2.1.1.tgz#7cc1055d822d212954d07b085dea251cc7bc5505" + integrity 
sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw== + dependencies: + ansicolors "~0.3.2" + redeyed "~2.1.0" + caseless@^0.12.0, caseless@~0.12.0: version "0.12.0" resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" @@ -3257,6 +3571,11 @@ cbor@^5.1.0: bignumber.js "^9.0.1" nofilter "^1.0.4" +cborg@^1.5.4, cborg@^1.6.0: + version "1.10.2" + resolved "https://registry.yarnpkg.com/cborg/-/cborg-1.10.2.tgz#83cd581b55b3574c816f82696307c7512db759a1" + integrity sha512-b3tFPA9pUr2zCUiCfRd2+wok2/LBSNUMKOuRRok+WlvvAgEt/PlbgPTsZUcwCOs53IJvLgTp0eotwtosE6njug== + chalk@1.1.3, chalk@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" @@ -3293,6 +3612,14 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^4.0.2, chalk@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + change-case@3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/change-case/-/change-case-3.0.2.tgz#fd48746cce02f03f0a672577d1d3a8dc2eceb037" @@ -3388,7 +3715,22 @@ chokidar@3.4.2: optionalDependencies: fsevents "~2.1.2" -chokidar@3.5.1, chokidar@^3.0.2: +chokidar@3.5.3: + version "3.5.3" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" + integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== + dependencies: + anymatch "~3.1.2" + braces "~3.0.2" + glob-parent "~5.1.2" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.6.0" + optionalDependencies: + fsevents "~2.3.2" + +chokidar@^3.0.2: version "3.5.1" resolved 
"https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.1.tgz#ee9ce7bbebd2b79f49f304799d5468e31e14e68a" integrity sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw== @@ -3448,6 +3790,13 @@ class-is@^1.1.0: resolved "https://registry.yarnpkg.com/class-is/-/class-is-1.1.0.tgz#9d3c0fba0440d211d843cec3dedfa48055005825" integrity sha512-rhjH9AG1fvabIDoGRVH587413LPjTZgmDF9fOFCbFJQV4yuocX1mHxxvXI4g3cGwbVY9wAYIoKlg1N79frJKQw== +clean-stack@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-3.0.1.tgz#155bf0b2221bf5f4fba89528d24c5953f17fe3a8" + integrity sha512-lR9wNiMRcVQjSB3a7xXGLuz4cr4wJuuXlaAEbRutGowQTmlp7R72/DOgN21e8jdwblMWl9UOJMJXarX94pzKdg== + dependencies: + escape-string-regexp "4.0.0" + cli-cursor@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" @@ -3462,6 +3811,13 @@ cli-cursor@^3.1.0: dependencies: restore-cursor "^3.1.0" +cli-progress@^3.12.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.12.0.tgz#807ee14b66bcc086258e444ad0f19e7d42577942" + integrity sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A== + dependencies: + string-width "^4.2.3" + cli-spinners@^2.0.0: version "2.5.0" resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.5.0.tgz#12763e47251bf951cb75c201dfa58ff1bcb2d047" @@ -3472,6 +3828,16 @@ cli-spinners@^2.2.0: resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.7.0.tgz#f815fd30b5f9eaac02db604c7a231ed7cb2f797a" integrity sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw== +cli-table3@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.0.tgz#b7b1bc65ca8e7b5cef9124e13dc2b21e2ce4faee" + integrity 
sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ== + dependencies: + object-assign "^4.1.0" + string-width "^4.2.0" + optionalDependencies: + colors "^1.1.2" + cli-table3@~0.5.0: version "0.5.1" resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.5.1.tgz#0252372d94dfc40dbd8df06005f48f31f656f202" @@ -3585,12 +3951,7 @@ colorette@^1.2.1: resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.2.tgz#cbcc79d5e99caea2dbf10eb3a26fd8b3e6acfa94" integrity sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w== -colors@1.3.3: - version "1.3.3" - resolved "https://registry.yarnpkg.com/colors/-/colors-1.3.3.tgz#39e005d546afe01e01f9c4ca8fa50f686a01205d" - integrity sha512-mmGt/1pZqYRjMxB1axhTo16/snVZ5krrKkcmMeVKxzECMMXoCgnvTPp10QgHfcbQZw8Dq2jMNG6je4JlWU0gWg== - -colors@^1.1.2, colors@^1.3.3: +colors@1.4.0, colors@^1.1.2, colors@^1.3.3: version "1.4.0" resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== @@ -3768,6 +4129,17 @@ cosmiconfig@6.0.0: path-type "^4.0.0" yaml "^1.7.2" +cosmiconfig@7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.0.1.tgz#714d756522cace867867ccb4474c5d01bbae5d6d" + integrity sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.2.1" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.10.0" + create-ecdh@^4.0.0: version "4.0.4" resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.4.tgz#d6e7f4bffa66736085a0762fd3a632684dabcc4e" @@ -3799,6 +4171,11 @@ create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: safe-buffer "^5.0.1" sha.js "^2.4.8" +create-require@^1.1.0: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" + integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== + cross-fetch@3.0.6, cross-fetch@^3.0.4: version "3.0.6" resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.0.6.tgz#3a4040bc8941e653e0e9cf17f29ebcd177d3365c" @@ -3822,7 +4199,7 @@ cross-fetch@^2.1.0, cross-fetch@^2.1.1: node-fetch "2.1.2" whatwg-fetch "2.0.4" -cross-spawn@^7.0.0: +cross-spawn@7.0.3, cross-spawn@^7.0.0, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -3831,6 +4208,17 @@ cross-spawn@^7.0.0: shebang-command "^2.0.0" which "^2.0.1" +cross-spawn@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" + integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + crypto-browserify@3.12.0: version "3.12.0" resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" @@ -3966,10 +4354,10 @@ debug@4.1.1: dependencies: ms "^2.1.1" -debug@4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" - integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== +debug@4.3.4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity 
sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" @@ -3980,13 +4368,6 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" -debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: - version "4.3.4" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" - integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== - dependencies: - ms "2.1.2" - decamelize@^1.1.1, decamelize@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" @@ -4138,7 +4519,7 @@ dicer@0.3.0: dependencies: streamsearch "0.1.2" -diff@4.0.2: +diff@4.0.2, diff@^4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== @@ -4159,10 +4540,21 @@ dir-glob@^3.0.1: dependencies: path-type "^4.0.0" -docker-compose@0.23.4: - version "0.23.4" - resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.4.tgz#43bcabcde55a6ba2873b52fe0ccd99dd8fdceba8" - integrity sha512-yWdXby9uQ8o4syOfvoSJ9ZlTnLipvUmDn59uaYY5VGIUSUAfMPPGqE1DE3pOCnfSg9Tl9UOOFO0PCSAzuIHmuA== +dns-over-http-resolver@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/dns-over-http-resolver/-/dns-over-http-resolver-1.2.3.tgz#194d5e140a42153f55bb79ac5a64dd2768c36af9" + integrity sha512-miDiVSI6KSNbi4SVifzO/reD8rMnxgrlnkrlkugOLQpWQTe2qMdHsZp5DmfKjxNE+/T3VAAYLQUZMv9SMr6+AA== + dependencies: + debug "^4.3.1" + native-fetch "^3.0.0" + receptacle "^1.3.2" + +docker-compose@0.23.19: + version "0.23.19" + resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.19.tgz#9947726e2fe67bdfa9e8efe1ff15aa0de2e10eb8" + integrity 
sha512-v5vNLIdUqwj4my80wxFDkNH+4S85zsRuH29SO7dCWVWPCMt/ohZBsGN6g6KXWifT0pzQ7uOxqEKCYCDPJ8Vz4g== + dependencies: + yaml "^1.10.2" docker-compose@^0.23.2: version "0.23.6" @@ -4330,11 +4722,32 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== +ejs@3.1.6: + version "3.1.6" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.6.tgz#5bfd0a0689743bb5268b3550cceeebbc1702822a" + integrity sha512-9lt9Zse4hPucPkoP7FHDF0LQAlGyF9JVpnClFLFH3aSSbxmyoqINRpp/9wePWJTUl4KOQwRL72Iw3InHPDkoGw== + dependencies: + jake "^10.6.1" + ejs@^2.6.1: version "2.7.4" resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.7.4.tgz#48661287573dcc53e366c7a1ae52c3a120eec9ba" integrity sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA== +ejs@^3.1.8: + version "3.1.9" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.9.tgz#03c9e8777fe12686a9effcef22303ca3d8eeb361" + integrity sha512-rC+QVNMJWv+MtPgkt0y+0rVEIdbtxVADApW9JXrUVlzHetgcyczP/E7DJmWJ4fJCZF2cPcBk0laWO9ZHMG3DmQ== + dependencies: + jake "^10.8.5" + +electron-fetch@^1.7.2: + version "1.9.1" + resolved "https://registry.yarnpkg.com/electron-fetch/-/electron-fetch-1.9.1.tgz#e28bfe78d467de3f2dec884b1d72b8b05322f30f" + integrity sha512-M9qw6oUILGVrcENMSRRefE1MbHPIz0h79EKIeJWK9v563aT9Qkh8aEHPO1H5vi970wPirNY+jO9OpFoLiMsMGA== + dependencies: + encoding "^0.1.13" + electron-to-chromium@^1.3.649: version "1.3.683" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.683.tgz#2c9ab53ff5275cf3dd49278af714d0f8975204f7" @@ -4396,7 +4809,7 @@ encoding-down@^6.3.0: level-codec "^9.0.0" level-errors "^2.0.0" -encoding@^0.1.11: +encoding@^0.1.11, encoding@^0.1.13: version "0.1.13" resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.13.tgz#56574afdd791f54a8e9b2785c0582a2d26210fa9" integrity 
sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A== @@ -4424,6 +4837,13 @@ enquirer@2.3.4: dependencies: ansi-colors "^3.2.1" +enquirer@2.3.6: + version "2.3.6" + resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" + integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg== + dependencies: + ansi-colors "^4.1.1" + entities@1.0: version "1.0.0" resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26" @@ -4454,6 +4874,11 @@ err-code@^2.0.0: resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA== +err-code@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/err-code/-/err-code-3.0.1.tgz#a444c7b992705f2b120ee320b09972eef331c920" + integrity sha512-GiaH0KJUewYok+eeY05IIgjtAe4Yltygk9Wqp1V5yVWLdhf0hYZchRjNIT9bb0mSwRcIusT3cx7PJUf3zEIfUA== + errno@~0.1.1: version "0.1.8" resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.8.tgz#8bb3e9c7d463be4976ff888f76b4809ebc2e811f" @@ -4636,7 +5061,7 @@ esdoc@^1.0.4: minimist "1.2.0" taffydb "2.7.3" -esprima@^4.0.0, esprima@^4.0.1: +esprima@^4.0.0, esprima@^4.0.1, esprima@~4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== @@ -5001,6 +5426,21 @@ evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: md5.js "^1.3.4" safe-buffer "^5.1.1" +execa@5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + 
dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + execa@^3.0.0: version "3.4.0" resolved "https://registry.yarnpkg.com/execa/-/execa-3.4.0.tgz#c08ed4550ef65d858fac269ffc8572446f37eb89" @@ -5138,11 +5578,21 @@ fast-check@^2.12.1: dependencies: pure-rand "^4.1.1" +fast-decode-uri-component@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fast-decode-uri-component/-/fast-decode-uri-component-1.0.1.tgz#46f8b6c22b30ff7a81357d4f59abfae938202543" + integrity sha512-WKgKWg5eUxvRZGwW8FvfbaH7AXSh2cL+3j5fMGzUMCxWBJ3dV3a7Wz8y2f/uQ0e3B6WmodD3oS54jTQ9HVTIIg== + fast-deep-equal@^3.1.1: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== +fast-fifo@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/fast-fifo/-/fast-fifo-1.2.0.tgz#2ee038da2468e8623066dee96958b0c1763aa55a" + integrity sha512-NcvQXt7Cky1cNau15FWy64IjuO8X0JijhTBBrJj1YlxlDfRkJXNaK9RFUjwpfDPzMdv7wB38jr53l9tkNLxnWg== + fast-future@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/fast-future/-/fast-future-1.0.2.tgz#8435a9aaa02d79248d17d704e76259301d99280a" @@ -5160,6 +5610,17 @@ fast-glob@^3.1.1: micromatch "^4.0.2" picomatch "^2.2.1" +fast-glob@^3.2.9: + version "3.2.12" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.12.tgz#7f39ec99c2e6ab030337142da9e0c18f37afae80" + integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + fast-json-stable-stringify@^2.0.0: version "2.1.0" resolved 
"https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" @@ -5170,11 +5631,25 @@ fast-levenshtein@~2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= +fast-querystring@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/fast-querystring/-/fast-querystring-1.1.1.tgz#f4c56ef56b1a954880cfd8c01b83f9e1a3d3fda2" + integrity sha512-qR2r+e3HvhEFmpdHMv//U8FnFlnYjaC6QKDuaXALDkw2kvHO8WDjxH+f/rHGR4Me4pnk8p9JAkRNTjYHAKRn2Q== + dependencies: + fast-decode-uri-component "^1.0.1" + fast-safe-stringify@^2.0.6: version "2.0.7" resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz#124aa885899261f68aedb42a7c080de9da608743" integrity sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA== +fast-url-parser@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d" + integrity sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ== + dependencies: + punycode "^1.3.2" + fastq@^1.6.0: version "1.11.0" resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.11.0.tgz#bb9fb955a07130a918eb63c1f5161cc32a5d0858" @@ -5234,6 +5709,13 @@ file-uri-to-path@1.0.0: resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== +filelist@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5" + integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q== + dependencies: + minimatch 
"^5.0.1" + filename-regex@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" @@ -5434,15 +5916,15 @@ fs-extra@5.0.0: jsonfile "^4.0.0" universalify "^0.1.0" -fs-extra@9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.0.0.tgz#b6afc31036e247b2466dc99c29ae797d5d4580a3" - integrity sha512-pmEYSk3vYsG/bF651KPUXZ+hvjpgWYw/Gc7W9NFUe3ZVLczKKWIij3IKpOrQcdw4TILtibFslZ0UmR8Vvzig4g== +fs-extra@9.1.0, fs-extra@^9.0.0, fs-extra@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== dependencies: at-least-node "^1.0.0" graceful-fs "^4.2.0" jsonfile "^6.0.1" - universalify "^1.0.0" + universalify "^2.0.0" fs-extra@^0.30.0: version "0.30.0" @@ -5464,15 +5946,13 @@ fs-extra@^4.0.2: jsonfile "^4.0.0" universalify "^0.1.0" -fs-extra@^9.0.0, fs-extra@^9.1.0: - version "9.1.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" - integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== +fs-jetpack@4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/fs-jetpack/-/fs-jetpack-4.3.1.tgz#cdfd4b64e6bfdec7c7dc55c76b39efaa7853bb20" + integrity sha512-dbeOK84F6BiQzk2yqqCVwCPWTxAvVGJ3fMQc6E2wuEohS28mR6yHngbrKuVCK1KHRx/ccByDylqu4H5PCP2urQ== dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^2.0.0" + minimatch "^3.0.2" + rimraf "^2.6.3" fs-jetpack@^2.2.2: version "2.4.0" @@ -5506,7 +5986,7 @@ fsevents@~2.1.2: resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" integrity 
sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== -fsevents@~2.3.1: +fsevents@~2.3.1, fsevents@~2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== @@ -5563,6 +6043,16 @@ get-intrinsic@^1.1.0, get-intrinsic@^1.1.1: has "^1.0.3" has-symbols "^1.0.1" +get-iterator@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-iterator/-/get-iterator-1.0.2.tgz#cd747c02b4c084461fac14f48f6b45a80ed25c82" + integrity sha512-v+dm9bNVfOYsY1OrhaCrmyOcYoSeVvbt+hHZ0Au+T+p1y+0Uyj9aMaGIeUTT6xdpRbWzDeYKvfOslPhggQMcsg== + +get-package-type@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" + integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== + get-params@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/get-params/-/get-params-0.1.2.tgz#bae0dfaba588a0c60d7834c0d8dc2ff60eeef2fe" @@ -5592,6 +6082,11 @@ get-stream@^5.0.0, get-stream@^5.1.0: dependencies: pump "^3.0.0" +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + get-symbol-description@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" @@ -5630,7 +6125,7 @@ glob-parent@^3.0.0: is-glob "^3.1.0" path-dirname "^1.0.0" -glob-parent@^5.1.0, glob-parent@~5.1.0: +glob-parent@^5.1.0, glob-parent@^5.1.2, glob-parent@~5.1.0, glob-parent@~5.1.2: version "5.1.2" resolved 
"https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== @@ -5663,6 +6158,16 @@ glob@7.1.6, glob@^7.1.1, glob@^7.1.2: once "^1.3.0" path-is-absolute "^1.0.0" +glob@9.3.5: + version "9.3.5" + resolved "https://registry.yarnpkg.com/glob/-/glob-9.3.5.tgz#ca2ed8ca452781a3009685607fdf025a899dfe21" + integrity sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q== + dependencies: + fs.realpath "^1.0.0" + minimatch "^8.0.2" + minipass "^4.2.4" + path-scurry "^1.6.1" + glob@^5.0.3: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -5716,21 +6221,33 @@ globby@11.0.2: merge2 "^1.3.0" slash "^3.0.0" -gluegun@^4.3.1, gluegun@^4.6.1: - version "4.6.1" - resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" - integrity sha512-Jd5hV1Uku2rjBg59mYA/bnwLwynK7u9A1zmK/LIb/p5d3pzjDCKRjWFuxZXyPwl9rsvKGhJUQxkFo2HEy8crKQ== +globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== dependencies: - apisauce "^2.0.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" + +gluegun@5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.1.2.tgz#ffa0beda0fb6bbc089a867157b08602beae2c8cf" + integrity sha512-Cwx/8S8Z4YQg07a6AFsaGnnnmd8mN17414NcPS3OoDtZRwxgsvwRNJNg69niD6fDa8oNwslCG0xH7rEpRNNE/g== + dependencies: + apisauce "^2.1.5" app-module-path "^2.2.0" - cli-table3 "~0.5.0" - colors "^1.3.3" - cosmiconfig "6.0.0" - cross-spawn "^7.0.0" - ejs "^2.6.1" - enquirer "2.3.4" - execa "^3.0.0" - 
fs-jetpack "^2.2.2" + cli-table3 "0.6.0" + colors "1.4.0" + cosmiconfig "7.0.1" + cross-spawn "7.0.3" + ejs "3.1.6" + enquirer "2.3.6" + execa "5.1.1" + fs-jetpack "4.3.1" lodash.camelcase "^4.3.0" lodash.kebabcase "^4.1.1" lodash.lowercase "^4.3.0" @@ -5746,21 +6263,21 @@ gluegun@^4.3.1, gluegun@^4.6.1: lodash.trimstart "^4.5.1" lodash.uppercase "^4.3.0" lodash.upperfirst "^4.3.1" - ora "^4.0.0" + ora "4.0.2" pluralize "^8.0.0" - ramdasauce "^2.1.0" - semver "^7.0.0" - which "^2.0.0" - yargs-parser "^16.1.0" + semver "7.3.5" + which "2.0.2" + yargs-parser "^21.0.0" -"gluegun@https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep": - version "4.3.1" - resolved "https://github.com/edgeandnode/gluegun#b34b9003d7bf556836da41b57ef36eb21570620a" +gluegun@^4.3.1, gluegun@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" + integrity sha512-Jd5hV1Uku2rjBg59mYA/bnwLwynK7u9A1zmK/LIb/p5d3pzjDCKRjWFuxZXyPwl9rsvKGhJUQxkFo2HEy8crKQ== dependencies: - apisauce "^1.0.1" + apisauce "^2.0.1" app-module-path "^2.2.0" cli-table3 "~0.5.0" - colors "1.3.3" + colors "^1.3.3" cosmiconfig "6.0.0" cross-spawn "^7.0.0" ejs "^2.6.1" @@ -5845,6 +6362,11 @@ graphql-extensions@^0.15.0: apollo-server-env "^3.1.0" apollo-server-types "^0.9.0" +graphql-import-node@^0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/graphql-import-node/-/graphql-import-node-0.0.5.tgz#caf76a6cece10858b14f27cce935655398fc1bf0" + integrity sha512-OXbou9fqh9/Lm7vwXT0XoRN9J5+WCYKnbiTalgFDvkQERITRmcfncZs6aVABedd5B85yQU5EULS4a5pnbpuI0Q== + graphql-subscriptions@^1.0.0: version "1.2.1" resolved "https://registry.yarnpkg.com/graphql-subscriptions/-/graphql-subscriptions-1.2.1.tgz#2142b2d729661ddf967b7388f7cf1dd4cf2e061d" @@ -5927,6 +6449,11 @@ graphql@15.5.0, graphql@^15.3.0, graphql@^15.5.0: resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.5.0.tgz#39d19494dbe69d1ea719915b578bf920344a69d5" integrity 
sha512-OmaM7y0kaK31NKG31q4YbD2beNYa6jBBKtMFT6gLYJljHLJr42IqJ8KX08u3Li/0ifzTU5HjmoOOrwa5BRLeDA== +graphql@^16.6.0: + version "16.6.0" + resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.6.0.tgz#c2dcffa4649db149f6282af726c8c83f1c7c5fdb" + integrity sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw== + growl@1.10.5: version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" @@ -6203,6 +6730,16 @@ human-signals@^1.1.1: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +hyperlinker@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hyperlinker/-/hyperlinker-1.0.0.tgz#23dc9e38a206b208ee49bc2d6c8ef47027df0c0e" + integrity sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ== + ice-cap@0.0.4: version "0.0.4" resolved "https://registry.yarnpkg.com/ice-cap/-/ice-cap-0.0.4.tgz#8a6d31ab4cac8d4b56de4fa946df3352561b6e18" @@ -6249,6 +6786,11 @@ ignore@^5.1.4: resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== +ignore@^5.2.0: + version "5.2.4" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" + integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== + immediate@3.0.6: version "3.0.6" resolved 
"https://registry.yarnpkg.com/immediate/-/immediate-3.0.6.tgz#9db1dbd0faf8de6fbe0f5dd5e56bb606280de69b" @@ -6264,7 +6806,12 @@ immediate@~3.2.3: resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" integrity sha1-0UD6j2FGWb1lQSMwl92qwlzdmRw= -immutable@3.8.2, immutable@^3.8.2: +immutable@4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.2.1.tgz#8a4025691018c560a40c67e43d698f816edc44d4" + integrity sha512-7WYV7Q5BTs0nlQm7tl92rDYYoyELLKHoDMBKhrxEoiV4mrfVdRz8hzPiYOzH7yWjzoVEamxRuAqhxL2PLRwZYQ== + +immutable@^3.8.2: version "3.8.2" resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" integrity sha1-wkOZUUVbs5kT2vKBN28VMOEErfM= @@ -6274,7 +6821,7 @@ immutable@~3.7.6: resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" integrity sha1-E7TTyxK++hVIKib+Gy665kAHHks= -import-fresh@^3.1.0: +import-fresh@^3.1.0, import-fresh@^3.2.1: version "3.3.0" resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== @@ -6294,6 +6841,11 @@ imurmurhash@^0.1.4: resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + inflight@^1.0.4: version "1.0.6" resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" @@ -6317,6 +6869,20 @@ ini@~1.3.0: resolved 
"https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== +interface-datastore@^6.0.2: + version "6.1.1" + resolved "https://registry.yarnpkg.com/interface-datastore/-/interface-datastore-6.1.1.tgz#5150a00de2e7513eaadba58bcafd059cb50004c1" + integrity sha512-AmCS+9CT34pp2u0QQVXjKztkuq3y5T+BIciuiHDDtDZucZD8VudosnSdUyXJV6IsRkN5jc4RFDhCk1O6Q3Gxjg== + dependencies: + interface-store "^2.0.2" + nanoid "^3.0.2" + uint8arrays "^3.0.0" + +interface-store@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/interface-store/-/interface-store-2.0.2.tgz#83175fd2b0c501585ed96db54bb8ba9d55fce34c" + integrity sha512-rScRlhDcz6k199EkHqT8NpM87ebN89ICOzILoBHgaG36/WX50N32BnU/kpZgCGPLhARRAWUUX5/cyaIjt7Kipg== + internal-slot@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.3.tgz#7347e307deeea2faac2ac6205d4bc7d34967f59c" @@ -6366,7 +6932,67 @@ ipfs-block@~0.8.1: cids "~0.7.0" class-is "^1.1.0" -ipfs-http-client@34.0.0, ipfs-http-client@^34.0.0: +ipfs-core-types@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/ipfs-core-types/-/ipfs-core-types-0.9.0.tgz#cb201ff7a9470651ba14c4e7fae56661a55bf37e" + integrity sha512-VJ8vJSHvI1Zm7/SxsZo03T+zzpsg8pkgiIi5hfwSJlsrJ1E2v68QPlnLshGHUSYw89Oxq0IbETYl2pGTFHTWfg== + dependencies: + interface-datastore "^6.0.2" + multiaddr "^10.0.0" + multiformats "^9.4.13" + +ipfs-core-utils@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/ipfs-core-utils/-/ipfs-core-utils-0.13.0.tgz#8f0ec9aaa7c24f6f307e6e76e7bdc1cefd829894" + integrity sha512-HP5EafxU4/dLW3U13CFsgqVO5Ika8N4sRSIb/dTg16NjLOozMH31TXV0Grtu2ZWo1T10ahTzMvrfT5f4mhioXw== + dependencies: + any-signal "^2.1.2" + blob-to-it "^1.0.1" + browser-readablestream-to-it "^1.0.1" + debug "^4.1.1" + err-code "^3.0.1" + ipfs-core-types "^0.9.0" + ipfs-unixfs "^6.0.3" + ipfs-utils 
"^9.0.2" + it-all "^1.0.4" + it-map "^1.0.4" + it-peekable "^1.0.2" + it-to-stream "^1.0.0" + merge-options "^3.0.4" + multiaddr "^10.0.0" + multiaddr-to-uri "^8.0.0" + multiformats "^9.4.13" + nanoid "^3.1.23" + parse-duration "^1.0.0" + timeout-abort-controller "^2.0.0" + uint8arrays "^3.0.0" + +ipfs-http-client@55.0.0: + version "55.0.0" + resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-55.0.0.tgz#8b713c5fa318e873b7d7ad099a4eb14320a5b0ce" + integrity sha512-GpvEs7C7WL9M6fN/kZbjeh4Y8YN7rY8b18tVWZnKxRsVwM25cIFrRI8CwNt3Ugin9yShieI3i9sPyzYGMrLNnQ== + dependencies: + "@ipld/dag-cbor" "^7.0.0" + "@ipld/dag-json" "^8.0.1" + "@ipld/dag-pb" "^2.1.3" + abort-controller "^3.0.0" + any-signal "^2.1.2" + debug "^4.1.1" + err-code "^3.0.1" + ipfs-core-types "^0.9.0" + ipfs-core-utils "^0.13.0" + ipfs-utils "^9.0.2" + it-first "^1.0.6" + it-last "^1.0.4" + merge-options "^3.0.4" + multiaddr "^10.0.0" + multiformats "^9.4.13" + native-abort-controller "^1.0.3" + parse-duration "^1.0.0" + stream-to-it "^0.2.2" + uint8arrays "^3.0.0" + +ipfs-http-client@^34.0.0: version "34.0.0" resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-34.0.0.tgz#8804d06a11c22306332a8ffa0949b6f672a0c9c8" integrity sha512-4RCkk8ix4Dqn6sxqFVwuXWCZ1eLFPsVaj6Ijvu1fs9VYgxgVudsW9PWwarlr4mw1xUCmPWYyXnEbGgzBrfMy0Q== @@ -6423,6 +7049,36 @@ ipfs-http-client@34.0.0, ipfs-http-client@^34.0.0: tar-stream "^2.0.1" through2 "^3.0.1" +ipfs-unixfs@^6.0.3: + version "6.0.9" + resolved "https://registry.yarnpkg.com/ipfs-unixfs/-/ipfs-unixfs-6.0.9.tgz#f6613b8e081d83faa43ed96e016a694c615a9374" + integrity sha512-0DQ7p0/9dRB6XCb0mVCTli33GzIzSVx5udpJuVM47tGcD+W+Bl4LsnoLswd3ggNnNEakMv1FdoFITiEnchXDqQ== + dependencies: + err-code "^3.0.1" + protobufjs "^6.10.2" + +ipfs-utils@^9.0.2: + version "9.0.14" + resolved "https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-9.0.14.tgz#24f5fda1f4567685eb32bca2543d518f95fd8704" + integrity 
sha512-zIaiEGX18QATxgaS0/EOQNoo33W0islREABAcxXE8n7y2MGAlB+hdsxXn4J0hGZge8IqVQhW8sWIb+oJz2yEvg== + dependencies: + any-signal "^3.0.0" + browser-readablestream-to-it "^1.0.0" + buffer "^6.0.1" + electron-fetch "^1.7.2" + err-code "^3.0.1" + is-electron "^2.2.0" + iso-url "^1.1.5" + it-all "^1.0.4" + it-glob "^1.0.1" + it-to-stream "^1.0.0" + merge-options "^3.0.4" + nanoid "^3.1.20" + native-fetch "^3.0.0" + node-fetch "^2.6.8" + react-native-fetch-api "^3.0.0" + stream-to-it "^0.2.2" + ipfs-utils@~0.0.3: version "0.0.4" resolved "https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-0.0.4.tgz#946114cfeb6afb4454b4ccb10d2327cd323b0cce" @@ -6542,6 +7198,11 @@ is-date-object@^1.0.1: dependencies: has-tostringtag "^1.0.0" +is-docker@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== + is-dotfile@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" @@ -6731,6 +7392,11 @@ is-plain-obj@^1.1.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= +is-plain-obj@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + is-posix-bracket@^0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" @@ -6851,6 +7517,13 @@ is-weakref@^1.0.1: dependencies: call-bind "^1.0.0" +is-wsl@^2.2.0: + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + isarray@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" @@ -6888,6 +7561,11 @@ iso-stream-http@~0.1.2: inherits "^2.0.1" readable-stream "^3.1.1" +iso-url@^1.1.5: + version "1.2.1" + resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-1.2.1.tgz#db96a49d8d9a64a1c889fc07cc525d093afb1811" + integrity sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng== + iso-url@~0.4.6, iso-url@~0.4.7: version "0.4.7" resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-0.4.7.tgz#de7e48120dae46921079fe78f325ac9e9217a385" @@ -6923,6 +7601,51 @@ isurl@^1.0.0-alpha5: has-to-string-tag-x "^1.2.0" is-object "^1.0.1" +it-all@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/it-all/-/it-all-1.0.6.tgz#852557355367606295c4c3b7eff0136f07749335" + integrity sha512-3cmCc6Heqe3uWi3CVM/k51fa/XbMFpQVzFoDsV0IZNHSQDyAXl3c4MjHkFX5kF3922OGj7Myv1nSEUgRtcuM1A== + +it-first@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/it-first/-/it-first-1.0.7.tgz#a4bef40da8be21667f7d23e44dae652f5ccd7ab1" + integrity sha512-nvJKZoBpZD/6Rtde6FXqwDqDZGF1sCADmr2Zoc0hZsIvnE449gRFnGctxDf09Bzc/FWnHXAdaHVIetY6lrE0/g== + +it-glob@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/it-glob/-/it-glob-1.0.2.tgz#bab9b04d6aaac42884502f3a0bfee84c7a29e15e" + integrity sha512-Ch2Dzhw4URfB9L/0ZHyY+uqOnKvBNeS/SMcRiPmJfpHiM0TsUZn+GkpcZxAoF3dJVdPm/PuIk3A4wlV7SUo23Q== + dependencies: + "@types/minimatch" "^3.0.4" + minimatch "^3.0.4" + +it-last@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/it-last/-/it-last-1.0.6.tgz#4106232e5905ec11e16de15a0e9f7037eaecfc45" + integrity 
sha512-aFGeibeiX/lM4bX3JY0OkVCFkAw8+n9lkukkLNivbJRvNz8lI3YXv5xcqhFUV2lDJiraEK3OXRDbGuevnnR67Q== + +it-map@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/it-map/-/it-map-1.0.6.tgz#6aa547e363eedcf8d4f69d8484b450bc13c9882c" + integrity sha512-XT4/RM6UHIFG9IobGlQPFQUrlEKkU4eBUFG3qhWhfAdh1JfF2x11ShCrKCdmZ0OiZppPfoLuzcfA4cey6q3UAQ== + +it-peekable@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/it-peekable/-/it-peekable-1.0.3.tgz#8ebe933767d9c5aa0ae4ef8e9cb3a47389bced8c" + integrity sha512-5+8zemFS+wSfIkSZyf0Zh5kNN+iGyccN02914BY4w/Dj+uoFEoPSvj5vaWn8pNZJNSxzjW0zHRxC3LUb2KWJTQ== + +it-to-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/it-to-stream/-/it-to-stream-1.0.0.tgz#6c47f91d5b5df28bda9334c52782ef8e97fe3a4a" + integrity sha512-pLULMZMAB/+vbdvbZtebC0nWBTbG581lk6w8P7DfIIIKUfa8FbY7Oi0FxZcFPbxvISs7A9E+cMpLDBc1XhpAOA== + dependencies: + buffer "^6.0.3" + fast-fifo "^1.0.0" + get-iterator "^1.0.2" + p-defer "^3.0.0" + p-fifo "^1.0.0" + readable-stream "^3.6.0" + iterable-ndjson@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/iterable-ndjson/-/iterable-ndjson-1.1.0.tgz#36f7e8a5bb04fd087d384f29e44fc4280fc014fc" @@ -6948,14 +7671,22 @@ iterate-value@^1.0.0: es-get-iterator "^1.0.2" iterate-iterator "^1.0.1" -jayson@3.6.6: - version "3.6.6" - resolved "https://registry.yarnpkg.com/jayson/-/jayson-3.6.6.tgz#189984f624e398f831bd2be8e8c80eb3abf764a1" - integrity sha512-f71uvrAWTtrwoww6MKcl9phQTC+56AopLyEenWvKVAIMz+q0oVGj6tenLZ7Z6UiPBkJtKLj4kt0tACllFQruGQ== +jake@^10.6.1, jake@^10.8.5: + version "10.8.6" + resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.6.tgz#227a96786a1e035214e0ba84b482d6223d41ef04" + integrity sha512-G43Ub9IYEFfu72sua6rzooi8V8Gz2lkfk48rW20vEWCGizeaEPlKB1Kh8JIA84yQbiAEfqlPmSpGgCKKxH3rDA== + dependencies: + async "^3.2.3" + chalk "^4.0.2" + filelist "^1.0.4" + minimatch "^3.1.2" + +jayson@4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/jayson/-/jayson-4.0.0.tgz#145a0ced46f900934c9b307e1332bcb0c7dbdb17" + integrity sha512-v2RNpDCMu45fnLzSk47vx7I+QUaOsox6f5X0CUlabAFwxoP+8MfAY0NQRFwOEYXIxm8Ih5y6OaEa5KYiQMkyAA== dependencies: "@types/connect" "^3.4.33" - "@types/express-serve-static-core" "^4.17.9" - "@types/lodash" "^4.14.159" "@types/node" "^12.12.54" "@types/ws" "^7.4.4" JSONStream "^1.3.5" @@ -6965,7 +7696,6 @@ jayson@3.6.6: eyes "^0.1.8" isomorphic-ws "^4.0.1" json-stringify-safe "^5.0.1" - lodash "^4.17.20" uuid "^8.3.2" ws "^7.4.5" @@ -7006,14 +7736,6 @@ js-tokens@^3.0.2: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= -js-yaml@3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" - integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - js-yaml@3.14.0: version "3.14.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" @@ -7022,7 +7744,7 @@ js-yaml@3.14.0: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@^3.13.1: +js-yaml@3.14.1, js-yaml@^3.13.1, js-yaml@^3.14.1: version "3.14.1" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== @@ -7030,6 +7752,13 @@ js-yaml@^3.13.1: argparse "^1.0.7" esprima "^4.0.0" +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + jsan@^3.1.13: version "3.1.13" resolved 
"https://registry.yarnpkg.com/jsan/-/jsan-3.1.13.tgz#4de8c7bf8d1cfcd020c313d438f930cec4b91d86" @@ -7764,6 +8493,11 @@ long@^4.0.0: resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== +long@^5.2.0: + version "5.2.3" + resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" + integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== + looper@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/looper/-/looper-3.0.0.tgz#2efa54c3b1cbaba9b94aee2e5914b0be57fbb749" @@ -7819,6 +8553,11 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" +lru-cache@^9.1.1: + version "9.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-9.1.1.tgz#c58a93de58630b688de39ad04ef02ef26f1902f1" + integrity sha512-65/Jky17UwSb0BuB9V+MyDpsOtXKmYwzhyl+cOa9XUiI4uV2Ouy/2voFP3+al0BjZbJgMBD8FojMpAf+Z+qn4A== + ltgt@2.2.1, ltgt@^2.1.2, ltgt@~2.2.0: version "2.2.1" resolved "https://registry.yarnpkg.com/ltgt/-/ltgt-2.2.1.tgz#f35ca91c493f7b73da0e07495304f17b31f87ee5" @@ -7845,6 +8584,11 @@ make-dir@^1.0.0: dependencies: pify "^3.0.0" +make-error@^1.1.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== + map-stream@0.0.6: version "0.0.6" resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.0.6.tgz#d2ef4eb811a28644c7a8989985c69c2fdd496827" @@ -7896,6 +8640,13 @@ merge-descriptors@1.0.1: resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== 
+merge-options@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/merge-options/-/merge-options-3.0.4.tgz#84709c2aa2a4b24c1981f66c179fe5565cc6dbb7" + integrity sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ== + dependencies: + is-plain-obj "^2.1.0" + merge-stream@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-1.0.1.tgz#4041202d508a342ba00174008df0c251b8c135e1" @@ -7908,7 +8659,7 @@ merge-stream@^2.0.0: resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== -merge2@^1.3.0: +merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== @@ -7959,6 +8710,14 @@ micromatch@^4.0.2: braces "^3.0.1" picomatch "^2.0.5" +micromatch@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + dependencies: + braces "^3.0.2" + picomatch "^2.3.1" + miller-rabin@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" @@ -8040,13 +8799,27 @@ minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: dependencies: brace-expansion "^1.1.7" -minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1: +minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" integrity 
sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== dependencies: brace-expansion "^1.1.7" +minimatch@^5.0.1: + version "5.1.6" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" + integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^8.0.2: + version "8.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-8.0.4.tgz#847c1b25c014d4e9a7f68aaf63dedd668a626229" + integrity sha512-W0Wvr9HyFXZRGIDgCicunpQ299OKXs9RgZfaukz4qAW/pJhcpUfupc9c+OObPOFueNy8VSrZgEmDtk6Kh4WzDA== + dependencies: + brace-expansion "^2.0.1" + minimist@0.0.8: version "0.0.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" @@ -8077,6 +8850,16 @@ minipass@^3.0.0: dependencies: yallist "^4.0.0" +minipass@^4.2.4: + version "4.2.8" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" + integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== + +"minipass@^5.0.0 || ^6.0.2": + version "6.0.2" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-6.0.2.tgz#542844b6c4ce95b202c0995b0a471f1229de4c81" + integrity sha512-MzWSV5nYVT7mVyWCwn2o7JH13w2TBRmmSqSRCKzTw+lmft9X4z+3wjvs06Tzijo5z4W/kahUCDpRXTF+ZrmF/w== + minizlib@^1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d" @@ -8189,6 +8972,25 @@ ms@2.1.3, ms@^2.1.1: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +multiaddr-to-uri@^8.0.0: + version "8.0.0" + resolved 
"https://registry.yarnpkg.com/multiaddr-to-uri/-/multiaddr-to-uri-8.0.0.tgz#65efe4b1f9de5f6b681aa42ff36a7c8db7625e58" + integrity sha512-dq4p/vsOOUdVEd1J1gl+R2GFrXJQH8yjLtz4hodqdVbieg39LvBOdMQRdQnfbg5LSM/q1BYNVf5CBbwZFFqBgA== + dependencies: + multiaddr "^10.0.0" + +multiaddr@^10.0.0: + version "10.0.1" + resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-10.0.1.tgz#0d15848871370860a4d266bb44d93b3dac5d90ef" + integrity sha512-G5upNcGzEGuTHkzxezPrrD6CaIHR9uo+7MwqhNVcXTs33IInon4y7nMiGxl2CY5hG7chvYQUQhz5V52/Qe3cbg== + dependencies: + dns-over-http-resolver "^1.2.3" + err-code "^3.0.1" + is-ip "^3.1.0" + multiformats "^9.4.5" + uint8arrays "^3.0.0" + varint "^6.0.0" + multiaddr@^6.0.3, multiaddr@^6.0.6, multiaddr@^6.1.0: version "6.1.1" resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-6.1.1.tgz#9aae57b3e399089b9896d9455afa8f6b117dff06" @@ -8252,6 +9054,11 @@ multicodec@^1.0.0, multicodec@^1.0.1: buffer "^5.6.0" varint "^5.0.0" +multiformats@^9.4.13, multiformats@^9.4.2, multiformats@^9.4.5, multiformats@^9.5.4: + version "9.9.0" + resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-9.9.0.tgz#c68354e7d21037a8f1f8833c8ccd68618e8f1d37" + integrity sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg== + multihashes@^0.4.15, multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4.15: version "0.4.21" resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-0.4.21.tgz#dc02d525579f334a7909ade8a122dabb58ccfcb5" @@ -8351,6 +9158,11 @@ nanoid@^2.0.0: resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-2.1.11.tgz#ec24b8a758d591561531b4176a01e3ab4f0f0280" integrity sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA== +nanoid@^3.0.2, nanoid@^3.1.20, nanoid@^3.1.23: + version "3.3.6" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c" + integrity 
sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== + napi-macros@~1.8.1: version "1.8.2" resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda" @@ -8361,6 +9173,21 @@ napi-macros@~2.0.0: resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-2.0.0.tgz#2b6bae421e7b96eb687aa6c77a7858640670001b" integrity sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg== +native-abort-controller@^1.0.3, native-abort-controller@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/native-abort-controller/-/native-abort-controller-1.0.4.tgz#39920155cc0c18209ff93af5bc90be856143f251" + integrity sha512-zp8yev7nxczDJMoP6pDxyD20IU0T22eX8VwN2ztDccKvSZhRaV33yP1BGwKSZfXuqWUzsXopVFjBdau9OOAwMQ== + +native-fetch@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/native-fetch/-/native-fetch-3.0.0.tgz#06ccdd70e79e171c365c75117959cf4fe14a09bb" + integrity sha512-G3Z7vx0IFb/FQ4JxvtqGABsOTIqRWvgQz6e+erkB+JJD6LrszQtMozEHI4EkmgZQvnGHrpLVzUWk7t4sJCIkVw== + +natural-orderby@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/natural-orderby/-/natural-orderby-2.0.3.tgz#8623bc518ba162f8ff1cdb8941d74deb0fdcc016" + integrity sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q== + "ndjson@github:hugomrdias/ndjson#feat/readable-stream3": version "1.5.0" resolved "https://codeload.github.com/hugomrdias/ndjson/tar.gz/4db16da6b42e5b39bf300c3a7cde62abb3fa3a11" @@ -8389,6 +9216,11 @@ next-tick@~1.0.0: resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" integrity sha1-yobR/ogoFpsBICCOPchCS524NCw= +nice-try@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" + integrity 
sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== + no-case@^2.2.0, no-case@^2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" @@ -8451,6 +9283,13 @@ node-fetch@^2.6.1: dependencies: whatwg-url "^5.0.0" +node-fetch@^2.6.8: + version "2.6.11" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25" + integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w== + dependencies: + whatwg-url "^5.0.0" + node-forge@^0.10.0: version "0.10.0" resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" @@ -8578,7 +9417,7 @@ npm-packlist@^1.1.6: npm-bundled "^1.0.1" npm-normalize-package-bin "^1.0.1" -npm-run-path@^4.0.0: +npm-run-path@^4.0.0, npm-run-path@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== @@ -8677,6 +9516,11 @@ object-path@^0.11.4: resolved "https://registry.yarnpkg.com/object-path/-/object-path-0.11.8.tgz#ed002c02bbdd0070b78a27455e8ae01fc14d4742" integrity sha512-YJjNZrlXJFM42wTBn6zgOJVar9KFJvzx6sTWDte8sWZF//cnjl0BxHNpfZx+ZffXX63A9q0b1zsFiBX4g4X5KA== +object-treeify@^1.1.33: + version "1.1.33" + resolved "https://registry.yarnpkg.com/object-treeify/-/object-treeify-1.1.33.tgz#f06fece986830a3cba78ddd32d4c11d1f76cdf40" + integrity sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A== + object.assign@4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" @@ -8749,7 +9593,7 @@ onetime@^2.0.0: dependencies: mimic-fn "^1.0.0" -onetime@^5.1.0: 
+onetime@^5.1.0, onetime@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== @@ -8788,6 +9632,19 @@ optionator@^0.8.1: type-check "~0.3.2" word-wrap "~1.2.3" +ora@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/ora/-/ora-4.0.2.tgz#0e1e68fd45b135d28648b27cf08081fa6e8a297d" + integrity sha512-YUOZbamht5mfLxPmk4M35CD/5DuOkAacxlEUbStVXpBAt4fyhBf+vZHI/HRkI++QUp3sNoeA2Gw4C+hi4eGSig== + dependencies: + chalk "^2.4.2" + cli-cursor "^3.1.0" + cli-spinners "^2.2.0" + is-interactive "^1.0.0" + log-symbols "^3.0.0" + strip-ansi "^5.2.0" + wcwidth "^1.0.1" + ora@^3.4.0: version "3.4.0" resolved "https://registry.yarnpkg.com/ora/-/ora-3.4.0.tgz#bf0752491059a3ef3ed4c85097531de9fdbcd318" @@ -8869,6 +9726,19 @@ p-cancelable@^1.0.0: resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== +p-defer@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83" + integrity sha512-ugZxsxmtTln604yeYd29EGrNhazN2lywetzpKhfmQjW/VJmhpDmWbiX+h0zL8V91R0UXkhb3KtPmyq9PZw3aYw== + +p-fifo@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-fifo/-/p-fifo-1.0.0.tgz#e29d5cf17c239ba87f51dde98c1d26a9cfe20a63" + integrity sha512-IjoCxXW48tqdtDFz6fqo5q1UfFVjjVZe8TC1QRflvNUJtNfCUhxOUw6MOVZhDPjqhSzc26xKdugsO17gmzd5+A== + dependencies: + fast-fifo "^1.0.0" + p-defer "^3.0.0" + p-finally@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" @@ -8980,6 +9850,11 @@ parse-cache-control@^1.0.1: resolved 
"https://registry.yarnpkg.com/parse-cache-control/-/parse-cache-control-1.0.1.tgz#8eeab3e54fa56920fe16ba38f77fa21aacc2d74e" integrity sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg== +parse-duration@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/parse-duration/-/parse-duration-1.0.3.tgz#b6681f5edcc2689643b34c09ea63f86f58a35814" + integrity sha512-o6NAh12na5VvR6nFejkU0gpQ8jmOY9Y9sTU2ke3L3G/d/3z8jqmbBbeyBGHU73P4JLXfc7tJARygIK3WGIkloA== + parse-glob@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" @@ -9057,6 +9932,14 @@ pascal-case@^3.1.1, pascal-case@^3.1.2: no-case "^3.0.4" tslib "^2.0.3" +password-prompt@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/password-prompt/-/password-prompt-1.1.2.tgz#85b2f93896c5bd9e9f2d6ff0627fa5af3dc00923" + integrity sha512-bpuBhROdrhuN3E7G/koAju0WjVw9/uQOG5Co5mokNj0MiOSBVZS1JTwM4zl55hu0WFmIEFvO9cU9sJQiBIYeIA== + dependencies: + ansi-escapes "^3.1.0" + cross-spawn "^6.0.5" + path-case@^2.1.0: version "2.1.1" resolved "https://registry.yarnpkg.com/path-case/-/path-case-2.1.1.tgz#94b8037c372d3fe2906e465bb45e25d226e8eea5" @@ -9091,6 +9974,11 @@ path-is-absolute@^1.0.0, path-is-absolute@^1.0.1: resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= +path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + integrity sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw== + path-key@^3.0.0, path-key@^3.1.0: version "3.1.1" resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" @@ -9101,6 +9989,14 @@ path-parse@^1.0.6: resolved 
"https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== +path-scurry@^1.6.1: + version "1.9.2" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.9.2.tgz#90f9d296ac5e37e608028e28a447b11d385b3f63" + integrity sha512-qSDLy2aGFPm8i4rsbHd4MNyTcrzHFsLQykrtbuGRknZZCBBVXSv2tSCDN2Cg6Rt/GFRw8GoW9y9Ecw5rIPG1sg== + dependencies: + lru-cache "^9.1.1" + minipass "^5.0.0 || ^6.0.2" + path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" @@ -9174,7 +10070,7 @@ performance-now@^2.1.0: resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== -picomatch@^2.0.4, picomatch@^2.2.1: +picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== @@ -9216,7 +10112,7 @@ pkg-conf@^1.1.2: object-assign "^4.0.1" symbol "^0.2.1" -pkginfo@0.4.1, pkginfo@^0.4.1: +pkginfo@^0.4.1: version "0.4.1" resolved "https://registry.yarnpkg.com/pkginfo/-/pkginfo-0.4.1.tgz#b5418ef0439de5425fc4995042dced14fb2a84ff" integrity sha1-tUGO8EOd5UJfxJlQQtztFPsqhP8= @@ -9607,6 +10503,25 @@ prop-types@^15.7.2: object-assign "^4.1.1" react-is "^16.8.1" +protobufjs@^6.10.2: + version "6.11.3" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.11.3.tgz#637a527205a35caa4f3e2a9a4a13ddffe0e7af74" + integrity sha512-xL96WDdCZYdU7Slin569tFX712BxsxslWwAfAhCYjQKGTq7dAU91Lomy6nLLhh/dyGhk/YH4TwTSRxTzhuHyZg== + dependencies: + 
"@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/long" "^4.0.1" + "@types/node" ">=13.7.0" + long "^4.0.0" + protocol-buffers-schema@^3.3.1: version "3.6.0" resolved "https://registry.yarnpkg.com/protocol-buffers-schema/-/protocol-buffers-schema-3.6.0.tgz#77bc75a48b2ff142c1ad5b5b90c94cd0fa2efd03" @@ -9695,6 +10610,11 @@ punycode@2.1.0: resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.0.tgz#5f863edc89b96db09074bad7947bf09056ca4e7d" integrity sha1-X4Y+3Im5bbCQdLrXlHvwkFbKTn0= +punycode@^1.3.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== + punycode@^2.1.0, punycode@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" @@ -9705,6 +10625,18 @@ pure-rand@^4.1.1: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-4.1.2.tgz#cbad2a3e3ea6df0a8d80d8ba204779b5679a5205" integrity sha512-uLzZpQWfroIqyFWmX/pl0OL2JHJdoU3dbh0dvZ25fChHFJJi56J5oQZhW6QgbT2Llwh1upki84LnTwlZvsungA== +pvtsutils@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/pvtsutils/-/pvtsutils-1.3.2.tgz#9f8570d132cdd3c27ab7d51a2799239bf8d8d5de" + integrity sha512-+Ipe2iNUyrZz+8K/2IOo+kKikdtfhRKzNpQbruF2URmqPtoqAs8g3xS7TJvFF2GcPXjh7DkqMnpVveRFq4PgEQ== + dependencies: + tslib "^2.4.0" + +pvutils@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/pvutils/-/pvutils-1.1.3.tgz#f35fc1d27e7cd3dfbd39c0826d173e806a03f5a3" + integrity 
sha512-pMpnA0qRdFp32b1sJl1wOJNxZLQ2cbQx+k6tjNtZ8CpvVhNqEPRgivZ2WOUev2YMajecdH7ctUPDvEe87nariQ== + qs@6.11.0, qs@^6.4.0, qs@^6.5.2: version "6.11.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" @@ -9817,6 +10749,13 @@ react-is@^16.7.0, react-is@^16.8.1: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== +react-native-fetch-api@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/react-native-fetch-api/-/react-native-fetch-api-3.0.0.tgz#81e1bb6562c292521bc4eca52fe1097f4c1ebab5" + integrity sha512-g2rtqPjdroaboDKTsJCTlcmtw54E25OjyaunUP0anOZn4Fuo2IKs8BVfe02zVggA/UysbmfSnRJIqtNkAgggNA== + dependencies: + p-defer "^3.0.0" + read-pkg-up@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02" @@ -9927,6 +10866,27 @@ readdirp@~3.5.0: dependencies: picomatch "^2.2.1" +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== + dependencies: + picomatch "^2.2.1" + +receptacle@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/receptacle/-/receptacle-1.3.2.tgz#a7994c7efafc7a01d0e2041839dab6c4951360d2" + integrity sha512-HrsFvqZZheusncQRiEE7GatOAETrARKV/lnfYicIm8lbvp/JQOdADOfhjBd2DajvoszEyxSM6RlAAIZgEoeu/A== + dependencies: + ms "^2.1.1" + +redeyed@~2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-2.1.1.tgz#8984b5815d99cb220469c99eeeffe38913e6cc0b" + integrity sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ== + dependencies: + esprima "~4.0.0" + redux-cli-logger@^2.0.1: 
version "2.1.0" resolved "https://registry.yarnpkg.com/redux-cli-logger/-/redux-cli-logger-2.1.0.tgz#7e546502a4b08c7fac4fe2faee2326a6326cb4a1" @@ -10189,6 +11149,11 @@ restore-cursor@^3.1.0: onetime "^5.1.0" signal-exit "^3.0.2" +retimer@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/retimer/-/retimer-3.0.0.tgz#98b751b1feaf1af13eb0228f8ea68b8f9da530df" + integrity sha512-WKE0j11Pa0ZJI5YIk0nflGI7SQsfl2ljihVy7ogh7DeQSeYAUi0ubZ/yEueGtDfUPk6GH5LRw1hBdLq4IwUBWA== + retry@0.13.1: version "0.13.1" resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658" @@ -10366,6 +11331,13 @@ semver@7.3.5: dependencies: lru-cache "^6.0.0" +semver@7.4.0: + version "7.4.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.4.0.tgz#8481c92feffc531ab1e012a8ffc15bdd3a0f4318" + integrity sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw== + dependencies: + lru-cache "^6.0.0" + semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" @@ -10385,6 +11357,13 @@ semver@^7.3.4: dependencies: lru-cache "^6.0.0" +semver@^7.3.7: + version "7.5.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.1.tgz#c90c4d631cf74720e46b21c1d37ea07edfab91ec" + integrity sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw== + dependencies: + lru-cache "^6.0.0" + semver@~5.4.1: version "5.4.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" @@ -10483,6 +11462,13 @@ shallowequal@^1.0.2: resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== +shebang-command@^1.2.0: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg== + dependencies: + shebang-regex "^1.0.0" + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -10490,6 +11476,11 @@ shebang-command@^2.0.0: dependencies: shebang-regex "^3.0.0" +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ== + shebang-regex@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" @@ -10509,7 +11500,7 @@ signal-exit@^3.0.0: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== -signal-exit@^3.0.2: +signal-exit@^3.0.2, signal-exit@^3.0.3: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== @@ -10614,6 +11605,14 @@ source-map-support@^0.5.11, source-map-support@^0.5.19, source-map-support@^0.5. 
buffer-from "^1.0.0" source-map "^0.6.0" +source-map-support@^0.5.20: + version "0.5.21" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + source-map-url@^0.4.0: version "0.4.1" resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.1.tgz#0af66605a745a5a2f91cf1bbf8a7afbc283dec56" @@ -10735,6 +11734,13 @@ stream-shift@^1.0.0: resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== +stream-to-it@^0.2.2: + version "0.2.4" + resolved "https://registry.yarnpkg.com/stream-to-it/-/stream-to-it-0.2.4.tgz#d2fd7bfbd4a899b4c0d6a7e6a533723af5749bd0" + integrity sha512-4vEbkSs83OahpmBybNJXlJd7d6/RxzkkSdT3I0mnGt79Xd2Kk+e1JqbvAvsQfCeKj3aKb0QIWkyK3/n0j506vQ== + dependencies: + get-iterator "^1.0.2" + stream-to-pull-stream@^1.7.2: version "1.7.3" resolved "https://registry.yarnpkg.com/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz#4161aa2d2eb9964de60bfa1af7feaf917e874ece" @@ -10748,6 +11754,11 @@ streamsearch@0.1.2: resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" integrity sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo= +streamsearch@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" + integrity sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg== + strict-uri-encode@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" @@ -10779,6 +11790,15 @@ 
string-width@^3.0.0, string-width@^3.1.0: is-fullwidth-code-point "^2.0.0" strip-ansi "^5.1.0" +string-width@^4.0.0, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^4.1.0, string-width@^4.2.0: version "4.2.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.2.tgz#dafd4f9559a7585cfba529c6a0a4f73488ebd4c5" @@ -10844,7 +11864,7 @@ strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0: +strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10938,13 +11958,28 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7.1.0: +supports-color@^7.0.0, supports-color@^7.1.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== dependencies: has-flag "^4.0.0" +supports-color@^8.1.1: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +supports-hyperlinks@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" + 
integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== + dependencies: + has-flag "^4.0.0" + supports-color "^7.0.0" + swap-case@^1.1.0: version "1.1.2" resolved "https://registry.yarnpkg.com/swap-case/-/swap-case-1.1.2.tgz#c39203a4587385fad3c850a0bd1bcafa081974e3" @@ -11159,6 +12194,15 @@ timed-out@^4.0.0, timed-out@^4.0.1: resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= +timeout-abort-controller@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/timeout-abort-controller/-/timeout-abort-controller-2.0.0.tgz#d6a59209132e520413092dd4b4d71eaaf5887feb" + integrity sha512-2FAPXfzTPYEgw27bQGTHc0SzrbmnU2eso4qo172zMLZzaGqeu09PFa5B2FCUHM1tflgRqPgn5KQgp6+Vex4uNA== + dependencies: + abort-controller "^3.0.0" + native-abort-controller "^1.0.4" + retimer "^3.0.0" + tiny-queue@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/tiny-queue/-/tiny-queue-0.2.1.tgz#25a67f2c6e253b2ca941977b5ef7442ef97a6046" @@ -11172,7 +12216,14 @@ title-case@^2.1.0: no-case "^2.2.0" upper-case "^1.0.3" -tmp-promise@3.0.2, tmp-promise@^3.0.2: +tmp-promise@3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.3.tgz#60a1a1cc98c988674fcbfd23b6e3367bdeac4ce7" + integrity sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ== + dependencies: + tmp "^0.2.0" + +tmp-promise@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.2.tgz#6e933782abff8b00c3119d63589ca1fb9caaa62a" integrity sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA== @@ -11292,11 +12343,35 @@ ts-invariant@^0.6.0: "@ungap/global-this" "^0.4.2" tslib "^1.9.3" +ts-node@^10.9.1: + version "10.9.1" + resolved 
"https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b" + integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw== + dependencies: + "@cspotcode/source-map-support" "^0.8.0" + "@tsconfig/node10" "^1.0.7" + "@tsconfig/node12" "^1.0.7" + "@tsconfig/node14" "^1.0.0" + "@tsconfig/node16" "^1.0.2" + acorn "^8.4.1" + acorn-walk "^8.1.1" + arg "^4.1.0" + create-require "^1.1.0" + diff "^4.0.1" + make-error "^1.1.1" + v8-compile-cache-lib "^3.0.1" + yn "3.1.1" + tslib@^1.10.0, tslib@^1.14.1, tslib@^1.9.3: version "1.14.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== +tslib@^2.0.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0: + version "2.5.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.2.tgz#1b6f07185c881557b0ffa84b111a0106989e8338" + integrity sha512-5svOrSA2w3iGFDs1HibEVBGbDrAY82bFQ3HZ3ixB+88nsbsWQoKqDRb5UBYAUPEzbBn6dAp5gRNXglySbx1MlA== + tslib@^2.0.3, tslib@~2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" @@ -11336,6 +12411,11 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== + type-is@^1.6.16, type-is@~1.6.18: version "1.6.18" resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" @@ -11390,6 +12470,13 @@ ua-parser-js@^0.7.18: resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.24.tgz#8d3ecea46ed4f1f1d63ec25f17d8568105dc027c" integrity 
sha512-yo+miGzQx5gakzVK3QFfN0/L9uVhosXBBO7qmnk7c2iw1IhL212wfA3zbnI54B0obGwC/5NWub/iT9sReMx+Fw== +uint8arrays@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/uint8arrays/-/uint8arrays-3.1.1.tgz#2d8762acce159ccd9936057572dade9459f65ae0" + integrity sha512-+QJa8QRnbdXVpHYjLoTpJIdCTiw9Ir62nocClWuXIq2JIh4Uta0cQsTSpFL678p2CN8B+XSApwcU+pQEqVpKWg== + dependencies: + multiformats "^9.4.2" + ultron@~1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.1.1.tgz#9fe1536a10a664a65266a1e3ccf85fd36302bc9c" @@ -11450,11 +12537,6 @@ universalify@^0.1.0, universalify@^0.1.2: resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== -universalify@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-1.0.0.tgz#b61a1da173e8435b2fe3c67d29b9adf8594bd16d" - integrity sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug== - universalify@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" @@ -11528,6 +12610,11 @@ url-to-options@^1.0.1: resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= +urlpattern-polyfill@^8.0.0: + version "8.0.2" + resolved "https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-8.0.2.tgz#99f096e35eff8bf4b5a2aa7d58a1523d6ebc7ce5" + integrity sha512-Qp95D4TPJl1kC9SKigDcqgyM2VDVO4RiJc2d4qe5GrYm+zbIQCWWKAFaJNQ4BhdFeDGwBmAxqJBwWSJDb9T3BQ== + ursa-optional@~0.10.0: version "0.10.2" resolved "https://registry.yarnpkg.com/ursa-optional/-/ursa-optional-0.10.2.tgz#bd74e7d60289c22ac2a69a3c8dea5eb2817f9681" @@ -11611,6 +12698,11 @@ uuid@^8.0.0, uuid@^8.3.2: resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +v8-compile-cache-lib@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" + integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== + vali-date@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/vali-date/-/vali-date-1.0.0.tgz#1b904a59609fb328ef078138420934f6b86709a6" @@ -11634,6 +12726,11 @@ varint@^5.0.0, varint@~5.0.0: resolved "https://registry.yarnpkg.com/varint/-/varint-5.0.2.tgz#5b47f8a947eb668b848e034dcfa87d0ff8a7f7a4" integrity sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow== +varint@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/varint/-/varint-6.0.0.tgz#9881eb0ce8feaea6512439d19ddf84bf551661d0" + integrity sha512-cXEIW6cfr15lFv563k4GuVuW/fiwjknytD37jIOLSdSWuOI6WnO/oKwmP2FQTU2l01LP8/M5TSAJpzUaGe3uWg== + vary@^1, vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" @@ -11692,6 +12789,11 @@ wcwidth@^1.0.1: dependencies: defaults "^1.0.3" +web-streams-polyfill@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6" + integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q== + web3-bzz@1.2.9: version "1.2.9" resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.2.9.tgz#25f8a373bc2dd019f47bf80523546f98b93c8790" @@ -12192,6 +13294,17 @@ web3@^1.0.0-beta.34: web3-shh "1.3.4" web3-utils "1.3.4" +webcrypto-core@^1.7.7: + version "1.7.7" + resolved 
"https://registry.yarnpkg.com/webcrypto-core/-/webcrypto-core-1.7.7.tgz#06f24b3498463e570fed64d7cab149e5437b162c" + integrity sha512-7FjigXNsBfopEj+5DV2nhNpfic2vumtjjgPmeDKk45z+MJwXKKfhPB7118Pfzrmh4jqOMST6Ch37iPAHoImg5g== + dependencies: + "@peculiar/asn1-schema" "^2.3.6" + "@peculiar/json-schema" "^1.1.12" + asn1js "^3.0.1" + pvtsutils "^1.3.2" + tslib "^2.4.0" + webidl-conversions@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-2.0.1.tgz#3bf8258f7d318c7443c36f2e169402a1a6703506" @@ -12286,6 +13399,13 @@ which@2.0.2, which@^2.0.0, which@^2.0.1: dependencies: isexe "^2.0.0" +which@^1.2.9: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + wide-align@1.1.3, wide-align@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" @@ -12293,6 +13413,13 @@ wide-align@1.1.3, wide-align@^1.1.0: dependencies: string-width "^1.0.2 || 2" +widest-line@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" + integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== + dependencies: + string-width "^4.0.0" + window-size@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.2.0.tgz#b4315bb4214a3d7058ebeee892e13fa24d98b075" @@ -12303,6 +13430,11 @@ word-wrap@~1.2.3: resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== +wordwrap@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== + wordwrap@~0.0.2: version "0.0.3" resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" @@ -12339,6 +13471,15 @@ wrap-ansi@^6.2.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" @@ -12495,23 +13636,16 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yaml@1.9.2: - version "1.9.2" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.9.2.tgz#f0cfa865f003ab707663e4f04b3956957ea564ed" - integrity sha512-HPT7cGGI0DuRcsO51qC1j9O16Dh1mZ2bnXwsi0jrSpsLz0WxOLSLXfkABVl6bZO629py3CU+OMJtpNHDLB97kg== - dependencies: - "@babel/runtime" "^7.9.2" +yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== yaml@^1.5.1: version "1.10.0" resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.0.tgz#3b593add944876077d4d683fee01081bd9fff31e" integrity sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg== 
-yaml@^1.7.2: - version "1.10.2" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" - integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== - yargs-parser@13.1.2, yargs-parser@^13.1.2: version "13.1.2" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" @@ -12552,6 +13686,11 @@ yargs-parser@^2.4.0: camelcase "^3.0.0" lodash.assign "^4.0.6" +yargs-parser@^21.0.0: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + yargs-unparser@1.6.1: version "1.6.1" resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.1.tgz#bd4b0ee05b4c94d058929c32cb09e3fce71d3c5f" @@ -12631,6 +13770,11 @@ yargs@^15.3.1: y18n "^4.0.0" yargs-parser "^18.1.2" +yn@3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" + integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== + yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" diff --git a/tests/runner-tests/data-source-revert/grafted.yaml b/tests/runner-tests/data-source-revert/grafted.yaml index 96703b41964..ac5fe04d971 100644 --- a/tests/runner-tests/data-source-revert/grafted.yaml +++ b/tests/runner-tests/data-source-revert/grafted.yaml @@ -5,7 +5,7 @@ schema: file: ./schema.graphql graft: # Must match the id from building `subgraph.yaml` - base: QmX8y4Vwg7pqEMa94GmuT8RRRTJNVKdQTT6Yq8Zw3Vvpd6 + base: QmR3TAJuimii5huAswW7uqSzJfsR4GmAxwfawK2DwF3YFh block: 3 dataSources: - kind: ethereum/contract diff --git 
a/tests/runner-tests/data-source-revert/package.json b/tests/runner-tests/data-source-revert/package.json index 813b91748ef..d6c28f38a7a 100644 --- a/tests/runner-tests/data-source-revert/package.json +++ b/tests/runner-tests/data-source-revert/package.json @@ -7,7 +7,7 @@ "deploy:test-grafted": "graph deploy test/data-source-revert-grafted grafted.yaml --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0" } } diff --git a/tests/runner-tests/data-source-revert2/package.json b/tests/runner-tests/data-source-revert2/package.json index 50c61bc7d79..4a5e20aa147 100644 --- a/tests/runner-tests/data-source-revert2/package.json +++ b/tests/runner-tests/data-source-revert2/package.json @@ -6,7 +6,7 @@ "deploy:test": "graph deploy test/data-source-revert2 --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0" } } diff --git a/tests/runner-tests/dynamic-data-source/package.json b/tests/runner-tests/dynamic-data-source/package.json index 7e31db5c444..f5396ae24eb 100644 --- a/tests/runner-tests/dynamic-data-source/package.json +++ b/tests/runner-tests/dynamic-data-source/package.json @@ -9,8 +9,8 @@ "deploy:test": "graph deploy test/dynamic-data-source --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": 
"0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/runner-tests/fatal-error/package.json b/tests/runner-tests/fatal-error/package.json index 2df8ce04bf0..f01a2c3a834 100644 --- a/tests/runner-tests/fatal-error/package.json +++ b/tests/runner-tests/fatal-error/package.json @@ -7,8 +7,8 @@ "deploy:test": "graph deploy test/fatal-error --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0", "solc": "^0.8.2" }, "dependencies": { diff --git a/tests/runner-tests/file-data-sources/package.json b/tests/runner-tests/file-data-sources/package.json index 5aa79b0f6b3..d8edecacb83 100644 --- a/tests/runner-tests/file-data-sources/package.json +++ b/tests/runner-tests/file-data-sources/package.json @@ -7,7 +7,7 @@ "deploy:test": "graph deploy test/file-data-sources --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" + "@graphprotocol/graph-cli": "0.50.0", + "@graphprotocol/graph-ts": "0.30.0" } } diff --git a/tests/runner-tests/typename/package.json b/tests/runner-tests/typename/package.json index 4d4d46b63e6..67cfbaefd90 100644 --- a/tests/runner-tests/typename/package.json +++ b/tests/runner-tests/typename/package.json @@ -7,7 +7,7 @@ "deploy:test": "graph deploy test/typename --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, "devDependencies": { - "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", - "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" + "@graphprotocol/graph-cli": 
"0.50.0", + "@graphprotocol/graph-ts": "0.30.0" } } diff --git a/tests/runner-tests/yarn.lock b/tests/runner-tests/yarn.lock index df7ba6fc49a..8e618381859 100644 --- a/tests/runner-tests/yarn.lock +++ b/tests/runner-tests/yarn.lock @@ -513,13 +513,6 @@ dependencies: regenerator-runtime "^0.13.4" -"@babel/runtime@^7.9.2": - version "7.18.9" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.18.9.tgz#b4fcfce55db3d2e5e080d2490f608a3b9f407f4a" - integrity sha512-lkqXDcvlFT5rvEjiu6+QYO+1GXrEHRo2LOtS7E4GtX5ESIZOgepqsZBVIj6Pv+a6zqsya9VCgiK1KAK4BvJDAw== - dependencies: - regenerator-runtime "^0.13.4" - "@babel/template@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.12.13.tgz#530265be8a2589dbb37523844c5bcb55947fb327" @@ -577,6 +570,13 @@ lodash "^4.17.19" to-fast-properties "^2.0.0" +"@cspotcode/source-map-support@^0.8.0": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" + integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== + dependencies: + "@jridgewell/trace-mapping" "0.3.9" + "@ethersproject/abi@5.0.0-beta.153": version "5.0.0-beta.153" resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.0.0-beta.153.tgz#43a37172b33794e4562999f6e2d555b7599a8eee" @@ -861,39 +861,52 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" -"@graphprotocol/graph-cli@https://github.com/graphprotocol/graph-cli#main": - version "0.33.0" - resolved "https://github.com/graphprotocol/graph-cli#47e075a9701680580e0e8e09c5444963224dbf5c" - dependencies: - assemblyscript "0.19.10" +"@float-capital/float-subgraph-uncrashable@^0.0.0-alpha.4": + version "0.0.0-internal-testing.5" + resolved 
"https://registry.yarnpkg.com/@float-capital/float-subgraph-uncrashable/-/float-subgraph-uncrashable-0.0.0-internal-testing.5.tgz#060f98440f6e410812766c5b040952d2d02e2b73" + integrity sha512-yZ0H5e3EpAYKokX/AbtplzlvSxEJY7ZfpvQyDzyODkks0hakAAlDG6fQu1SlDJMWorY7bbq1j7fCiFeTWci6TA== + dependencies: + "@rescript/std" "9.0.0" + graphql "^16.6.0" + graphql-import-node "^0.0.5" + js-yaml "^4.1.0" + +"@graphprotocol/graph-cli@0.50.0": + version "0.50.0" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.50.0.tgz#1ffef3834cc8376e64a05d6ee327c9d4eb269231" + integrity sha512-Fw46oN06ec1pf//vTPFzmyL0LRD9ed/XXfibQQClyMLfNlYAATZvz930RH3SHb2N4ZLdfKDDkY1SLgtDghtrow== + dependencies: + "@float-capital/float-subgraph-uncrashable" "^0.0.0-alpha.4" + "@oclif/core" "2.8.4" + "@whatwg-node/fetch" "^0.8.4" + assemblyscript "0.19.23" binary-install-raw "0.0.13" chalk "3.0.0" - chokidar "3.5.1" - debug "4.3.1" - docker-compose "0.23.4" + chokidar "3.5.3" + debug "4.3.4" + docker-compose "0.23.19" dockerode "2.5.8" - fs-extra "9.0.0" - glob "7.1.6" - gluegun "https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep" + fs-extra "9.1.0" + glob "9.3.5" + gluegun "5.1.2" graphql "15.5.0" - immutable "3.8.2" - ipfs-http-client "34.0.0" - jayson "3.6.6" - js-yaml "3.13.1" - node-fetch "2.6.0" - pkginfo "0.4.1" + immutable "4.2.1" + ipfs-http-client "55.0.0" + jayson "4.0.0" + js-yaml "3.14.1" prettier "1.19.1" request "2.88.2" - semver "7.3.5" + semver "7.4.0" sync-request "6.1.0" - tmp-promise "3.0.2" + tmp-promise "3.0.3" web3-eth-abi "1.7.0" which "2.0.2" - yaml "1.9.2" + yaml "1.10.2" -"@graphprotocol/graph-ts@https://github.com/graphprotocol/graph-ts#main": - version "0.28.1" - resolved "https://github.com/graphprotocol/graph-ts#4e91d2c0b695c7689aba205516d3e80fb5588454" +"@graphprotocol/graph-ts@0.30.0": + version "0.30.0" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-ts/-/graph-ts-0.30.0.tgz#591dee3c7d9fc236ad57ce0712779e94aef9a50a" + 
integrity sha512-h5tJqlsZXglGYM0PcBsBOqof4PT0Fr4Z3QBTYN/IjMF3VvRX2A8/bdpqaAnva+2N0uAfXXwRcwcOcW5O35yzXw== dependencies: assemblyscript "0.19.10" @@ -1192,11 +1205,52 @@ normalize-path "^2.0.1" through2 "^2.0.3" +"@ipld/dag-cbor@^7.0.0": + version "7.0.3" + resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-7.0.3.tgz#aa31b28afb11a807c3d627828a344e5521ac4a1e" + integrity sha512-1VVh2huHsuohdXC1bGJNE8WR72slZ9XE2T3wbBBq31dm7ZBatmKLLxrB+XAqafxfRFjv08RZmj/W/ZqaM13AuA== + dependencies: + cborg "^1.6.0" + multiformats "^9.5.4" + +"@ipld/dag-json@^8.0.1": + version "8.0.11" + resolved "https://registry.yarnpkg.com/@ipld/dag-json/-/dag-json-8.0.11.tgz#8d30cc2dfacb0aef04d327465d3df91e79e8b6ce" + integrity sha512-Pea7JXeYHTWXRTIhBqBlhw7G53PJ7yta3G/sizGEZyzdeEwhZRr0od5IQ0r2ZxOt1Do+2czddjeEPp+YTxDwCA== + dependencies: + cborg "^1.5.4" + multiformats "^9.5.4" + +"@ipld/dag-pb@^2.1.3": + version "2.1.18" + resolved "https://registry.yarnpkg.com/@ipld/dag-pb/-/dag-pb-2.1.18.tgz#12d63e21580e87c75fd1a2c62e375a78e355c16f" + integrity sha512-ZBnf2fuX9y3KccADURG5vb9FaOeMjFkCrNysB0PtftME/4iCTjxfaLoNq/IAh5fTqUOMXvryN6Jyka4ZGuMLIg== + dependencies: + multiformats "^9.5.4" + "@josephg/resolvable@^1.0.0": version "1.0.1" resolved "https://registry.yarnpkg.com/@josephg/resolvable/-/resolvable-1.0.1.tgz#69bc4db754d79e1a2f17a650d3466e038d94a5eb" integrity sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg== +"@jridgewell/resolve-uri@^3.0.3": + version "3.1.1" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721" + integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== + +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.4.15" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" + integrity 
sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== + +"@jridgewell/trace-mapping@0.3.9": + version "0.3.9" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" + integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@nodelib/fs.scandir@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" @@ -1218,6 +1272,68 @@ "@nodelib/fs.scandir" "2.1.4" fastq "^1.6.0" +"@oclif/core@2.8.4": + version "2.8.4" + resolved "https://registry.yarnpkg.com/@oclif/core/-/core-2.8.4.tgz#7b453be6d4cd060ff4990bc8e31824a1de308354" + integrity sha512-VlFDhoAJ1RDwcpDF46wAlciWTIryapMUViACttY9GwX6Ci6Lud1awe/pC3k4jad5472XshnPQV4bHAl4a/yxpA== + dependencies: + "@types/cli-progress" "^3.11.0" + ansi-escapes "^4.3.2" + ansi-styles "^4.3.0" + cardinal "^2.1.1" + chalk "^4.1.2" + clean-stack "^3.0.1" + cli-progress "^3.12.0" + debug "^4.3.4" + ejs "^3.1.8" + fs-extra "^9.1.0" + get-package-type "^0.1.0" + globby "^11.1.0" + hyperlinker "^1.0.0" + indent-string "^4.0.0" + is-wsl "^2.2.0" + js-yaml "^3.14.1" + natural-orderby "^2.0.3" + object-treeify "^1.1.33" + password-prompt "^1.1.2" + semver "^7.3.7" + string-width "^4.2.3" + strip-ansi "^6.0.1" + supports-color "^8.1.1" + supports-hyperlinks "^2.2.0" + ts-node "^10.9.1" + tslib "^2.5.0" + widest-line "^3.1.0" + wordwrap "^1.0.0" + wrap-ansi "^7.0.0" + +"@peculiar/asn1-schema@^2.3.6": + version "2.3.6" + resolved "https://registry.yarnpkg.com/@peculiar/asn1-schema/-/asn1-schema-2.3.6.tgz#3dd3c2ade7f702a9a94dfb395c192f5fa5d6b922" + integrity sha512-izNRxPoaeJeg/AyH8hER6s+H7p4itk+03QCa4sbxI3lNdseQYCuxzgsuNK8bTXChtLTjpJz6NmXKA73qLa3rCA== + dependencies: + asn1js "^3.0.5" + pvtsutils 
"^1.3.2" + tslib "^2.4.0" + +"@peculiar/json-schema@^1.1.12": + version "1.1.12" + resolved "https://registry.yarnpkg.com/@peculiar/json-schema/-/json-schema-1.1.12.tgz#fe61e85259e3b5ba5ad566cb62ca75b3d3cd5339" + integrity sha512-coUfuoMeIB7B8/NMekxaDzLhaYmp0HZNPEjYRm9goRou8UZIC3z21s0sL9AWoCw4EG876QyO3kYrc61WNF9B/w== + dependencies: + tslib "^2.0.0" + +"@peculiar/webcrypto@^1.4.0": + version "1.4.3" + resolved "https://registry.yarnpkg.com/@peculiar/webcrypto/-/webcrypto-1.4.3.tgz#078b3e8f598e847b78683dc3ba65feb5029b93a7" + integrity sha512-VtaY4spKTdN5LjJ04im/d/joXuvLbQdgy5Z4DXF4MFZhQ+MTrejbNMkfZBp1Bs3O5+bFqnJgyGdPuZQflvIa5A== + dependencies: + "@peculiar/asn1-schema" "^2.3.6" + "@peculiar/json-schema" "^1.1.12" + pvtsutils "^1.3.2" + tslib "^2.5.0" + webcrypto-core "^1.7.7" + "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2": version "1.1.2" resolved "https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf" @@ -1315,6 +1431,11 @@ resolved "https://registry.yarnpkg.com/@redux-saga/types/-/types-1.1.0.tgz#0e81ce56b4883b4b2a3001ebe1ab298b84237204" integrity sha512-afmTuJrylUU/0OtqzaRkbyYFFNgCF73Bvel/sw90pvGrWIZ+vyoIJqA6eMSoA6+nb443kTmulmBtC9NerXboNg== +"@rescript/std@9.0.0": + version "9.0.0" + resolved "https://registry.yarnpkg.com/@rescript/std/-/std-9.0.0.tgz#df53f3fa5911cb4e85bd66b92e9e58ddf3e4a7e1" + integrity sha512-zGzFsgtZ44mgL4Xef2gOy1hrRVdrs9mcxCOOKZrIPsmbZW14yTkaF591GXxpQvjXiHtgZ/iA9qLyWH6oSReIxQ== + "@sindresorhus/is@^0.14.0": version "0.14.0" resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" @@ -1637,6 +1758,26 @@ xhr "^2.2.0" xtend "^4.0.1" +"@tsconfig/node10@^1.0.7": + version "1.0.9" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2" + integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA== + 
+"@tsconfig/node12@^1.0.7": + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" + integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== + +"@tsconfig/node14@^1.0.0": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" + integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== + +"@tsconfig/node16@^1.0.2": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.3.tgz#472eaab5f15c1ffdd7f8628bd4c4f753995ec79e" + integrity sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ== + "@types/accepts@*", "@types/accepts@^1.3.5": version "1.3.5" resolved "https://registry.yarnpkg.com/@types/accepts/-/accepts-1.3.5.tgz#c34bec115cfc746e04fe5a059df4ce7e7b391575" @@ -1674,6 +1815,13 @@ "@types/connect" "*" "@types/node" "*" +"@types/cli-progress@^3.11.0": + version "3.11.0" + resolved "https://registry.yarnpkg.com/@types/cli-progress/-/cli-progress-3.11.0.tgz#ec79df99b26757c3d1c7170af8422e0fc95eef7e" + integrity sha512-XhXhBv1R/q2ahF3BM7qT5HLzJNlIL0wbcGyZVjqOTqAybAnsLisd7gy1UCyIqpL+5Iv6XhlSyzjLCnI2sIdbCg== + dependencies: + "@types/node" "*" + "@types/concat-stream@^1.6.0": version "1.6.1" resolved "https://registry.yarnpkg.com/@types/concat-stream/-/concat-stream-1.6.1.tgz#24bcfc101ecf68e886aaedce60dfd74b632a1b74" @@ -1717,15 +1865,6 @@ "@types/qs" "*" "@types/range-parser" "*" -"@types/express-serve-static-core@^4.17.9": - version "4.17.30" - resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz#0f2f99617fa8f9696170c46152ccf7500b34ac04" - integrity sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ== - dependencies: - "@types/node" "*" - 
"@types/qs" "*" - "@types/range-parser" "*" - "@types/express@*", "@types/express@^4.17.12": version "4.17.13" resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.13.tgz#a76e2995728999bab51a33fabce1d705a3709034" @@ -1786,26 +1925,36 @@ "@types/koa-compose" "*" "@types/node" "*" -"@types/lodash@^4.14.159": - version "4.14.184" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.184.tgz#23f96cd2a21a28e106dc24d825d4aa966de7a9fe" - integrity sha512-RoZphVtHbxPZizt4IcILciSWiC6dcn+eZ8oX9IWEYfDMcocdd42f7NPI6fQj+6zI8y4E0L7gu2pcZKLGTRaV9Q== - "@types/long@^4.0.0": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.1.tgz#459c65fa1867dafe6a8f322c4c51695663cc55e9" integrity sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w== +"@types/long@^4.0.1": + version "4.0.2" + resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.2.tgz#b74129719fc8d11c01868010082d483b7545591a" + integrity sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA== + "@types/mime@^1": version "1.3.2" resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== +"@types/minimatch@^3.0.4": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.5.tgz#1001cc5e6a3704b83c236027e77f2f58ea010f40" + integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ== + "@types/node@*": version "18.7.11" resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.11.tgz#486e72cfccde88da24e1f23ff1b7d8bfb64e6250" integrity sha512-KZhFpSLlmK/sdocfSAjqPETTMd0ug6HIMIAwkwUpU79olnZdQtMxpQP+G1wDzCH7na+FltSIhbaZuKdwZ8RDrw== +"@types/node@>=13.7.0": + version "18.16.3" + resolved 
"https://registry.yarnpkg.com/@types/node/-/node-18.16.3.tgz#6bda7819aae6ea0b386ebc5b24bdf602f1b42b01" + integrity sha512-OPs5WnnT1xkCBiuQrZA4+YAV4HEJejmHneyraIaxsbev5yCEr6KMwINNFP9wQeFIw8FWcoTqF3vQsa5CDaI+8Q== + "@types/node@^10.0.3", "@types/node@^10.1.0": version "10.17.60" resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.60.tgz#35f3d6213daed95da7f0f73e75bcc6980e90597b" @@ -1897,6 +2046,33 @@ resolved "https://registry.yarnpkg.com/@ungap/global-this/-/global-this-0.4.4.tgz#8a1b2cfcd3e26e079a847daba879308c924dd695" integrity sha512-mHkm6FvepJECMNthFuIgpAEFmPOk71UyXuIxYfjytvFTnSDBIz7jmViO+LfHI/AjrazWije0PnSP3+/NlwzqtA== +"@whatwg-node/events@^0.0.3": + version "0.0.3" + resolved "https://registry.yarnpkg.com/@whatwg-node/events/-/events-0.0.3.tgz#13a65dd4f5893f55280f766e29ae48074927acad" + integrity sha512-IqnKIDWfXBJkvy/k6tzskWTc2NK3LcqHlb+KHGCrjOCH4jfQckRX0NAiIcC/vIqQkzLYw2r2CTSwAxcrtcD6lA== + +"@whatwg-node/fetch@^0.8.4": + version "0.8.8" + resolved "https://registry.yarnpkg.com/@whatwg-node/fetch/-/fetch-0.8.8.tgz#48c6ad0c6b7951a73e812f09dd22d75e9fa18cae" + integrity sha512-CdcjGC2vdKhc13KKxgsc6/616BQ7ooDIgPeTuAiE8qfCnS0mGzcfCOoZXypQSz73nxI+GWc7ZReIAVhxoE1KCg== + dependencies: + "@peculiar/webcrypto" "^1.4.0" + "@whatwg-node/node-fetch" "^0.3.6" + busboy "^1.6.0" + urlpattern-polyfill "^8.0.0" + web-streams-polyfill "^3.2.1" + +"@whatwg-node/node-fetch@^0.3.6": + version "0.3.6" + resolved "https://registry.yarnpkg.com/@whatwg-node/node-fetch/-/node-fetch-0.3.6.tgz#e28816955f359916e2d830b68a64493124faa6d0" + integrity sha512-w9wKgDO4C95qnXZRwZTfCmLWqyRnooGjcIwG0wADWjw9/HN0p7dtvtgSvItZtUyNteEvgTrd8QojNEqV6DAGTA== + dependencies: + "@whatwg-node/events" "^0.0.3" + busboy "^1.6.0" + fast-querystring "^1.1.1" + fast-url-parser "^1.1.3" + tslib "^2.3.1" + "@wry/context@^0.5.2": version "0.5.4" resolved "https://registry.yarnpkg.com/@wry/context/-/context-0.5.4.tgz#b6c28038872e0a0e1ff14eb40b5bf4cab2ab4e06" @@ -2017,6 +2193,11 @@ acorn-globals@^1.0.4: 
dependencies: acorn "^2.1.0" +acorn-walk@^8.1.1: + version "8.2.0" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" + integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== + acorn@4.X: version "4.0.13" resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" @@ -2027,6 +2208,11 @@ acorn@^2.1.0, acorn@^2.4.0: resolved "https://registry.yarnpkg.com/acorn/-/acorn-2.7.0.tgz#ab6e7d9d886aaca8b085bc3312b79a198433f0e7" integrity sha1-q259nYhqrKiwhbwzEreaGYQz8Oc= +acorn@^8.4.1: + version "8.8.2" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.2.tgz#1b2f25db02af965399b9776b0c2c391276d37c4a" + integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw== + aes-js@3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" @@ -2057,6 +2243,23 @@ ansi-colors@^3.2.1: resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== +ansi-colors@^4.1.1: + version "4.1.3" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" + integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw== + +ansi-escapes@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" + integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== + +ansi-escapes@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity 
sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== + dependencies: + type-fest "^0.21.3" + ansi-mark@^1.0.0: version "1.0.4" resolved "https://registry.yarnpkg.com/ansi-mark/-/ansi-mark-1.0.4.tgz#1cd4ba8d57f15f109d6aaf6ec9ca9786c8a4ee6c" @@ -2100,18 +2303,36 @@ ansi-styles@^3.2.0, ansi-styles@^3.2.1: dependencies: color-convert "^1.9.0" -ansi-styles@^4.0.0, ansi-styles@^4.1.0: +ansi-styles@^4.0.0, ansi-styles@^4.1.0, ansi-styles@^4.3.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== dependencies: color-convert "^2.0.1" +ansicolors@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979" + integrity sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg== + any-promise@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" integrity sha1-q8av7tzqUugJzcA3au0845Y10X8= +any-signal@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-2.1.2.tgz#8d48270de0605f8b218cf9abe8e9c6a0e7418102" + integrity sha512-B+rDnWasMi/eWcajPcCWSlYc7muXOrcYrqgyzcdKisl2H/WTlQ0gip1KyQfr0ZlxJdsuWCj/LWwQm7fhyhRfIQ== + dependencies: + abort-controller "^3.0.0" + native-abort-controller "^1.0.3" + +any-signal@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-3.0.1.tgz#49cae34368187a3472e31de28fb5cb1430caa9a6" + integrity sha512-xgZgJtKEa9YmDqXodIgl7Fl1C8yNXr8w6gXjqK3LW4GcEiYT+6AQfJSE/8SPsEpLLmcvbv8YU+qet94UewHxqg== + anymatch@~3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" @@ -2120,13 +2341,13 
@@ anymatch@~3.1.1: normalize-path "^3.0.0" picomatch "^2.0.4" -apisauce@^1.0.1: - version "1.1.5" - resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-1.1.5.tgz#31d41a5cf805e401266cec67faf1a50f4aeae234" - integrity sha512-gKC8qb/bDJsPsnEXLZnXJ7gVx7dh87CEVNeIwv1dvaffnXoh5GHwac5pWR1P2broLiVj/fqFMQvLDDt/RhjiqA== +anymatch@~3.1.2: + version "3.1.3" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== dependencies: - axios "^0.21.2" - ramda "^0.25.0" + normalize-path "^3.0.0" + picomatch "^2.0.4" apisauce@^2.0.1: version "2.0.1" @@ -2136,6 +2357,13 @@ apisauce@^2.0.1: axios "^0.21.1" ramda "^0.25.0" +apisauce@^2.1.5: + version "2.1.6" + resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-2.1.6.tgz#94887f335bf3d735305fc895c8a191c9c2608a7f" + integrity sha512-MdxR391op/FucS2YQRfB/NMRyCnHEPDd4h17LRIuVYi0BpGmMhpxc0shbOpfs5ahABuBEffNCGal5EcsydbBWg== + dependencies: + axios "^0.21.4" + apollo-cache-control@^0.14.0: version "0.14.0" resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.14.0.tgz#95f20c3e03e7994e0d1bd48c59aeaeb575ed0ce7" @@ -2332,6 +2560,11 @@ are-we-there-yet@~1.1.2: delegates "^1.0.0" readable-stream "^2.0.6" +arg@^4.1.0: + version "4.1.3" + resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" + integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== + argparse@^1.0.7: version "1.0.10" resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" @@ -2339,6 +2572,11 @@ argparse@^1.0.7: dependencies: sprintf-js "~1.0.2" +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity 
sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + argsarray@0.0.1, argsarray@^0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/argsarray/-/argsarray-0.0.1.tgz#6e7207b4ecdb39b0af88303fa5ae22bda8df61cb" @@ -2397,12 +2635,7 @@ asap@~2.0.3, asap@~2.0.6: resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= -asmcrypto.js@^2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/asmcrypto.js/-/asmcrypto.js-2.3.2.tgz#b9f84bd0a1fb82f21f8c29cc284a707ad17bba2e" - integrity sha512-3FgFARf7RupsZETQ1nHnhLUUvpcttcCq1iZCaVAbJZbCZ5VNRrNyvpDyHTOb0KC3llFcsyOT/a99NZcCbeiEsA== - -asn1.js@^5.0.1, asn1.js@^5.2.0: +asn1.js@^5.2.0: version "5.4.1" resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-5.4.1.tgz#11a980b84ebb91781ce35b0fdc2ee294e3783f07" integrity sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA== @@ -2419,6 +2652,15 @@ asn1@~0.2.3: dependencies: safer-buffer "~2.1.0" +asn1js@^3.0.1, asn1js@^3.0.5: + version "3.0.5" + resolved "https://registry.yarnpkg.com/asn1js/-/asn1js-3.0.5.tgz#5ea36820443dbefb51cc7f88a2ebb5b462114f38" + integrity sha512-FVnvrKJwpt9LP2lAMl8qZswRNm3T4q9CON+bxldk2iwk3FFpuwhx2FfinyitizWHsVYyaY+y5JzDR0rCMV5yTQ== + dependencies: + pvtsutils "^1.3.2" + pvutils "^1.1.3" + tslib "^2.4.0" + assemblyscript@0.19.10: version "0.19.10" resolved "https://registry.yarnpkg.com/assemblyscript/-/assemblyscript-0.19.10.tgz#7ede6d99c797a219beb4fa4614c3eab9e6343c8e" @@ -2427,6 +2669,15 @@ assemblyscript@0.19.10: binaryen "101.0.0-nightly.20210723" long "^4.0.0" +assemblyscript@0.19.23: + version "0.19.23" + resolved "https://registry.yarnpkg.com/assemblyscript/-/assemblyscript-0.19.23.tgz#16ece69f7f302161e2e736a0f6a474e6db72134c" + integrity sha512-fwOQNZVTMga5KRsfY80g7cpOl4PsFQczMwHzdtgoqLXaYhkhavufKb0sB0l3T1DUxpAufA0KNhlbpuuhZUwxMA== + dependencies: + binaryen 
"102.0.0-nightly.20211028" + long "^5.2.0" + source-map-support "^0.5.20" + assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" @@ -2463,12 +2714,10 @@ async@^2.0.1, async@^2.1.2, async@^2.4.0, async@^2.5.0: dependencies: lodash "^4.17.14" -async@^2.6.1, async@^2.6.2, async@^2.6.3: - version "2.6.4" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" - integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== - dependencies: - lodash "^4.17.14" +async@^3.2.3: + version "3.2.4" + resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" + integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== asynckit@^0.4.0: version "0.4.0" @@ -2507,7 +2756,7 @@ aws4@^1.8.0: resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== -axios@^0.21.1, axios@^0.21.2: +axios@^0.21.1, axios@^0.21.4: version "0.21.4" resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== @@ -2792,6 +3041,11 @@ binaryen@101.0.0-nightly.20210723: resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-101.0.0-nightly.20210723.tgz#b6bb7f3501341727681a03866c0856500eec3740" integrity sha512-eioJNqhHlkguVSbblHOtLqlhtC882SOEPKmNFZaDuz1hzQjolxZ+eu3/kaS10n3sGPONsIZsO7R9fR00UyhEUA== +binaryen@102.0.0-nightly.20211028: + version "102.0.0-nightly.20211028" + resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-102.0.0-nightly.20211028.tgz#8f1efb0920afd34509e342e37f84313ec936afb2" + integrity 
sha512-GCJBVB5exbxzzvyt8MGDv/MeUjs6gkXDvf4xOIItRBptYl0Tz5sm1o/uG95YK0L0VeG5ajDu3hRtkBP2kzqC5w== + bindings@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" @@ -2799,13 +3053,6 @@ bindings@^1.5.0: dependencies: file-uri-to-path "1.0.0" -bip66@^1.1.5: - version "1.1.5" - resolved "https://registry.yarnpkg.com/bip66/-/bip66-1.1.5.tgz#01fa8748785ca70955d5011217d1b3139969ca22" - integrity sha512-nemMHz95EmS38a26XbbdxIYj5csHd3RMP3H5bwQknX0WYHF01qhpufP42mLOwVICuH2JmhIhXiWs89MfUGL7Xw== - dependencies: - safe-buffer "^5.0.1" - bl@^1.0.0: version "1.2.3" resolved "https://registry.yarnpkg.com/bl/-/bl-1.2.3.tgz#1e8dd80142eac80d7158c9dccc047fb620e035e7" @@ -2814,27 +3061,18 @@ bl@^1.0.0: readable-stream "^2.3.5" safe-buffer "^5.1.1" -bl@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/bl/-/bl-3.0.1.tgz#1cbb439299609e419b5a74d7fce2f8b37d8e5c6f" - integrity sha512-jrCW5ZhfQ/Vt07WX1Ngs+yn9BDqPL/gw28S7s9H6QK/gupnizNzJAss5akW20ISgOrbLTlXOOCTJeNUQqruAWQ== - dependencies: - readable-stream "^3.0.1" - -bl@^4.0.3: - version "4.1.0" - resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" - integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== - dependencies: - buffer "^5.5.0" - inherits "^2.0.4" - readable-stream "^3.4.0" - blakejs@^1.1.0: version "1.2.1" resolved "https://registry.yarnpkg.com/blakejs/-/blakejs-1.2.1.tgz#5057e4206eadb4a97f7c0b6e197a505042fc3814" integrity sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ== +blob-to-it@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/blob-to-it/-/blob-to-it-1.0.4.tgz#f6caf7a4e90b7bb9215fa6a318ed6bd8ad9898cb" + integrity sha512-iCmk0W4NdbrWgRRuxOriU8aM5ijeVLI61Zulsmg/lUHNr7pYjoj+U77opLefNagevtrrbMt3JQ5Qip7ar178kA== + dependencies: + browser-readablestream-to-it "^1.0.3" + 
bluebird@^3.4.7, bluebird@^3.5.0: version "3.7.2" resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" @@ -2888,19 +3126,6 @@ boolbase@^1.0.0, boolbase@~1.0.0: resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= -borc@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/borc/-/borc-2.1.2.tgz#6ce75e7da5ce711b963755117dd1b187f6f8cf19" - integrity sha512-Sy9eoUi4OiKzq7VovMn246iTo17kzuyHJKomCfpWMlI6RpfN1gk95w7d7gH264nApVLg0HZfcpz62/g4VH1Y4w== - dependencies: - bignumber.js "^9.0.0" - buffer "^5.5.0" - commander "^2.15.0" - ieee754 "^1.1.13" - iso-url "~0.4.7" - json-text-sequence "~0.1.0" - readable-stream "^3.6.0" - brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -2909,6 +3134,13 @@ brace-expansion@^1.1.7: balanced-match "^1.0.0" concat-map "0.0.1" +brace-expansion@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" + integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + dependencies: + balanced-match "^1.0.0" + braces@^1.8.2: version "1.8.5" resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" @@ -2918,7 +3150,7 @@ braces@^1.8.2: preserve "^0.2.0" repeat-element "^1.1.2" -braces@^3.0.1, braces@~3.0.2: +braces@^3.0.1, braces@^3.0.2, braces@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== @@ -2930,12 +3162,17 @@ brorand@^1.0.1, brorand@^1.1.0: resolved 
"https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= +browser-readablestream-to-it@^1.0.0, browser-readablestream-to-it@^1.0.1, browser-readablestream-to-it@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/browser-readablestream-to-it/-/browser-readablestream-to-it-1.0.3.tgz#ac3e406c7ee6cdf0a502dd55db33bab97f7fba76" + integrity sha512-+12sHB+Br8HIh6VAMVEG5r3UXCyESIgDW7kzk3BjIXa43DVqVwL7GC5TW3jeh+72dtcH99pPVpw0X8i0jt+/kw== + browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== -browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.0.6, browserify-aes@^1.2.0: +browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== @@ -3000,7 +3237,7 @@ browserslist@^4.14.5, browserslist@^4.16.3: escalade "^3.1.1" node-releases "^1.1.70" -bs58@^4.0.0, bs58@^4.0.1: +bs58@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/bs58/-/bs58-4.0.1.tgz#be161e76c354f6f788ae4071f63f34e8c4f0a42a" integrity sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw== @@ -3071,7 +3308,7 @@ buffer-xor@^1.0.3: resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ== -buffer@^5.0.5, buffer@^5.2.1, buffer@^5.4.2, buffer@^5.4.3, buffer@^5.5.0, buffer@^5.6.0, buffer@^5.7.0: +buffer@^5.0.5, buffer@^5.2.1, buffer@^5.5.0, 
buffer@^5.6.0, buffer@^5.7.0: version "5.7.1" resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== @@ -3079,7 +3316,7 @@ buffer@^5.0.5, buffer@^5.2.1, buffer@^5.4.2, buffer@^5.4.3, buffer@^5.5.0, buffe base64-js "^1.3.1" ieee754 "^1.1.13" -buffer@^6.0.3: +buffer@^6.0.1, buffer@^6.0.3: version "6.0.3" resolved "https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== @@ -3094,11 +3331,6 @@ bufferutil@^4.0.1: dependencies: node-gyp-build "^4.2.0" -builtin-status-codes@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" - integrity sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ== - busboy@^0.3.1: version "0.3.1" resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.3.1.tgz#170899274c5bf38aae27d5c62b71268cd585fd1b" @@ -3106,6 +3338,13 @@ busboy@^0.3.1: dependencies: dicer "0.3.0" +busboy@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" + integrity sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA== + dependencies: + streamsearch "^1.1.0" + bytes@3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" @@ -3186,6 +3425,14 @@ caniuse-lite@^1.0.30001181: resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001197.tgz#47ad15b977d2f32b3ec2fe2b087e0c50443771db" integrity sha512-8aE+sqBqtXz4G8g35Eg/XEaFr2N7rd/VQ6eABGBmNtcB8cN6qNJhMi6oSFy4UWWZgqgL3filHT8Nha4meu3tsw== +cardinal@^2.1.1: + version "2.1.1" 
+ resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-2.1.1.tgz#7cc1055d822d212954d07b085dea251cc7bc5505" + integrity sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw== + dependencies: + ansicolors "~0.3.2" + redeyed "~2.1.0" + caseless@^0.12.0, caseless@~0.12.0: version "0.12.0" resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" @@ -3199,6 +3446,11 @@ cbor@^5.1.0: bignumber.js "^9.0.1" nofilter "^1.0.4" +cborg@^1.5.4, cborg@^1.6.0: + version "1.10.1" + resolved "https://registry.yarnpkg.com/cborg/-/cborg-1.10.1.tgz#24cfe52c69ec0f66f95e23dc57f2086954c8d718" + integrity sha512-et6Qm8MOUY2kCWa5GKk2MlBVoPjHv0hQBmlzI/Z7+5V3VJCeIkGehIB3vWknNsm2kOkAIs6wEKJFJo8luWQQ/w== + chalk@1.1.3, chalk@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" @@ -3235,6 +3487,14 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^4.0.2, chalk@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + change-case@3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/change-case/-/change-case-3.0.2.tgz#fd48746cce02f03f0a672577d1d3a8dc2eceb037" @@ -3330,20 +3590,20 @@ chokidar@3.4.2: optionalDependencies: fsevents "~2.1.2" -chokidar@3.5.1: - version "3.5.1" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.1.tgz#ee9ce7bbebd2b79f49f304799d5468e31e14e68a" - integrity sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw== +chokidar@3.5.3: + version "3.5.3" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" + integrity 
sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== dependencies: - anymatch "~3.1.1" + anymatch "~3.1.2" braces "~3.0.2" - glob-parent "~5.1.0" + glob-parent "~5.1.2" is-binary-path "~2.1.0" is-glob "~4.0.1" normalize-path "~3.0.0" - readdirp "~3.5.0" + readdirp "~3.6.0" optionalDependencies: - fsevents "~2.3.1" + fsevents "~2.3.2" chownr@^1.0.1, chownr@^1.1.4: version "1.1.4" @@ -3355,7 +3615,7 @@ chownr@^2.0.0: resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== -cids@^0.7.1, cids@~0.7.0, cids@~0.7.1: +cids@^0.7.1: version "0.7.5" resolved "https://registry.yarnpkg.com/cids/-/cids-0.7.5.tgz#60a08138a99bfb69b6be4ceb63bfef7a396b28b2" integrity sha512-zT7mPeghoWAu+ppn8+BS1tQ5qGmbMfB4AregnQjA/qHY3GC1m1ptI9GkWNlgeu38r7CuRdXB47uY2XgAYt6QVA== @@ -3366,17 +3626,6 @@ cids@^0.7.1, cids@~0.7.0, cids@~0.7.1: multicodec "^1.0.0" multihashes "~0.4.15" -cids@~0.8.0: - version "0.8.3" - resolved "https://registry.yarnpkg.com/cids/-/cids-0.8.3.tgz#aaf48ac8ed857c3d37dad94d8db1d8c9407b92db" - integrity sha512-yoXTbV3llpm+EBGWKeL9xKtksPE/s6DPoDSY4fn8I8TEW1zehWXPSB0pwAXVDlLaOlrw+sNynj995uD9abmPhA== - dependencies: - buffer "^5.6.0" - class-is "^1.1.0" - multibase "^1.0.0" - multicodec "^1.0.1" - multihashes "^1.0.1" - cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" @@ -3390,6 +3639,13 @@ class-is@^1.1.0: resolved "https://registry.yarnpkg.com/class-is/-/class-is-1.1.0.tgz#9d3c0fba0440d211d843cec3dedfa48055005825" integrity sha512-rhjH9AG1fvabIDoGRVH587413LPjTZgmDF9fOFCbFJQV4yuocX1mHxxvXI4g3cGwbVY9wAYIoKlg1N79frJKQw== +clean-stack@^3.0.1: + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/clean-stack/-/clean-stack-3.0.1.tgz#155bf0b2221bf5f4fba89528d24c5953f17fe3a8" + integrity sha512-lR9wNiMRcVQjSB3a7xXGLuz4cr4wJuuXlaAEbRutGowQTmlp7R72/DOgN21e8jdwblMWl9UOJMJXarX94pzKdg== + dependencies: + escape-string-regexp "4.0.0" + cli-cursor@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" @@ -3404,6 +3660,13 @@ cli-cursor@^3.1.0: dependencies: restore-cursor "^3.1.0" +cli-progress@^3.12.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.12.0.tgz#807ee14b66bcc086258e444ad0f19e7d42577942" + integrity sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A== + dependencies: + string-width "^4.2.3" + cli-spinners@^2.0.0: version "2.5.0" resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.5.0.tgz#12763e47251bf951cb75c201dfa58ff1bcb2d047" @@ -3414,6 +3677,16 @@ cli-spinners@^2.2.0: resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.7.0.tgz#f815fd30b5f9eaac02db604c7a231ed7cb2f797a" integrity sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw== +cli-table3@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.0.tgz#b7b1bc65ca8e7b5cef9124e13dc2b21e2ce4faee" + integrity sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ== + dependencies: + object-assign "^4.1.0" + string-width "^4.2.0" + optionalDependencies: + colors "^1.1.2" + cli-table3@~0.5.0: version "0.5.1" resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.5.1.tgz#0252372d94dfc40dbd8df06005f48f31f656f202" @@ -3527,12 +3800,7 @@ colorette@^1.2.1: resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.2.tgz#cbcc79d5e99caea2dbf10eb3a26fd8b3e6acfa94" integrity 
sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w== -colors@1.3.3: - version "1.3.3" - resolved "https://registry.yarnpkg.com/colors/-/colors-1.3.3.tgz#39e005d546afe01e01f9c4ca8fa50f686a01205d" - integrity sha512-mmGt/1pZqYRjMxB1axhTo16/snVZ5krrKkcmMeVKxzECMMXoCgnvTPp10QgHfcbQZw8Dq2jMNG6je4JlWU0gWg== - -colors@^1.1.2, colors@^1.3.3: +colors@1.4.0, colors@^1.1.2, colors@^1.3.3: version "1.4.0" resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== @@ -3554,7 +3822,7 @@ commander@3.0.2: resolved "https://registry.yarnpkg.com/commander/-/commander-3.0.2.tgz#6837c3fb677ad9933d1cfba42dd14d5117d6b39e" integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== -commander@^2.15.0, commander@^2.20.3: +commander@^2.20.3: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== @@ -3588,13 +3856,6 @@ concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: readable-stream "^2.2.2" typedarray "^0.0.6" -"concat-stream@github:hugomrdias/concat-stream#feat/smaller": - version "2.0.0" - resolved "https://codeload.github.com/hugomrdias/concat-stream/tar.gz/057bc7b5d6d8df26c8cf00a3f151b6721a0a8034" - dependencies: - inherits "^2.0.3" - readable-stream "^3.0.2" - configstore@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/configstore/-/configstore-4.0.0.tgz#5933311e95d3687efb592c528b922d9262d227e7" @@ -3710,6 +3971,17 @@ cosmiconfig@6.0.0: path-type "^4.0.0" yaml "^1.7.2" +cosmiconfig@7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.0.1.tgz#714d756522cace867867ccb4474c5d01bbae5d6d" + integrity 
sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.2.1" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.10.0" + create-ecdh@^4.0.0: version "4.0.4" resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.4.tgz#d6e7f4bffa66736085a0762fd3a632684dabcc4e" @@ -3741,6 +4013,11 @@ create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: safe-buffer "^5.0.1" sha.js "^2.4.8" +create-require@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" + integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== + cross-fetch@3.0.6, cross-fetch@^3.0.4: version "3.0.6" resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.0.6.tgz#3a4040bc8941e653e0e9cf17f29ebcd177d3365c" @@ -3764,7 +4041,7 @@ cross-fetch@^2.1.0, cross-fetch@^2.1.1: node-fetch "2.1.2" whatwg-fetch "2.0.4" -cross-spawn@^7.0.0: +cross-spawn@7.0.3, cross-spawn@^7.0.0, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -3773,6 +4050,17 @@ cross-spawn@^7.0.0: shebang-command "^2.0.0" which "^2.0.1" +cross-spawn@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" + integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + crypto-browserify@3.12.0: version "3.12.0" resolved 
"https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" @@ -3908,10 +4196,10 @@ debug@4.1.1: dependencies: ms "^2.1.1" -debug@4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" - integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== +debug@4.3.4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" @@ -3922,13 +4210,6 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" -debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: - version "4.3.4" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" - integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== - dependencies: - ms "2.1.2" - decamelize@^1.1.1, decamelize@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" @@ -4013,11 +4294,6 @@ delegates@^1.0.0: resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= -delimit-stream@0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/delimit-stream/-/delimit-stream-0.1.0.tgz#9b8319477c0e5f8aeb3ce357ae305fc25ea1cd2b" - integrity sha512-a02fiQ7poS5CnjiJBAsjGLPp5EwVoGHNeu9sziBd9huppRfsAFIpv5zNLv0V1gbop53ilngAf5Kf331AwcoRBQ== - depd@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" @@ -4068,11 +4344,6 @@ detect-newline@2.X: resolved 
"https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2" integrity sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I= -detect-node@^2.0.4: - version "2.1.0" - resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.1.0.tgz#c9c70775a49c3d03bc2c06d9a73be550f978f8b1" - integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g== - dicer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/dicer/-/dicer-0.3.0.tgz#eacd98b3bfbf92e8ab5c2fdb71aaac44bb06b872" @@ -4080,7 +4351,7 @@ dicer@0.3.0: dependencies: streamsearch "0.1.2" -diff@4.0.2: +diff@4.0.2, diff@^4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== @@ -4101,10 +4372,21 @@ dir-glob@^3.0.1: dependencies: path-type "^4.0.0" -docker-compose@0.23.4: - version "0.23.4" - resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.4.tgz#43bcabcde55a6ba2873b52fe0ccd99dd8fdceba8" - integrity sha512-yWdXby9uQ8o4syOfvoSJ9ZlTnLipvUmDn59uaYY5VGIUSUAfMPPGqE1DE3pOCnfSg9Tl9UOOFO0PCSAzuIHmuA== +dns-over-http-resolver@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/dns-over-http-resolver/-/dns-over-http-resolver-1.2.3.tgz#194d5e140a42153f55bb79ac5a64dd2768c36af9" + integrity sha512-miDiVSI6KSNbi4SVifzO/reD8rMnxgrlnkrlkugOLQpWQTe2qMdHsZp5DmfKjxNE+/T3VAAYLQUZMv9SMr6+AA== + dependencies: + debug "^4.3.1" + native-fetch "^3.0.0" + receptacle "^1.3.2" + +docker-compose@0.23.19: + version "0.23.19" + resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.19.tgz#9947726e2fe67bdfa9e8efe1ff15aa0de2e10eb8" + integrity sha512-v5vNLIdUqwj4my80wxFDkNH+4S85zsRuH29SO7dCWVWPCMt/ohZBsGN6g6KXWifT0pzQ7uOxqEKCYCDPJ8Vz4g== + dependencies: + yaml "^1.10.2" docker-modem@^1.0.8: version "1.0.9" @@ -4230,15 +4512,6 @@ 
double-ended-queue@2.1.0-0: resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c" integrity sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw= -drbg.js@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/drbg.js/-/drbg.js-1.0.1.tgz#3e36b6c42b37043823cdbc332d58f31e2445480b" - integrity sha512-F4wZ06PvqxYLFEZKkFxTDcns9oFNk34hvmJSEwdzsxVQ8YI5YaxtACgQatkYgv2VI2CFkUd2Y+xosPQnHv809g== - dependencies: - browserify-aes "^1.0.6" - create-hash "^1.1.2" - create-hmac "^1.1.4" - duplexer3@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" @@ -4267,11 +4540,32 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== +ejs@3.1.6: + version "3.1.6" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.6.tgz#5bfd0a0689743bb5268b3550cceeebbc1702822a" + integrity sha512-9lt9Zse4hPucPkoP7FHDF0LQAlGyF9JVpnClFLFH3aSSbxmyoqINRpp/9wePWJTUl4KOQwRL72Iw3InHPDkoGw== + dependencies: + jake "^10.6.1" + ejs@^2.6.1: version "2.7.4" resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.7.4.tgz#48661287573dcc53e366c7a1ae52c3a120eec9ba" integrity sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA== +ejs@^3.1.8: + version "3.1.9" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.9.tgz#03c9e8777fe12686a9effcef22303ca3d8eeb361" + integrity sha512-rC+QVNMJWv+MtPgkt0y+0rVEIdbtxVADApW9JXrUVlzHetgcyczP/E7DJmWJ4fJCZF2cPcBk0laWO9ZHMG3DmQ== + dependencies: + jake "^10.8.5" + +electron-fetch@^1.7.2: + version "1.9.1" + resolved "https://registry.yarnpkg.com/electron-fetch/-/electron-fetch-1.9.1.tgz#e28bfe78d467de3f2dec884b1d72b8b05322f30f" + integrity 
sha512-M9qw6oUILGVrcENMSRRefE1MbHPIz0h79EKIeJWK9v563aT9Qkh8aEHPO1H5vi970wPirNY+jO9OpFoLiMsMGA== + dependencies: + encoding "^0.1.13" + electron-to-chromium@^1.3.649: version "1.3.683" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.683.tgz#2c9ab53ff5275cf3dd49278af714d0f8975204f7" @@ -4333,14 +4627,14 @@ encoding-down@^6.3.0: level-codec "^9.0.0" level-errors "^2.0.0" -encoding@^0.1.11: +encoding@^0.1.11, encoding@^0.1.13: version "0.1.13" resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.13.tgz#56574afdd791f54a8e9b2785c0582a2d26210fa9" integrity sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A== dependencies: iconv-lite "^0.6.2" -end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1: +end-of-stream@^1.0.0, end-of-stream@^1.1.0: version "1.4.4" resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== @@ -4361,6 +4655,13 @@ enquirer@2.3.4: dependencies: ansi-colors "^3.2.1" +enquirer@2.3.6: + version "2.3.6" + resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" + integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg== + dependencies: + ansi-colors "^4.1.1" + entities@1.0: version "1.0.0" resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26" @@ -4381,15 +4682,10 @@ entities@~2.1.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" integrity sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w== -err-code@^1.1.2: - version "1.1.2" - resolved 
"https://registry.yarnpkg.com/err-code/-/err-code-1.1.2.tgz#06e0116d3028f6aef4806849eb0ea6a748ae6960" - integrity sha512-CJAN+O0/yA1CKfRn9SXOGctSpEM7DCon/r/5r2eXFMY2zCCJBasFhcM5I+1kh3Ap11FsQCX+vGHceNPvpWKhoA== - -err-code@^2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" - integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA== +err-code@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/err-code/-/err-code-3.0.1.tgz#a444c7b992705f2b120ee320b09972eef331c920" + integrity sha512-GiaH0KJUewYok+eeY05IIgjtAe4Yltygk9Wqp1V5yVWLdhf0hYZchRjNIT9bb0mSwRcIusT3cx7PJUf3zEIfUA== errno@~0.1.1: version "0.1.8" @@ -4573,7 +4869,7 @@ esdoc@^1.0.4: minimist "1.2.0" taffydb "2.7.3" -esprima@^4.0.0, esprima@^4.0.1: +esprima@^4.0.0, esprima@^4.0.1, esprima@~4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== @@ -4938,6 +5234,21 @@ evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: md5.js "^1.3.4" safe-buffer "^5.1.1" +execa@5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + execa@^3.0.0: version "3.4.0" resolved "https://registry.yarnpkg.com/execa/-/execa-3.4.0.tgz#c08ed4550ef65d858fac269ffc8572446f37eb89" @@ -4968,11 +5279,6 @@ expand-range@^1.8.1: dependencies: fill-range "^2.1.0" -explain-error@^1.0.4: - version "1.0.4" - resolved 
"https://registry.yarnpkg.com/explain-error/-/explain-error-1.0.4.tgz#a793d3ac0cad4c6ab571e9968fbbab6cb2532929" - integrity sha512-/wSgNMxFusiYRy1rd19LT2SQlIXDppHpumpWo06wxjflD1OYxDLbl6rMVw+U3bxD5Nuhex4TKqv9Aem4D0lVzQ== - express@^4.0.0, express@^4.14.0, express@^4.17.1: version "4.18.2" resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" @@ -5075,11 +5381,21 @@ fast-check@^2.12.1: dependencies: pure-rand "^4.1.1" +fast-decode-uri-component@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fast-decode-uri-component/-/fast-decode-uri-component-1.0.1.tgz#46f8b6c22b30ff7a81357d4f59abfae938202543" + integrity sha512-WKgKWg5eUxvRZGwW8FvfbaH7AXSh2cL+3j5fMGzUMCxWBJ3dV3a7Wz8y2f/uQ0e3B6WmodD3oS54jTQ9HVTIIg== + fast-deep-equal@^3.1.1: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== +fast-fifo@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/fast-fifo/-/fast-fifo-1.2.0.tgz#2ee038da2468e8623066dee96958b0c1763aa55a" + integrity sha512-NcvQXt7Cky1cNau15FWy64IjuO8X0JijhTBBrJj1YlxlDfRkJXNaK9RFUjwpfDPzMdv7wB38jr53l9tkNLxnWg== + fast-future@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/fast-future/-/fast-future-1.0.2.tgz#8435a9aaa02d79248d17d704e76259301d99280a" @@ -5097,6 +5413,17 @@ fast-glob@^3.1.1: micromatch "^4.0.2" picomatch "^2.2.1" +fast-glob@^3.2.9: + version "3.2.12" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.12.tgz#7f39ec99c2e6ab030337142da9e0c18f37afae80" + integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + fast-json-stable-stringify@^2.0.0: version 
"2.1.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" @@ -5107,11 +5434,25 @@ fast-levenshtein@~2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= +fast-querystring@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/fast-querystring/-/fast-querystring-1.1.1.tgz#f4c56ef56b1a954880cfd8c01b83f9e1a3d3fda2" + integrity sha512-qR2r+e3HvhEFmpdHMv//U8FnFlnYjaC6QKDuaXALDkw2kvHO8WDjxH+f/rHGR4Me4pnk8p9JAkRNTjYHAKRn2Q== + dependencies: + fast-decode-uri-component "^1.0.1" + fast-safe-stringify@^2.0.6: version "2.0.7" resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz#124aa885899261f68aedb42a7c080de9da608743" integrity sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA== +fast-url-parser@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d" + integrity sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ== + dependencies: + punycode "^1.3.2" + fastq@^1.6.0: version "1.11.0" resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.11.0.tgz#bb9fb955a07130a918eb63c1f5161cc32a5d0858" @@ -5171,6 +5512,13 @@ file-uri-to-path@1.0.0: resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== +filelist@^1.0.1, filelist@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5" + integrity 
sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q== + dependencies: + minimatch "^5.0.1" + filename-regex@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" @@ -5257,11 +5605,6 @@ flat@^4.1.0: dependencies: is-buffer "~2.0.3" -flatmap@0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/flatmap/-/flatmap-0.0.3.tgz#1f18a4d938152d495965f9c958d923ab2dd669b4" - integrity sha512-OuR+o7kHVe+x9RtIujPay7Uw3bvDZBZFSBXClEphZuSDLmZTqMdclasf4vFSsogC8baDz0eaC2NdO/2dlXHBKQ== - follow-redirects@^1.12.1: version "1.14.8" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.8.tgz#016996fb9a11a100566398b1c6839337d7bfa8fc" @@ -5371,15 +5714,15 @@ fs-extra@5.0.0: jsonfile "^4.0.0" universalify "^0.1.0" -fs-extra@9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.0.0.tgz#b6afc31036e247b2466dc99c29ae797d5d4580a3" - integrity sha512-pmEYSk3vYsG/bF651KPUXZ+hvjpgWYw/Gc7W9NFUe3ZVLczKKWIij3IKpOrQcdw4TILtibFslZ0UmR8Vvzig4g== +fs-extra@9.1.0, fs-extra@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== dependencies: at-least-node "^1.0.0" graceful-fs "^4.2.0" jsonfile "^6.0.1" - universalify "^1.0.0" + universalify "^2.0.0" fs-extra@^0.30.0: version "0.30.0" @@ -5401,15 +5744,13 @@ fs-extra@^4.0.2: jsonfile "^4.0.0" universalify "^0.1.0" -fs-extra@^9.1.0: - version "9.1.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" - integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== +fs-jetpack@4.3.1: + version "4.3.1" + resolved 
"https://registry.yarnpkg.com/fs-jetpack/-/fs-jetpack-4.3.1.tgz#cdfd4b64e6bfdec7c7dc55c76b39efaa7853bb20" + integrity sha512-dbeOK84F6BiQzk2yqqCVwCPWTxAvVGJ3fMQc6E2wuEohS28mR6yHngbrKuVCK1KHRx/ccByDylqu4H5PCP2urQ== dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^2.0.0" + minimatch "^3.0.2" + rimraf "^2.6.3" fs-jetpack@^2.2.2: version "2.4.0" @@ -5443,7 +5784,7 @@ fsevents@~2.1.2: resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== -fsevents@~2.3.1: +fsevents@~2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== @@ -5500,6 +5841,16 @@ get-intrinsic@^1.1.0, get-intrinsic@^1.1.1: has "^1.0.3" has-symbols "^1.0.1" +get-iterator@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-iterator/-/get-iterator-1.0.2.tgz#cd747c02b4c084461fac14f48f6b45a80ed25c82" + integrity sha512-v+dm9bNVfOYsY1OrhaCrmyOcYoSeVvbt+hHZ0Au+T+p1y+0Uyj9aMaGIeUTT6xdpRbWzDeYKvfOslPhggQMcsg== + +get-package-type@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" + integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== + get-params@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/get-params/-/get-params-0.1.2.tgz#bae0dfaba588a0c60d7834c0d8dc2ff60eeef2fe" @@ -5529,6 +5880,11 @@ get-stream@^5.0.0, get-stream@^5.1.0: dependencies: pump "^3.0.0" +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity 
sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + get-symbol-description@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" @@ -5567,7 +5923,7 @@ glob-parent@^3.0.0: is-glob "^3.1.0" path-dirname "^1.0.0" -glob-parent@^5.1.0, glob-parent@~5.1.0: +glob-parent@^5.1.0, glob-parent@^5.1.2, glob-parent@~5.1.0, glob-parent@~5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== @@ -5600,6 +5956,16 @@ glob@7.1.6, glob@^7.1.1: once "^1.3.0" path-is-absolute "^1.0.0" +glob@9.3.5: + version "9.3.5" + resolved "https://registry.yarnpkg.com/glob/-/glob-9.3.5.tgz#ca2ed8ca452781a3009685607fdf025a899dfe21" + integrity sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q== + dependencies: + fs.realpath "^1.0.0" + minimatch "^8.0.2" + minipass "^4.2.4" + path-scurry "^1.6.1" + glob@^5.0.3: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -5653,21 +6019,33 @@ globby@11.0.2: merge2 "^1.3.0" slash "^3.0.0" -gluegun@^4.6.1: - version "4.6.1" - resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" - integrity sha512-Jd5hV1Uku2rjBg59mYA/bnwLwynK7u9A1zmK/LIb/p5d3pzjDCKRjWFuxZXyPwl9rsvKGhJUQxkFo2HEy8crKQ== +globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== dependencies: - apisauce "^2.0.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 
"^1.4.1" + slash "^3.0.0" + +gluegun@5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.1.2.tgz#ffa0beda0fb6bbc089a867157b08602beae2c8cf" + integrity sha512-Cwx/8S8Z4YQg07a6AFsaGnnnmd8mN17414NcPS3OoDtZRwxgsvwRNJNg69niD6fDa8oNwslCG0xH7rEpRNNE/g== + dependencies: + apisauce "^2.1.5" app-module-path "^2.2.0" - cli-table3 "~0.5.0" - colors "^1.3.3" - cosmiconfig "6.0.0" - cross-spawn "^7.0.0" - ejs "^2.6.1" - enquirer "2.3.4" - execa "^3.0.0" - fs-jetpack "^2.2.2" + cli-table3 "0.6.0" + colors "1.4.0" + cosmiconfig "7.0.1" + cross-spawn "7.0.3" + ejs "3.1.6" + enquirer "2.3.6" + execa "5.1.1" + fs-jetpack "4.3.1" lodash.camelcase "^4.3.0" lodash.kebabcase "^4.1.1" lodash.lowercase "^4.3.0" @@ -5683,21 +6061,21 @@ gluegun@^4.6.1: lodash.trimstart "^4.5.1" lodash.uppercase "^4.3.0" lodash.upperfirst "^4.3.1" - ora "^4.0.0" + ora "4.0.2" pluralize "^8.0.0" - ramdasauce "^2.1.0" - semver "^7.0.0" - which "^2.0.0" - yargs-parser "^16.1.0" + semver "7.3.5" + which "2.0.2" + yargs-parser "^21.0.0" -"gluegun@https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep": - version "4.3.1" - resolved "https://github.com/edgeandnode/gluegun#b34b9003d7bf556836da41b57ef36eb21570620a" +gluegun@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" + integrity sha512-Jd5hV1Uku2rjBg59mYA/bnwLwynK7u9A1zmK/LIb/p5d3pzjDCKRjWFuxZXyPwl9rsvKGhJUQxkFo2HEy8crKQ== dependencies: - apisauce "^1.0.1" + apisauce "^2.0.1" app-module-path "^2.2.0" cli-table3 "~0.5.0" - colors "1.3.3" + colors "^1.3.3" cosmiconfig "6.0.0" cross-spawn "^7.0.0" ejs "^2.6.1" @@ -5782,6 +6160,11 @@ graphql-extensions@^0.15.0: apollo-server-env "^3.1.0" apollo-server-types "^0.9.0" +graphql-import-node@^0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/graphql-import-node/-/graphql-import-node-0.0.5.tgz#caf76a6cece10858b14f27cce935655398fc1bf0" + integrity 
sha512-OXbou9fqh9/Lm7vwXT0XoRN9J5+WCYKnbiTalgFDvkQERITRmcfncZs6aVABedd5B85yQU5EULS4a5pnbpuI0Q== + graphql-subscriptions@^1.0.0: version "1.2.1" resolved "https://registry.yarnpkg.com/graphql-subscriptions/-/graphql-subscriptions-1.2.1.tgz#2142b2d729661ddf967b7388f7cf1dd4cf2e061d" @@ -5864,6 +6247,11 @@ graphql@15.5.0, graphql@^15.3.0: resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.5.0.tgz#39d19494dbe69d1ea719915b578bf920344a69d5" integrity sha512-OmaM7y0kaK31NKG31q4YbD2beNYa6jBBKtMFT6gLYJljHLJr42IqJ8KX08u3Li/0ifzTU5HjmoOOrwa5BRLeDA== +graphql@^16.6.0: + version "16.6.0" + resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.6.0.tgz#c2dcffa4649db149f6282af726c8c83f1c7c5fdb" + integrity sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw== + growl@1.10.5: version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" @@ -6000,11 +6388,6 @@ header-case@^1.0.0: no-case "^2.2.0" upper-case "^1.1.3" -hi-base32@~0.5.0: - version "0.5.1" - resolved "https://registry.yarnpkg.com/hi-base32/-/hi-base32-0.5.1.tgz#1279f2ddae2673219ea5870c2121d2a33132857e" - integrity sha512-EmBBpvdYh/4XxsnUybsPag6VikPYnN30td+vQk+GI3qpahVEG9+gTkG0aXVxTjBqQ5T6ijbWIu77O+C5WFWsnA== - highlight.js@^10.4.0, highlight.js@^10.4.1: version "10.6.0" resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.6.0.tgz#0073aa71d566906965ba6e1b7be7b2682f5e18b6" @@ -6140,6 +6523,16 @@ human-signals@^1.1.1: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity 
sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +hyperlinker@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/hyperlinker/-/hyperlinker-1.0.0.tgz#23dc9e38a206b208ee49bc2d6c8ef47027df0c0e" + integrity sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ== + ice-cap@0.0.4: version "0.0.4" resolved "https://registry.yarnpkg.com/ice-cap/-/ice-cap-0.0.4.tgz#8a6d31ab4cac8d4b56de4fa946df3352561b6e18" @@ -6186,6 +6579,11 @@ ignore@^5.1.4: resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== +ignore@^5.2.0: + version "5.2.4" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" + integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== + immediate@3.0.6: version "3.0.6" resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.0.6.tgz#9db1dbd0faf8de6fbe0f5dd5e56bb606280de69b" @@ -6201,17 +6599,17 @@ immediate@~3.2.3: resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" integrity sha1-0UD6j2FGWb1lQSMwl92qwlzdmRw= -immutable@3.8.2: - version "3.8.2" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" - integrity sha1-wkOZUUVbs5kT2vKBN28VMOEErfM= +immutable@4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.2.1.tgz#8a4025691018c560a40c67e43d698f816edc44d4" + integrity sha512-7WYV7Q5BTs0nlQm7tl92rDYYoyELLKHoDMBKhrxEoiV4mrfVdRz8hzPiYOzH7yWjzoVEamxRuAqhxL2PLRwZYQ== immutable@~3.7.6: version "3.7.6" resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" integrity 
sha1-E7TTyxK++hVIKib+Gy665kAHHks= -import-fresh@^3.1.0: +import-fresh@^3.1.0, import-fresh@^3.2.1: version "3.3.0" resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== @@ -6231,6 +6629,11 @@ imurmurhash@^0.1.4: resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + inflight@^1.0.4: version "1.0.6" resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" @@ -6254,6 +6657,20 @@ ini@~1.3.0: resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== +interface-datastore@^6.0.2: + version "6.1.1" + resolved "https://registry.yarnpkg.com/interface-datastore/-/interface-datastore-6.1.1.tgz#5150a00de2e7513eaadba58bcafd059cb50004c1" + integrity sha512-AmCS+9CT34pp2u0QQVXjKztkuq3y5T+BIciuiHDDtDZucZD8VudosnSdUyXJV6IsRkN5jc4RFDhCk1O6Q3Gxjg== + dependencies: + interface-store "^2.0.2" + nanoid "^3.0.2" + uint8arrays "^3.0.0" + +interface-store@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/interface-store/-/interface-store-2.0.2.tgz#83175fd2b0c501585ed96db54bb8ba9d55fce34c" + integrity sha512-rScRlhDcz6k199EkHqT8NpM87ebN89ICOzILoBHgaG36/WX50N32BnU/kpZgCGPLhARRAWUUX5/cyaIjt7Kipg== + internal-slot@^1.0.3: version "1.0.3" resolved 
"https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.3.tgz#7347e307deeea2faac2ac6205d4bc7d34967f59c" @@ -6275,136 +6692,105 @@ invert-kv@^1.0.0: resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" integrity sha1-EEqOSqym09jNFXqO+L+rLXo//bY= -ip-regex@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" - integrity sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw== - ip-regex@^4.0.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== -ip@^1.1.5: - version "1.1.8" - resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.8.tgz#ae05948f6b075435ed3307acce04629da8cdbf48" - integrity sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg== - ipaddr.js@1.9.1: version "1.9.1" resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== -ipfs-block@~0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/ipfs-block/-/ipfs-block-0.8.1.tgz#05e1068832775e8f1c2da5b64106cc837fd2acb9" - integrity sha512-0FaCpmij+jZBoUYhjoB5ptjdl9QzvrdRIoBmUU5JiBnK2GA+4YM/ifklaB8ePRhA/rRzhd+KYBjvMFMAL4NrVQ== +ipfs-core-types@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/ipfs-core-types/-/ipfs-core-types-0.9.0.tgz#cb201ff7a9470651ba14c4e7fae56661a55bf37e" + integrity sha512-VJ8vJSHvI1Zm7/SxsZo03T+zzpsg8pkgiIi5hfwSJlsrJ1E2v68QPlnLshGHUSYw89Oxq0IbETYl2pGTFHTWfg== dependencies: - cids "~0.7.0" - class-is "^1.1.0" + interface-datastore "^6.0.2" + multiaddr "^10.0.0" + multiformats "^9.4.13" 
-ipfs-http-client@34.0.0: - version "34.0.0" - resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-34.0.0.tgz#8804d06a11c22306332a8ffa0949b6f672a0c9c8" - integrity sha512-4RCkk8ix4Dqn6sxqFVwuXWCZ1eLFPsVaj6Ijvu1fs9VYgxgVudsW9PWwarlr4mw1xUCmPWYyXnEbGgzBrfMy0Q== +ipfs-core-utils@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/ipfs-core-utils/-/ipfs-core-utils-0.13.0.tgz#8f0ec9aaa7c24f6f307e6e76e7bdc1cefd829894" + integrity sha512-HP5EafxU4/dLW3U13CFsgqVO5Ika8N4sRSIb/dTg16NjLOozMH31TXV0Grtu2ZWo1T10ahTzMvrfT5f4mhioXw== dependencies: + any-signal "^2.1.2" + blob-to-it "^1.0.1" + browser-readablestream-to-it "^1.0.1" + debug "^4.1.1" + err-code "^3.0.1" + ipfs-core-types "^0.9.0" + ipfs-unixfs "^6.0.3" + ipfs-utils "^9.0.2" + it-all "^1.0.4" + it-map "^1.0.4" + it-peekable "^1.0.2" + it-to-stream "^1.0.0" + merge-options "^3.0.4" + multiaddr "^10.0.0" + multiaddr-to-uri "^8.0.0" + multiformats "^9.4.13" + nanoid "^3.1.23" + parse-duration "^1.0.0" + timeout-abort-controller "^2.0.0" + uint8arrays "^3.0.0" + +ipfs-http-client@55.0.0: + version "55.0.0" + resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-55.0.0.tgz#8b713c5fa318e873b7d7ad099a4eb14320a5b0ce" + integrity sha512-GpvEs7C7WL9M6fN/kZbjeh4Y8YN7rY8b18tVWZnKxRsVwM25cIFrRI8CwNt3Ugin9yShieI3i9sPyzYGMrLNnQ== + dependencies: + "@ipld/dag-cbor" "^7.0.0" + "@ipld/dag-json" "^8.0.1" + "@ipld/dag-pb" "^2.1.3" abort-controller "^3.0.0" - async "^2.6.1" - bignumber.js "^9.0.0" - bl "^3.0.0" - bs58 "^4.0.1" - buffer "^5.4.2" - cids "~0.7.1" - concat-stream "github:hugomrdias/concat-stream#feat/smaller" - debug "^4.1.0" - detect-node "^2.0.4" - end-of-stream "^1.4.1" - err-code "^2.0.0" - explain-error "^1.0.4" - flatmap "0.0.3" - glob "^7.1.3" - ipfs-block "~0.8.1" - ipfs-utils "~0.0.3" - ipld-dag-cbor "~0.15.0" - ipld-dag-pb "~0.17.3" - ipld-raw "^4.0.0" - is-ipfs "~0.6.1" - is-pull-stream "0.0.0" - is-stream "^2.0.0" - iso-stream-http "~0.1.2" - iso-url 
"~0.4.6" - iterable-ndjson "^1.1.0" - just-kebab-case "^1.1.0" - just-map-keys "^1.1.0" - kind-of "^6.0.2" - ky "^0.11.2" - ky-universal "^0.2.2" - lru-cache "^5.1.1" - multiaddr "^6.0.6" - multibase "~0.6.0" - multicodec "~0.5.1" - multihashes "~0.4.14" - ndjson "github:hugomrdias/ndjson#feat/readable-stream3" - once "^1.4.0" - peer-id "~0.12.3" - peer-info "~0.15.1" - promise-nodeify "^3.0.1" - promisify-es6 "^1.0.3" - pull-defer "~0.2.3" - pull-stream "^3.6.9" - pull-to-stream "~0.1.1" - pump "^3.0.0" - qs "^6.5.2" - readable-stream "^3.1.1" - stream-to-pull-stream "^1.7.2" - tar-stream "^2.0.1" - through2 "^3.0.1" - -ipfs-utils@~0.0.3: - version "0.0.4" - resolved "https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-0.0.4.tgz#946114cfeb6afb4454b4ccb10d2327cd323b0cce" - integrity sha512-7cZf6aGj2FG3XJWhCNwn4mS93Q0GEWjtBZvEHqzgI43U2qzNDCyzfS1pei1Y5F+tw/zDJ5U4XG0G9reJxR53Ig== - dependencies: - buffer "^5.2.1" - is-buffer "^2.0.3" + any-signal "^2.1.2" + debug "^4.1.1" + err-code "^3.0.1" + ipfs-core-types "^0.9.0" + ipfs-core-utils "^0.13.0" + ipfs-utils "^9.0.2" + it-first "^1.0.6" + it-last "^1.0.4" + merge-options "^3.0.4" + multiaddr "^10.0.0" + multiformats "^9.4.13" + native-abort-controller "^1.0.3" + parse-duration "^1.0.0" + stream-to-it "^0.2.2" + uint8arrays "^3.0.0" + +ipfs-unixfs@^6.0.3: + version "6.0.9" + resolved "https://registry.yarnpkg.com/ipfs-unixfs/-/ipfs-unixfs-6.0.9.tgz#f6613b8e081d83faa43ed96e016a694c615a9374" + integrity sha512-0DQ7p0/9dRB6XCb0mVCTli33GzIzSVx5udpJuVM47tGcD+W+Bl4LsnoLswd3ggNnNEakMv1FdoFITiEnchXDqQ== + dependencies: + err-code "^3.0.1" + protobufjs "^6.10.2" + +ipfs-utils@^9.0.2: + version "9.0.14" + resolved "https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-9.0.14.tgz#24f5fda1f4567685eb32bca2543d518f95fd8704" + integrity sha512-zIaiEGX18QATxgaS0/EOQNoo33W0islREABAcxXE8n7y2MGAlB+hdsxXn4J0hGZge8IqVQhW8sWIb+oJz2yEvg== + dependencies: + any-signal "^3.0.0" + browser-readablestream-to-it "^1.0.0" + buffer "^6.0.1" + 
electron-fetch "^1.7.2" + err-code "^3.0.1" is-electron "^2.2.0" - is-pull-stream "0.0.0" - is-stream "^2.0.0" - kind-of "^6.0.2" - readable-stream "^3.4.0" - -ipld-dag-cbor@~0.15.0: - version "0.15.3" - resolved "https://registry.yarnpkg.com/ipld-dag-cbor/-/ipld-dag-cbor-0.15.3.tgz#283afdb81d5b07db8e4fff7a10ef5e517e87f299" - integrity sha512-m23nG7ZyoVFnkK55/bLAErc7EfiMgaEQlqHWDTGzPI+O5r6bPfp+qbL5zTVSIT8tpbHmu174dwerVtLoVgeVyA== - dependencies: - borc "^2.1.2" - buffer "^5.5.0" - cids "~0.8.0" - is-circular "^1.0.2" - multicodec "^1.0.0" - multihashing-async "~0.8.0" - -ipld-dag-pb@~0.17.3: - version "0.17.4" - resolved "https://registry.yarnpkg.com/ipld-dag-pb/-/ipld-dag-pb-0.17.4.tgz#080841cfdd014d996f8da7f3a522ec8b1f6b6494" - integrity sha512-YwCxETEMuXVspOKOhjIOHJvKvB/OZfCDkpSFiYBQN2/JQjM9y/RFCYzIQGm0wg7dCFLrhvfjAZLTSaKs65jzWA== - dependencies: - cids "~0.7.0" - class-is "^1.1.0" - multicodec "~0.5.1" - multihashing-async "~0.7.0" - protons "^1.0.1" - stable "~0.1.8" - -ipld-raw@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/ipld-raw/-/ipld-raw-4.0.1.tgz#49a6f58cdfece5a4d581925b19ee19255be2a29d" - integrity sha512-WjIdtZ06jJEar8zh+BHB84tE6ZdbS/XNa7+XCArOYfmeJ/c01T9VQpeMwdJQYn5c3s5UvvCu7y4VIi3vk2g1bA== - dependencies: - cids "~0.7.0" - multicodec "^1.0.0" - multihashing-async "~0.8.0" + iso-url "^1.1.5" + it-all "^1.0.4" + it-glob "^1.0.1" + it-to-stream "^1.0.0" + merge-options "^3.0.4" + nanoid "^3.1.20" + native-fetch "^3.0.0" + node-fetch "^2.6.8" + react-native-fetch-api "^3.0.0" + stream-to-it "^0.2.2" is-arguments@^1.0.4, is-arguments@^1.1.0: version "1.1.0" @@ -6445,7 +6831,7 @@ is-buffer@^1.1.5: resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== -is-buffer@^2.0.3, is-buffer@~2.0.3: +is-buffer@~2.0.3: version "2.0.5" resolved 
"https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== @@ -6460,11 +6846,6 @@ is-callable@^1.2.3: resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.3.tgz#8b1e0500b73a1d76c70487636f368e519de8db8e" integrity sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ== -is-circular@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-circular/-/is-circular-1.0.2.tgz#2e0ab4e9835f4c6b0ea2b9855a84acd501b8366c" - integrity sha512-YttjnrswnUYRVJvxCvu8z+PGMUSzC2JttP0OEXezlAEdp3EXzhf7IZ3j0gRAybJBQupedIZFhY61Tga6E0qASA== - is-core-module@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.2.0.tgz#97037ef3d52224d85163f5597b2b63d9afed981a" @@ -6479,6 +6860,11 @@ is-date-object@^1.0.1: dependencies: has-tostringtag "^1.0.0" +is-docker@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== + is-dotfile@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" @@ -6586,13 +6972,6 @@ is-interactive@^1.0.0: resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== -is-ip@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-2.0.0.tgz#68eea07e8a0a0a94c2d080dd674c731ab2a461ab" - integrity sha512-9MTn0dteHETtyUx8pxqMwg5hMBi3pvlyglJ+b79KOCca0po23337LbVV2Hl4xmMvfw++ljnO0/+5G6G+0Szh6g== - dependencies: - ip-regex "^2.0.0" - is-ip@^3.1.0: version "3.1.0" 
resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-3.1.0.tgz#2ae5ddfafaf05cb8008a62093cf29734f657c5d8" @@ -6600,18 +6979,6 @@ is-ip@^3.1.0: dependencies: ip-regex "^4.0.0" -is-ipfs@~0.6.1: - version "0.6.3" - resolved "https://registry.yarnpkg.com/is-ipfs/-/is-ipfs-0.6.3.tgz#82a5350e0a42d01441c40b369f8791e91404c497" - integrity sha512-HyRot1dvLcxImtDqPxAaY1miO6WsiP/z7Yxpg2qpaLWv5UdhAPtLvHJ4kMLM0w8GSl8AFsVF23PHe1LzuWrUlQ== - dependencies: - bs58 "^4.0.1" - cids "~0.7.0" - mafmt "^7.0.0" - multiaddr "^7.2.1" - multibase "~0.6.0" - multihashes "~0.4.13" - is-lower-case@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/is-lower-case/-/is-lower-case-1.1.3.tgz#7e147be4768dc466db3bfb21cc60b31e6ad69393" @@ -6668,6 +7035,11 @@ is-plain-obj@^1.1.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= +is-plain-obj@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + is-posix-bracket@^0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" @@ -6683,16 +7055,6 @@ is-promise@4.0.0: resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-4.0.0.tgz#42ff9f84206c1991d26debf520dd5c01042dd2f3" integrity sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ== -is-promise@~1, is-promise@~1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-1.0.1.tgz#31573761c057e33c2e91aab9e96da08cefbe76e5" - integrity sha512-mjWH5XxnhMA8cFnDchr6qRP9S/kLntKuEfIYku+PaN1CnS8v+OG9O/BKpRCVRJvpIkgAZm0Pf5Is3iSSOILlcg== - -is-pull-stream@0.0.0: - version "0.0.0" - resolved 
"https://registry.yarnpkg.com/is-pull-stream/-/is-pull-stream-0.0.0.tgz#a3bc3d1c6d3055151c46bde6f399efed21440ca9" - integrity sha512-NWLwqCc95I6m8FZDYLAmVJc9Xgk8O+8pPOoDKFTC293FH4S7FBcbLCw3WWPCdiT8uUSdzPy47VM08WPDMJJrag== - is-regex@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.2.tgz#81c8ebde4db142f2cf1c53fc86d6a45788266251" @@ -6788,6 +7150,13 @@ is-weakref@^1.0.1: dependencies: call-bind "^1.0.0" +is-wsl@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + isarray@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" @@ -6808,27 +7177,10 @@ isexe@^2.0.0: resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== -iso-random-stream@^1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/iso-random-stream/-/iso-random-stream-1.1.2.tgz#c703da2c518db573277c5678cc43c5298283d64c" - integrity sha512-7y0tsBBgQs544iTYjyrMp5xvgrbYR8b+plQq1Bryp+03p0LssrxC9C1M0oHv4QESDt7d95c74XvMk/yawKqX+A== - dependencies: - buffer "^6.0.3" - readable-stream "^3.4.0" - -iso-stream-http@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/iso-stream-http/-/iso-stream-http-0.1.2.tgz#b3dfea4c9f23ff26d078d40c539cfc0dfebacd37" - integrity sha512-oHEDNOysIMTNypbg2f1SlydqRBvjl4ZbSE9+0awVxnkx3K2stGTFwB/kpVqnB6UEfF8QD36kAjDwZvqyXBLMnQ== - dependencies: - builtin-status-codes "^3.0.0" - inherits "^2.0.1" - readable-stream "^3.1.1" - -iso-url@~0.4.6, iso-url@~0.4.7: - version "0.4.7" - resolved 
"https://registry.yarnpkg.com/iso-url/-/iso-url-0.4.7.tgz#de7e48120dae46921079fe78f325ac9e9217a385" - integrity sha512-27fFRDnPAMnHGLq36bWTpKET+eiXct3ENlCcdcMdk+mjXrb2kw3mhBUg1B7ewAC0kVzlOPhADzQgz1SE6Tglog== +iso-url@^1.1.5: + version "1.2.1" + resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-1.2.1.tgz#db96a49d8d9a64a1c889fc07cc525d093afb1811" + integrity sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng== isobject@^2.0.0: version "2.1.0" @@ -6860,12 +7212,50 @@ isurl@^1.0.0-alpha5: has-to-string-tag-x "^1.2.0" is-object "^1.0.1" -iterable-ndjson@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/iterable-ndjson/-/iterable-ndjson-1.1.0.tgz#36f7e8a5bb04fd087d384f29e44fc4280fc014fc" - integrity sha512-OOp1Lb0o3k5MkXHx1YaIY5Z0ELosZfTnBaas9f8opJVcZGBIONA2zY/6CYE+LKkqrSDooIneZbrBGgOZnHPkrg== +it-all@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/it-all/-/it-all-1.0.6.tgz#852557355367606295c4c3b7eff0136f07749335" + integrity sha512-3cmCc6Heqe3uWi3CVM/k51fa/XbMFpQVzFoDsV0IZNHSQDyAXl3c4MjHkFX5kF3922OGj7Myv1nSEUgRtcuM1A== + +it-first@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/it-first/-/it-first-1.0.7.tgz#a4bef40da8be21667f7d23e44dae652f5ccd7ab1" + integrity sha512-nvJKZoBpZD/6Rtde6FXqwDqDZGF1sCADmr2Zoc0hZsIvnE449gRFnGctxDf09Bzc/FWnHXAdaHVIetY6lrE0/g== + +it-glob@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/it-glob/-/it-glob-1.0.2.tgz#bab9b04d6aaac42884502f3a0bfee84c7a29e15e" + integrity sha512-Ch2Dzhw4URfB9L/0ZHyY+uqOnKvBNeS/SMcRiPmJfpHiM0TsUZn+GkpcZxAoF3dJVdPm/PuIk3A4wlV7SUo23Q== dependencies: - string_decoder "^1.2.0" + "@types/minimatch" "^3.0.4" + minimatch "^3.0.4" + +it-last@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/it-last/-/it-last-1.0.6.tgz#4106232e5905ec11e16de15a0e9f7037eaecfc45" + integrity sha512-aFGeibeiX/lM4bX3JY0OkVCFkAw8+n9lkukkLNivbJRvNz8lI3YXv5xcqhFUV2lDJiraEK3OXRDbGuevnnR67Q== + +it-map@^1.0.4: + 
version "1.0.6" + resolved "https://registry.yarnpkg.com/it-map/-/it-map-1.0.6.tgz#6aa547e363eedcf8d4f69d8484b450bc13c9882c" + integrity sha512-XT4/RM6UHIFG9IobGlQPFQUrlEKkU4eBUFG3qhWhfAdh1JfF2x11ShCrKCdmZ0OiZppPfoLuzcfA4cey6q3UAQ== + +it-peekable@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/it-peekable/-/it-peekable-1.0.3.tgz#8ebe933767d9c5aa0ae4ef8e9cb3a47389bced8c" + integrity sha512-5+8zemFS+wSfIkSZyf0Zh5kNN+iGyccN02914BY4w/Dj+uoFEoPSvj5vaWn8pNZJNSxzjW0zHRxC3LUb2KWJTQ== + +it-to-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/it-to-stream/-/it-to-stream-1.0.0.tgz#6c47f91d5b5df28bda9334c52782ef8e97fe3a4a" + integrity sha512-pLULMZMAB/+vbdvbZtebC0nWBTbG581lk6w8P7DfIIIKUfa8FbY7Oi0FxZcFPbxvISs7A9E+cMpLDBc1XhpAOA== + dependencies: + buffer "^6.0.3" + fast-fifo "^1.0.0" + get-iterator "^1.0.2" + p-defer "^3.0.0" + p-fifo "^1.0.0" + readable-stream "^3.6.0" iterall@^1.1.3, iterall@^1.2.1, iterall@^1.3.0: version "1.3.0" @@ -6885,14 +7275,32 @@ iterate-value@^1.0.0: es-get-iterator "^1.0.2" iterate-iterator "^1.0.1" -jayson@3.6.6: - version "3.6.6" - resolved "https://registry.yarnpkg.com/jayson/-/jayson-3.6.6.tgz#189984f624e398f831bd2be8e8c80eb3abf764a1" - integrity sha512-f71uvrAWTtrwoww6MKcl9phQTC+56AopLyEenWvKVAIMz+q0oVGj6tenLZ7Z6UiPBkJtKLj4kt0tACllFQruGQ== +jake@^10.6.1: + version "10.8.6" + resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.6.tgz#227a96786a1e035214e0ba84b482d6223d41ef04" + integrity sha512-G43Ub9IYEFfu72sua6rzooi8V8Gz2lkfk48rW20vEWCGizeaEPlKB1Kh8JIA84yQbiAEfqlPmSpGgCKKxH3rDA== + dependencies: + async "^3.2.3" + chalk "^4.0.2" + filelist "^1.0.4" + minimatch "^3.1.2" + +jake@^10.8.5: + version "10.8.5" + resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.5.tgz#f2183d2c59382cb274226034543b9c03b8164c46" + integrity sha512-sVpxYeuAhWt0OTWITwT98oyV0GsXyMlXCF+3L1SuafBVUIr/uILGRB+NqwkzhgXKvoJpDIpQvqkUALgdmQsQxw== + dependencies: + async "^3.2.3" + chalk "^4.0.2" + filelist "^1.0.1" + minimatch 
"^3.0.4" + +jayson@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jayson/-/jayson-4.0.0.tgz#145a0ced46f900934c9b307e1332bcb0c7dbdb17" + integrity sha512-v2RNpDCMu45fnLzSk47vx7I+QUaOsox6f5X0CUlabAFwxoP+8MfAY0NQRFwOEYXIxm8Ih5y6OaEa5KYiQMkyAA== dependencies: "@types/connect" "^3.4.33" - "@types/express-serve-static-core" "^4.17.9" - "@types/lodash" "^4.14.159" "@types/node" "^12.12.54" "@types/ws" "^7.4.4" JSONStream "^1.3.5" @@ -6902,7 +7310,6 @@ jayson@3.6.6: eyes "^0.1.8" isomorphic-ws "^4.0.1" json-stringify-safe "^5.0.1" - lodash "^4.17.20" uuid "^8.3.2" ws "^7.4.5" @@ -6911,7 +7318,7 @@ js-sha3@0.5.7, js-sha3@^0.5.7: resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" integrity sha1-DU/9gALVMzqrr0oj7tL2N0yfKOc= -js-sha3@0.8.0, js-sha3@^0.8.0, js-sha3@~0.8.0: +js-sha3@0.8.0, js-sha3@^0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840" integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q== @@ -6926,14 +7333,6 @@ js-tokens@^3.0.2: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= -js-yaml@3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" - integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - js-yaml@3.14.0: version "3.14.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" @@ -6942,6 +7341,21 @@ js-yaml@3.14.0: argparse "^1.0.7" esprima "^4.0.0" +js-yaml@3.14.1, js-yaml@^3.14.1: + version "3.14.1" + resolved 
"https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + jsan@^3.1.13: version "3.1.13" resolved "https://registry.yarnpkg.com/jsan/-/jsan-3.1.13.tgz#4de8c7bf8d1cfcd020c313d438f930cec4b91d86" @@ -7040,13 +7454,6 @@ json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1: resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== -json-text-sequence@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/json-text-sequence/-/json-text-sequence-0.1.1.tgz#a72f217dc4afc4629fff5feb304dc1bd51a2f3d2" - integrity sha512-L3mEegEWHRekSHjc7+sc8eJhba9Clq1PZ8kMkzf8OxElhXc8O4TS5MwcVlj9aEbm5dr81N90WHC5nAz3UO971w== - dependencies: - delimit-stream "0.1.0" - json5@^0.5.1: version "0.5.1" resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" @@ -7110,16 +7517,6 @@ jsprim@^1.2.2: json-schema "0.4.0" verror "1.10.0" -just-kebab-case@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/just-kebab-case/-/just-kebab-case-1.1.0.tgz#ebe854fde84b0afa4e597fcd870b12eb3c026755" - integrity sha512-QkuwuBMQ9BQHMUEkAtIA4INLrkmnnveqlFB1oFi09gbU0wBdZo6tTnyxNWMR84zHxBuwK7GLAwqN8nrvVxOLTA== - -just-map-keys@^1.1.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/just-map-keys/-/just-map-keys-1.2.1.tgz#ef6e16133b7d34329962dfae9101d581abb1b143" - 
integrity sha512-Dmyz1Cy2SWM+PpqDPB1kdDglyexdzMthnAsvOIE9w4OPj8NDRuY1mh20x/JfG5w6fCGw9F0WmcofJhYZ4MiuyA== - keccak@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/keccak/-/keccak-3.0.2.tgz#4c2c6e8c54e04f2670ee49fa734eb9da152206e0" @@ -7129,11 +7526,6 @@ keccak@^3.0.0: node-gyp-build "^4.2.0" readable-stream "^3.6.0" -keypair@^1.0.1: - version "1.0.4" - resolved "https://registry.yarnpkg.com/keypair/-/keypair-1.0.4.tgz#a749a45f388593f3950f18b3757d32a93bd8ce83" - integrity sha512-zwhgOhhniaL7oxMgUMKKw5219PWWABMO+dgMnzJOQ2/5L3XJtTJGhW2PEXlxXj9zaccdReZJZ83+4NPhVfNVDg== - keyv@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" @@ -7148,7 +7540,7 @@ kind-of@^3.0.2: dependencies: is-buffer "^1.1.5" -kind-of@^6.0.0, kind-of@^6.0.2: +kind-of@^6.0.0: version "6.0.3" resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== @@ -7160,19 +7552,6 @@ klaw@^1.0.0: optionalDependencies: graceful-fs "^4.1.9" -ky-universal@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/ky-universal/-/ky-universal-0.2.2.tgz#7a36e1a75641a98f878157463513965f799f5bfe" - integrity sha512-fb32o/fKy/ux2ALWa9HU2hvGtfOq7/vn2nH0FpVE+jwNzyTeORlAbj3Fiw+WLMbUlmVqZIWupnLZ2USHvqwZHw== - dependencies: - abort-controller "^3.0.0" - node-fetch "^2.3.0" - -ky@^0.11.2: - version "0.11.2" - resolved "https://registry.yarnpkg.com/ky/-/ky-0.11.2.tgz#4ffe6621d9d9ab61bf0f5500542e3a96d1ba0815" - integrity sha512-5Aou5BWue5/mkPqIRqzSWW+0Hkl403pr/2AIrCKYw7cVl/Xoe8Xe4KLBO0PRjbz7GnRe1/8wW1KhqQNFFE7/GQ== - lazy-debug-legacy@0.0.X: version "0.0.1" resolved "https://registry.yarnpkg.com/lazy-debug-legacy/-/lazy-debug-legacy-0.0.1.tgz#537716c0776e4cf79e3ed1b621f7658c2911b1b1" @@ -7366,40 +7745,6 @@ levn@~0.3.0: prelude-ls "~1.1.2" type-check "~0.3.2" 
-libp2p-crypto-secp256k1@~0.3.0: - version "0.3.1" - resolved "https://registry.yarnpkg.com/libp2p-crypto-secp256k1/-/libp2p-crypto-secp256k1-0.3.1.tgz#4cbeb857f5cfe5fefb1253e6b2994420c0ca166e" - integrity sha512-evrfK/CeUSd/lcELUdDruyPBvxDmLairth75S32OLl3H+++2m2fV24JEtxzdFS9JH3xEFw0h6JFO8DBa1bP9dA== - dependencies: - async "^2.6.2" - bs58 "^4.0.1" - multihashing-async "~0.6.0" - nodeify "^1.0.1" - safe-buffer "^5.1.2" - secp256k1 "^3.6.2" - -libp2p-crypto@~0.16.1: - version "0.16.4" - resolved "https://registry.yarnpkg.com/libp2p-crypto/-/libp2p-crypto-0.16.4.tgz#fb1a4ba39d56789303947784b5b0d6cefce12fdc" - integrity sha512-II8HxKc9jbmQp34pprlluNxsBCWJDjHRPYJzuRy7ragztNip9Zb7uJ4lCje6gGzz4DNAcHkAUn+GqCIK1592iA== - dependencies: - asmcrypto.js "^2.3.2" - asn1.js "^5.0.1" - async "^2.6.1" - bn.js "^4.11.8" - browserify-aes "^1.2.0" - bs58 "^4.0.1" - iso-random-stream "^1.1.0" - keypair "^1.0.1" - libp2p-crypto-secp256k1 "~0.3.0" - multihashing-async "~0.5.1" - node-forge "^0.10.0" - pem-jwk "^2.0.0" - protons "^1.0.1" - rsa-pem-to-jwk "^1.1.3" - tweetnacl "^1.0.0" - ursa-optional "~0.10.0" - lines-and-columns@^1.1.6: version "1.2.4" resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" @@ -7640,7 +7985,7 @@ lodash.zipwith@^4.2.0: resolved "https://registry.yarnpkg.com/lodash.zipwith/-/lodash.zipwith-4.2.0.tgz#afacf03fd2f384af29e263c3c6bda3b80e3f51fd" integrity sha1-r6zwP9LzhK8p4mPDxr2juA4/Uf0= -lodash@4.17.21, lodash@^4.1.0, lodash@^4.15.0, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.4, lodash@^4.2.1: +lodash@4.17.21, lodash@^4.1.0, lodash@^4.15.0, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.4, lodash@^4.2.1: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== 
@@ -7676,10 +8021,10 @@ long@^4.0.0: resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== -looper@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/looper/-/looper-3.0.0.tgz#2efa54c3b1cbaba9b94aee2e5914b0be57fbb749" - integrity sha512-LJ9wplN/uSn72oJRsXTx+snxPet5c8XiZmOKCm906NVYu+ag6SB6vUcnJcWxgnl2NfbIyeobAn7Bwv6xRj2XJg== +long@^5.2.0: + version "5.2.3" + resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" + integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" @@ -7717,13 +8062,6 @@ lowercase-keys@^2.0.0: resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== -lru-cache@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" - integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== - dependencies: - yallist "^3.0.2" - lru-cache@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" @@ -7731,25 +8069,16 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" +lru-cache@^9.0.0: + version "9.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-9.1.1.tgz#c58a93de58630b688de39ad04ef02ef26f1902f1" + integrity sha512-65/Jky17UwSb0BuB9V+MyDpsOtXKmYwzhyl+cOa9XUiI4uV2Ouy/2voFP3+al0BjZbJgMBD8FojMpAf+Z+qn4A== + ltgt@2.2.1, ltgt@^2.1.2, ltgt@~2.2.0: version "2.2.1" resolved 
"https://registry.yarnpkg.com/ltgt/-/ltgt-2.2.1.tgz#f35ca91c493f7b73da0e07495304f17b31f87ee5" integrity sha1-81ypHEk/e3PaDgdJUwTxezH4fuU= -mafmt@^6.0.2: - version "6.0.10" - resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-6.0.10.tgz#3ad251c78f14f8164e66f70fd3265662da41113a" - integrity sha512-FjHDnew6dW9lUu3eYwP0FvvJl9uvNbqfoJM+c1WJcSyutNEIlyu6v3f/rlPnD1cnmue38IjuHlhBdIh3btAiyw== - dependencies: - multiaddr "^6.1.0" - -mafmt@^7.0.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-7.1.0.tgz#4126f6d0eded070ace7dbbb6fb04977412d380b5" - integrity sha512-vpeo9S+hepT3k2h5iFxzEHvvR0GPBx9uKaErmnRzYNcaKb03DgOArjEMlgG4a9LcuZZ89a3I8xbeto487n26eA== - dependencies: - multiaddr "^7.3.0" - make-dir@^1.0.0: version "1.3.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" @@ -7757,6 +8086,11 @@ make-dir@^1.0.0: dependencies: pify "^3.0.0" +make-error@^1.1.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== + map-stream@0.0.6: version "0.0.6" resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.0.6.tgz#d2ef4eb811a28644c7a8989985c69c2fdd496827" @@ -7808,6 +8142,13 @@ merge-descriptors@1.0.1: resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== +merge-options@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/merge-options/-/merge-options-3.0.4.tgz#84709c2aa2a4b24c1981f66c179fe5565cc6dbb7" + integrity sha512-2Sug1+knBjkaMsMgf1ctR1Ujx+Ayku4EdJN4Z+C2+JzoeF7A3OZ9KM2GY0CpQS51NR61LTurMJrRKPhSs3ZRTQ== + dependencies: + is-plain-obj "^2.1.0" + merge-stream@^1.0.0: version "1.0.1" resolved 
"https://registry.yarnpkg.com/merge-stream/-/merge-stream-1.0.1.tgz#4041202d508a342ba00174008df0c251b8c135e1" @@ -7820,7 +8161,7 @@ merge-stream@^2.0.0: resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== -merge2@^1.3.0: +merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== @@ -7871,6 +8212,14 @@ micromatch@^4.0.2: braces "^3.0.1" picomatch "^2.0.5" +micromatch@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + dependencies: + braces "^3.0.2" + picomatch "^2.3.1" + miller-rabin@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" @@ -7952,13 +8301,27 @@ minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: dependencies: brace-expansion "^1.1.7" -minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1: +minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== dependencies: brace-expansion "^1.1.7" +minimatch@^5.0.1: + version "5.1.6" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" + integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== 
+ dependencies: + brace-expansion "^2.0.1" + +minimatch@^8.0.2: + version "8.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-8.0.4.tgz#847c1b25c014d4e9a7f68aaf63dedd668a626229" + integrity sha512-W0Wvr9HyFXZRGIDgCicunpQ299OKXs9RgZfaukz4qAW/pJhcpUfupc9c+OObPOFueNy8VSrZgEmDtk6Kh4WzDA== + dependencies: + brace-expansion "^2.0.1" + minimist@0.0.8: version "0.0.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" @@ -7989,6 +8352,16 @@ minipass@^3.0.0: dependencies: yallist "^4.0.0" +minipass@^4.2.4: + version "4.2.8" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" + integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== + +minipass@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d" + integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ== + minizlib@^1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d" @@ -8101,29 +8474,24 @@ ms@2.1.3, ms@^2.1.1: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -multiaddr@^6.0.3, multiaddr@^6.0.6, multiaddr@^6.1.0: - version "6.1.1" - resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-6.1.1.tgz#9aae57b3e399089b9896d9455afa8f6b117dff06" - integrity sha512-Q1Ika0F9MNhMtCs62Ue+GWIJtRFEhZ3Xz8wH7/MZDVZTWhil1/H2bEGN02kUees3hkI3q1oHSjmXYDM0gxaFjQ== +multiaddr-to-uri@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/multiaddr-to-uri/-/multiaddr-to-uri-8.0.0.tgz#65efe4b1f9de5f6b681aa42ff36a7c8db7625e58" + integrity 
sha512-dq4p/vsOOUdVEd1J1gl+R2GFrXJQH8yjLtz4hodqdVbieg39LvBOdMQRdQnfbg5LSM/q1BYNVf5CBbwZFFqBgA== dependencies: - bs58 "^4.0.1" - class-is "^1.1.0" - hi-base32 "~0.5.0" - ip "^1.1.5" - is-ip "^2.0.0" - varint "^5.0.0" + multiaddr "^10.0.0" -multiaddr@^7.2.1, multiaddr@^7.3.0: - version "7.5.0" - resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-7.5.0.tgz#976c88e256e512263445ab03b3b68c003d5f485e" - integrity sha512-GvhHsIGDULh06jyb6ev+VfREH9evJCFIRnh3jUt9iEZ6XDbyoisZRFEI9bMvK/AiR6y66y6P+eoBw9mBYMhMvw== +multiaddr@^10.0.0: + version "10.0.1" + resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-10.0.1.tgz#0d15848871370860a4d266bb44d93b3dac5d90ef" + integrity sha512-G5upNcGzEGuTHkzxezPrrD6CaIHR9uo+7MwqhNVcXTs33IInon4y7nMiGxl2CY5hG7chvYQUQhz5V52/Qe3cbg== dependencies: - buffer "^5.5.0" - cids "~0.8.0" - class-is "^1.1.0" + dns-over-http-resolver "^1.2.3" + err-code "^3.0.1" is-ip "^3.1.0" - multibase "^0.7.0" - varint "^5.0.0" + multiformats "^9.4.5" + uint8arrays "^3.0.0" + varint "^6.0.0" multibase@^0.7.0: version "0.7.0" @@ -8133,14 +8501,6 @@ multibase@^0.7.0: base-x "^3.0.8" buffer "^5.5.0" -multibase@^1.0.0, multibase@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/multibase/-/multibase-1.0.1.tgz#4adbe1de0be8a1ab0274328b653c3f1903476724" - integrity sha512-KcCxpBVY8fdVKu4dJMAahq4F/2Z/9xqEjIiR7PiMe7LRGeorFn2NLmicN6nLBCqQvft6MG2Lc9X5P0IdyvnxEw== - dependencies: - base-x "^3.0.8" - buffer "^5.5.0" - multibase@~0.6.0: version "0.6.1" resolved "https://registry.yarnpkg.com/multibase/-/multibase-0.6.1.tgz#b76df6298536cc17b9f6a6db53ec88f85f8cc12b" @@ -8149,14 +8509,14 @@ multibase@~0.6.0: base-x "^3.0.8" buffer "^5.5.0" -multicodec@^0.5.5, multicodec@~0.5.1: +multicodec@^0.5.5: version "0.5.7" resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-0.5.7.tgz#1fb3f9dd866a10a55d226e194abba2dcc1ee9ffd" integrity sha512-PscoRxm3f+88fAtELwUnZxGDkduE2HD9Q6GHUOywQLjOGT/HAdhjLDYNZ1e7VR0s0TP0EwZ16LNUTFpoBGivOA== dependencies: varint 
"^5.0.0" -multicodec@^1.0.0, multicodec@^1.0.1: +multicodec@^1.0.0: version "1.0.4" resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-1.0.4.tgz#46ac064657c40380c28367c90304d8ed175a714f" integrity sha512-NDd7FeS3QamVtbgfvu5h7fd1IlbaC4EQ0/pgU4zqE2vdHCmBGsUa0TiM8/TdSeG6BMPC92OOCf8F1ocE/Wkrrg== @@ -8164,7 +8524,12 @@ multicodec@^1.0.0, multicodec@^1.0.1: buffer "^5.6.0" varint "^5.0.0" -multihashes@^0.4.15, multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4.15: +multiformats@^9.4.13, multiformats@^9.4.2, multiformats@^9.4.5, multiformats@^9.5.4: + version "9.9.0" + resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-9.9.0.tgz#c68354e7d21037a8f1f8833c8ccd68618e8f1d37" + integrity sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg== + +multihashes@^0.4.15, multihashes@~0.4.15: version "0.4.21" resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-0.4.21.tgz#dc02d525579f334a7909ade8a122dabb58ccfcb5" integrity sha512-uVSvmeCWf36pU2nB4/1kzYZjsXD9vofZKpgudqkceYY5g2aZZXJ5r9lxuzoRLl1OAp28XljXsEJ/X/85ZsKmKw== @@ -8173,71 +8538,6 @@ multihashes@^0.4.15, multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4. 
multibase "^0.7.0" varint "^5.0.0" -multihashes@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-1.0.1.tgz#a89415d68283cf6287c6e219e304e75ce7fb73fe" - integrity sha512-S27Tepg4i8atNiFaU5ZOm3+gl3KQlUanLs/jWcBxQHFttgq+5x1OgbQmf2d8axJ/48zYGBd/wT9d723USMFduw== - dependencies: - buffer "^5.6.0" - multibase "^1.0.1" - varint "^5.0.0" - -multihashing-async@~0.5.1: - version "0.5.2" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.5.2.tgz#4af40e0dde2f1dbb12a7c6b265181437ac26b9de" - integrity sha512-mmyG6M/FKxrpBh9xQDUvuJ7BbqT93ZeEeH5X6LeMYKoYshYLr9BDdCsvDtZvn+Egf+/Xi+aOznrWL4vp3s+p0Q== - dependencies: - blakejs "^1.1.0" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js "^3.0.1" - nodeify "^1.0.1" - -multihashing-async@~0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.6.0.tgz#c1fc6696a624b9bf39b160b0c4c4e7ba3f394453" - integrity sha512-Qv8pgg99Lewc191A5nlXy0bSd2amfqlafNJZmarU6Sj7MZVjpR94SCxQjf4DwPtgWZkiLqsjUQBXA2RSq+hYyA== - dependencies: - blakejs "^1.1.0" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js "^3.0.1" - nodeify "^1.0.1" - -multihashing-async@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.7.0.tgz#3234fb98295be84386b85bfd20377d3e5be20d6b" - integrity sha512-SCbfl3f+DzJh+/5piukga9ofIOxwfT05t8R4jfzZIJ88YE9zU9+l3K2X+XB19MYyxqvyK9UJRNWbmQpZqQlbRA== - dependencies: - blakejs "^1.1.0" - buffer "^5.2.1" - err-code "^1.1.2" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js-revisited "^3.0.0" - -multihashing-async@~0.8.0: - version "0.8.2" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.8.2.tgz#3d5da05df27d83be923f6d04143a0954ff87f27f" - integrity sha512-2lKa1autuCy8x7KIEj9aVNbAb3aIMRFYIwN7mq/zD4pxgNIVgGlm+f6GKY4880EOF2Y3GktHYssRy7TAJQ2DyQ== - dependencies: - blakejs "^1.1.0" - buffer "^5.4.3" - err-code "^2.0.0" - 
js-sha3 "^0.8.0" - multihashes "^1.0.1" - murmurhash3js-revisited "^3.0.0" - -murmurhash3js-revisited@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/murmurhash3js-revisited/-/murmurhash3js-revisited-3.0.0.tgz#6bd36e25de8f73394222adc6e41fa3fac08a5869" - integrity sha512-/sF3ee6zvScXMb1XFJ8gDsSnY+X8PbOyjIuBhtgis10W2Jx4ZjIhikUCIF9c4gpJxVnQIsPAFrSwTCuAjicP6g== - -murmurhash3js@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/murmurhash3js/-/murmurhash3js-3.0.1.tgz#3e983e5b47c2a06f43a713174e7e435ca044b998" - integrity sha512-KL8QYUaxq7kUbcl0Yto51rMcYt7E/4N4BG3/c96Iqw1PQrTRspu8Cpx4TZ4Nunib1d4bEkIH3gjCYlP2RLBdow== - mute-stream@0.0.8: version "0.0.8" resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" @@ -8248,11 +8548,6 @@ nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== -nan@^2.14.0, nan@^2.14.2: - version "2.16.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.16.0.tgz#664f43e45460fb98faf00edca0bb0d7b8dce7916" - integrity sha512-UdAqHyFngu7TfQKsCBgAA6pWDkT8MAO7d0jyOecVhN5354xbLqdn8mV9Tat9gepAupm0bt2DbeaSC8vS52MuFA== - nano-json-stream-parser@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/nano-json-stream-parser/-/nano-json-stream-parser-0.1.2.tgz#0cc8f6d0e2b622b479c40d499c46d64b755c6f5f" @@ -8263,6 +8558,11 @@ nanoid@^2.0.0: resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-2.1.11.tgz#ec24b8a758d591561531b4176a01e3ab4f0f0280" integrity sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA== +nanoid@^3.0.2, nanoid@^3.1.20, nanoid@^3.1.23: + version "3.3.6" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c" + integrity 
sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== + napi-macros@~1.8.1: version "1.8.2" resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda" @@ -8273,14 +8573,20 @@ napi-macros@~2.0.0: resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-2.0.0.tgz#2b6bae421e7b96eb687aa6c77a7858640670001b" integrity sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg== -"ndjson@github:hugomrdias/ndjson#feat/readable-stream3": - version "1.5.0" - resolved "https://codeload.github.com/hugomrdias/ndjson/tar.gz/4db16da6b42e5b39bf300c3a7cde62abb3fa3a11" - dependencies: - json-stringify-safe "^5.0.1" - minimist "^1.2.0" - split2 "^3.1.0" - through2 "^3.0.0" +native-abort-controller@^1.0.3, native-abort-controller@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/native-abort-controller/-/native-abort-controller-1.0.4.tgz#39920155cc0c18209ff93af5bc90be856143f251" + integrity sha512-zp8yev7nxczDJMoP6pDxyD20IU0T22eX8VwN2ztDccKvSZhRaV33yP1BGwKSZfXuqWUzsXopVFjBdau9OOAwMQ== + +native-fetch@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/native-fetch/-/native-fetch-3.0.0.tgz#06ccdd70e79e171c365c75117959cf4fe14a09bb" + integrity sha512-G3Z7vx0IFb/FQ4JxvtqGABsOTIqRWvgQz6e+erkB+JJD6LrszQtMozEHI4EkmgZQvnGHrpLVzUWk7t4sJCIkVw== + +natural-orderby@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/natural-orderby/-/natural-orderby-2.0.3.tgz#8623bc518ba162f8ff1cdb8941d74deb0fdcc016" + integrity sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q== needle@^2.2.1: version "2.6.0" @@ -8301,6 +8607,11 @@ next-tick@~1.0.0: resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" integrity sha1-yobR/ogoFpsBICCOPchCS524NCw= +nice-try@^1.0.4: + version "1.0.5" + resolved 
"https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" + integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== + no-case@^2.2.0, no-case@^2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" @@ -8349,13 +8660,6 @@ node-fetch@2.6.1: resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== -node-fetch@^2.3.0: - version "2.6.7" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" - integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== - dependencies: - whatwg-url "^5.0.0" - node-fetch@^2.6.1: version "2.6.6" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.6.tgz#1751a7c01834e8e1697758732e9efb6eeadfaf89" @@ -8363,10 +8667,12 @@ node-fetch@^2.6.1: dependencies: whatwg-url "^5.0.0" -node-forge@^0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" - integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA== +node-fetch@^2.6.8: + version "2.6.9" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.9.tgz#7c7f744b5cc6eb5fd404e0c7a9fec630a55657e6" + integrity sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg== + dependencies: + whatwg-url "^5.0.0" node-gyp-build@^4.2.0: version "4.5.0" @@ -8416,14 +8722,6 @@ node-releases@^1.1.70: resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" integrity 
sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== -nodeify@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/nodeify/-/nodeify-1.0.1.tgz#64ab69a7bdbaf03ce107b4f0335c87c0b9e91b1d" - integrity sha512-n7C2NyEze8GCo/z73KdbjRsBiLbv6eBn1FxwYKQ23IqGo7pQY3mhQan61Sv7eEDJCiyUjTVrVkXTzJCo1dW7Aw== - dependencies: - is-promise "~1.0.0" - promise "~1.3.0" - nofilter@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/nofilter/-/nofilter-1.0.4.tgz#78d6f4b6a613e7ced8b015cec534625f7667006e" @@ -8490,7 +8788,7 @@ npm-packlist@^1.1.6: npm-bundled "^1.0.1" npm-normalize-package-bin "^1.0.1" -npm-run-path@^4.0.0: +npm-run-path@^4.0.0, npm-run-path@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== @@ -8554,11 +8852,6 @@ object-assign@4.1.0: resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0" integrity sha1-ejs9DpgGPUP0wD8uiubNUahog6A= -object-assign@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa" - integrity sha512-CdsOUYIh5wIiozhJ3rLQgmUTgcyzFwZZrqhkKhODMoGtPKM+wt0h0CNIoauJWMsS9822EdzPsF/6mb4nLvPN5g== - object-assign@^4, object-assign@^4.0.0, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" @@ -8589,6 +8882,11 @@ object-path@^0.11.4: resolved "https://registry.yarnpkg.com/object-path/-/object-path-0.11.8.tgz#ed002c02bbdd0070b78a27455e8ae01fc14d4742" integrity sha512-YJjNZrlXJFM42wTBn6zgOJVar9KFJvzx6sTWDte8sWZF//cnjl0BxHNpfZx+ZffXX63A9q0b1zsFiBX4g4X5KA== +object-treeify@^1.1.33: + version "1.1.33" + 
resolved "https://registry.yarnpkg.com/object-treeify/-/object-treeify-1.1.33.tgz#f06fece986830a3cba78ddd32d4c11d1f76cdf40" + integrity sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A== + object.assign@4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" @@ -8661,7 +8959,7 @@ onetime@^2.0.0: dependencies: mimic-fn "^1.0.0" -onetime@^5.1.0: +onetime@^5.1.0, onetime@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== @@ -8681,13 +8979,6 @@ optimism@^0.14.0: "@wry/context" "^0.5.2" "@wry/trie" "^0.2.1" -optimist@~0.3.5: - version "0.3.7" - resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9" - integrity sha512-TCx0dXQzVtSCg2OgY/bO9hjM9cV4XYx09TVK+s3+FhkjT6LovsLe+pPMzpWf+6yXK/hUizs2gUoTw3jHM0VaTQ== - dependencies: - wordwrap "~0.0.2" - optionator@^0.8.1: version "0.8.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" @@ -8700,6 +8991,19 @@ optionator@^0.8.1: type-check "~0.3.2" word-wrap "~1.2.3" +ora@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/ora/-/ora-4.0.2.tgz#0e1e68fd45b135d28648b27cf08081fa6e8a297d" + integrity sha512-YUOZbamht5mfLxPmk4M35CD/5DuOkAacxlEUbStVXpBAt4fyhBf+vZHI/HRkI++QUp3sNoeA2Gw4C+hi4eGSig== + dependencies: + chalk "^2.4.2" + cli-cursor "^3.1.0" + cli-spinners "^2.2.0" + is-interactive "^1.0.0" + log-symbols "^3.0.0" + strip-ansi "^5.2.0" + wcwidth "^1.0.1" + ora@^3.4.0: version "3.4.0" resolved "https://registry.yarnpkg.com/ora/-/ora-3.4.0.tgz#bf0752491059a3ef3ed4c85097531de9fdbcd318" @@ -8781,6 +9085,19 @@ p-cancelable@^1.0.0: resolved 
"https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== +p-defer@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83" + integrity sha512-ugZxsxmtTln604yeYd29EGrNhazN2lywetzpKhfmQjW/VJmhpDmWbiX+h0zL8V91R0UXkhb3KtPmyq9PZw3aYw== + +p-fifo@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-fifo/-/p-fifo-1.0.0.tgz#e29d5cf17c239ba87f51dde98c1d26a9cfe20a63" + integrity sha512-IjoCxXW48tqdtDFz6fqo5q1UfFVjjVZe8TC1QRflvNUJtNfCUhxOUw6MOVZhDPjqhSzc26xKdugsO17gmzd5+A== + dependencies: + fast-fifo "^1.0.0" + p-defer "^3.0.0" + p-finally@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" @@ -8892,6 +9209,11 @@ parse-cache-control@^1.0.1: resolved "https://registry.yarnpkg.com/parse-cache-control/-/parse-cache-control-1.0.1.tgz#8eeab3e54fa56920fe16ba38f77fa21aacc2d74e" integrity sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg== +parse-duration@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/parse-duration/-/parse-duration-1.0.3.tgz#b6681f5edcc2689643b34c09ea63f86f58a35814" + integrity sha512-o6NAh12na5VvR6nFejkU0gpQ8jmOY9Y9sTU2ke3L3G/d/3z8jqmbBbeyBGHU73P4JLXfc7tJARygIK3WGIkloA== + parse-glob@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" @@ -8969,6 +9291,14 @@ pascal-case@^3.1.1, pascal-case@^3.1.2: no-case "^3.0.4" tslib "^2.0.3" +password-prompt@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/password-prompt/-/password-prompt-1.1.2.tgz#85b2f93896c5bd9e9f2d6ff0627fa5af3dc00923" + integrity 
sha512-bpuBhROdrhuN3E7G/koAju0WjVw9/uQOG5Co5mokNj0MiOSBVZS1JTwM4zl55hu0WFmIEFvO9cU9sJQiBIYeIA== + dependencies: + ansi-escapes "^3.1.0" + cross-spawn "^6.0.5" + path-case@^2.1.0: version "2.1.1" resolved "https://registry.yarnpkg.com/path-case/-/path-case-2.1.1.tgz#94b8037c372d3fe2906e465bb45e25d226e8eea5" @@ -9003,6 +9333,11 @@ path-is-absolute@^1.0.0, path-is-absolute@^1.0.1: resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= +path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + integrity sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw== + path-key@^3.0.0, path-key@^3.1.0: version "3.1.1" resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" @@ -9013,6 +9348,14 @@ path-parse@^1.0.6: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== +path-scurry@^1.6.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.7.0.tgz#99c741a2cfbce782294a39994d63748b5a24f6db" + integrity sha512-UkZUeDjczjYRE495+9thsgcVgsaCPkaw80slmfVFgllxY+IO8ubTsOpFVjDPROBqJdHfVPUFRHPBV/WciOVfWg== + dependencies: + lru-cache "^9.0.0" + minipass "^5.0.0" + path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" @@ -9054,39 +9397,12 @@ pbkdf2@^3.0.3: safe-buffer "^5.0.1" sha.js "^2.4.8" -peer-id@~0.12.2, peer-id@~0.12.3: - version "0.12.5" - resolved "https://registry.yarnpkg.com/peer-id/-/peer-id-0.12.5.tgz#b22a1edc5b4aaaa2bb830b265ba69429823e5179" - integrity 
sha512-3xVWrtIvNm9/OPzaQBgXDrfWNx63AftgFQkvqO6YSZy7sP3Fuadwwbn54F/VO9AnpyW/26i0WRQz9FScivXrmw== - dependencies: - async "^2.6.3" - class-is "^1.1.0" - libp2p-crypto "~0.16.1" - multihashes "~0.4.15" - -peer-info@~0.15.1: - version "0.15.1" - resolved "https://registry.yarnpkg.com/peer-info/-/peer-info-0.15.1.tgz#21254a7c516d0dd046b150120b9aaf1b9ad02146" - integrity sha512-Y91Q2tZRC0CpSTPd1UebhGqniOrOAk/aj60uYUcWJXCoLTAnGu+4LJGoiay8ayudS6ice7l3SKhgL/cS62QacA== - dependencies: - mafmt "^6.0.2" - multiaddr "^6.0.3" - peer-id "~0.12.2" - unique-by "^1.0.0" - -pem-jwk@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/pem-jwk/-/pem-jwk-2.0.0.tgz#1c5bb264612fc391340907f5c1de60c06d22f085" - integrity sha512-rFxu7rVoHgQ5H9YsP50dDWf0rHjreVA2z0yPiWr5WdH/UHb29hKtF7h6l8vNd1cbYR1t0QL+JKhW55a2ZV4KtA== - dependencies: - asn1.js "^5.0.1" - performance-now@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== -picomatch@^2.0.4, picomatch@^2.2.1: +picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== @@ -9128,11 +9444,6 @@ pkg-conf@^1.1.2: object-assign "^4.0.1" symbol "^0.2.1" -pkginfo@0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/pkginfo/-/pkginfo-0.4.1.tgz#b5418ef0439de5425fc4995042dced14fb2a84ff" - integrity sha1-tUGO8EOd5UJfxJlQQtztFPsqhP8= - pluralize@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-8.0.0.tgz#1a6fa16a38d12a1901e0320fa017051c539ce3b1" @@ -9460,11 +9771,6 @@ process@^0.11.10: resolved 
"https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= -promise-nodeify@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/promise-nodeify/-/promise-nodeify-3.0.1.tgz#f0f5d9720ee9ec71dd2bfa92667be504c10229c2" - integrity sha512-ghsSuzZXJX8iO7WVec2z7GI+Xk/EyiD+JZK7AZKhUqYfpLa/Zs4ylUD+CwwnKlG6G3HnkUPMAi6PO7zeqGKssg== - promise-to-callback@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/promise-to-callback/-/promise-to-callback-1.0.0.tgz#5d2a749010bfb67d963598fcd3960746a68feef7" @@ -9498,18 +9804,6 @@ promise@^8.0.0: dependencies: asap "~2.0.6" -promise@~1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/promise/-/promise-1.3.0.tgz#e5cc9a4c8278e4664ffedc01c7da84842b040175" - integrity sha512-R9WrbTF3EPkVtWjp7B7umQGVndpsi+rsDAfrR4xAALQpFLa/+2OriecLhawxzvii2gd9+DZFwROWDuUUaqS5yA== - dependencies: - is-promise "~1" - -promisify-es6@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/promisify-es6/-/promisify-es6-1.0.3.tgz#b012668c4df3c965ce13daac2b3a4d1726a96346" - integrity sha512-N9iVG+CGJsI4b4ZGazjwLnxErD2d9Pe4DPvvXSxYA9tFNu8ymXME4Qs5HIQ0LMJpNM7zj+m0NlNnNeqFpKzqnA== - prop-types@^15.7.2: version "15.7.2" resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" @@ -9519,20 +9813,24 @@ prop-types@^15.7.2: object-assign "^4.1.1" react-is "^16.8.1" -protocol-buffers-schema@^3.3.1: - version "3.6.0" - resolved "https://registry.yarnpkg.com/protocol-buffers-schema/-/protocol-buffers-schema-3.6.0.tgz#77bc75a48b2ff142c1ad5b5b90c94cd0fa2efd03" - integrity sha512-TdDRD+/QNdrCGCE7v8340QyuXd4kIWIgapsE2+n/SaGiSSbomYl4TjHlvIoCWRpE7wFt02EpB35VVA2ImcBVqw== - -protons@^1.0.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/protons/-/protons-1.2.1.tgz#5f1e0db8b2139469cd1c3b4e332a4c2d95d0a218" - integrity 
sha512-2oqDyc/SN+tNcJf8XxrXhYL7sQn2/OMl8mSdD7NVGsWjMEmAbks4eDVnCyf0vAoRbBWyWTEXWk4D8XfuKVl3zg== +protobufjs@^6.10.2: + version "6.11.3" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.11.3.tgz#637a527205a35caa4f3e2a9a4a13ddffe0e7af74" + integrity sha512-xL96WDdCZYdU7Slin569tFX712BxsxslWwAfAhCYjQKGTq7dAU91Lomy6nLLhh/dyGhk/YH4TwTSRxTzhuHyZg== dependencies: - buffer "^5.5.0" - protocol-buffers-schema "^3.3.1" - signed-varint "^2.0.1" - varint "^5.0.0" + "@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/long" "^4.0.1" + "@types/node" ">=13.7.0" + long "^4.0.0" proxy-addr@~2.0.7: version "2.0.7" @@ -9569,23 +9867,6 @@ public-encrypt@^4.0.0: randombytes "^2.0.1" safe-buffer "^5.1.2" -pull-defer@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/pull-defer/-/pull-defer-0.2.3.tgz#4ee09c6d9e227bede9938db80391c3dac489d113" - integrity sha512-/An3KE7mVjZCqNhZsr22k1Tx8MACnUnHZZNPSJ0S62td8JtYr/AiRG42Vz7Syu31SoTLUzVIe61jtT/pNdjVYA== - -pull-stream@^3.2.3, pull-stream@^3.6.9: - version "3.6.14" - resolved "https://registry.yarnpkg.com/pull-stream/-/pull-stream-3.6.14.tgz#529dbd5b86131f4a5ed636fdf7f6af00781357ee" - integrity sha512-KIqdvpqHHaTUA2mCYcLG1ibEbu/LCKoJZsBWyv9lSYtPkJPBq8m3Hxa103xHi6D2thj5YXa0TqK3L3GUkwgnew== - -pull-to-stream@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/pull-to-stream/-/pull-to-stream-0.1.1.tgz#fa2058528528e3542b81d6f17cbc42288508ff37" - integrity sha512-thZkMv6F9PILt9zdvpI2gxs19mkDrlixYKX6cOBxAW16i1NZH+yLAmF4r8QfJ69zuQh27e01JZP9y27tsH021w== - dependencies: - readable-stream "^3.1.1" - pump@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.3.tgz#5dfe8311c33bbf6fc18261f9f34702c47c08a954" @@ 
-9607,6 +9888,11 @@ punycode@2.1.0: resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.0.tgz#5f863edc89b96db09074bad7947bf09056ca4e7d" integrity sha1-X4Y+3Im5bbCQdLrXlHvwkFbKTn0= +punycode@^1.3.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== + punycode@^2.1.0, punycode@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" @@ -9617,7 +9903,19 @@ pure-rand@^4.1.1: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-4.1.2.tgz#cbad2a3e3ea6df0a8d80d8ba204779b5679a5205" integrity sha512-uLzZpQWfroIqyFWmX/pl0OL2JHJdoU3dbh0dvZ25fChHFJJi56J5oQZhW6QgbT2Llwh1upki84LnTwlZvsungA== -qs@6.11.0, qs@^6.4.0, qs@^6.5.2: +pvtsutils@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/pvtsutils/-/pvtsutils-1.3.2.tgz#9f8570d132cdd3c27ab7d51a2799239bf8d8d5de" + integrity sha512-+Ipe2iNUyrZz+8K/2IOo+kKikdtfhRKzNpQbruF2URmqPtoqAs8g3xS7TJvFF2GcPXjh7DkqMnpVveRFq4PgEQ== + dependencies: + tslib "^2.4.0" + +pvutils@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/pvutils/-/pvutils-1.1.3.tgz#f35fc1d27e7cd3dfbd39c0826d173e806a03f5a3" + integrity sha512-pMpnA0qRdFp32b1sJl1wOJNxZLQ2cbQx+k6tjNtZ8CpvVhNqEPRgivZ2WOUev2YMajecdH7ctUPDvEe87nariQ== + +qs@6.11.0, qs@^6.4.0: version "6.11.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== @@ -9729,6 +10027,13 @@ react-is@^16.7.0, react-is@^16.8.1: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== 
+react-native-fetch-api@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/react-native-fetch-api/-/react-native-fetch-api-3.0.0.tgz#81e1bb6562c292521bc4eca52fe1097f4c1ebab5" + integrity sha512-g2rtqPjdroaboDKTsJCTlcmtw54E25OjyaunUP0anOZn4Fuo2IKs8BVfe02zVggA/UysbmfSnRJIqtNkAgggNA== + dependencies: + p-defer "^3.0.0" + read-pkg-up@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02" @@ -9776,7 +10081,7 @@ readable-stream@1.1.14, readable-stream@^1.0.33: isarray "0.0.1" string_decoder "~0.10.x" -"readable-stream@2 || 3", readable-stream@^3.0.0, readable-stream@^3.0.1, readable-stream@^3.0.2, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: +"readable-stream@2 || 3", readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== @@ -9832,13 +10137,27 @@ readdirp@~3.4.0: dependencies: picomatch "^2.2.1" -readdirp@~3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.5.0.tgz#9ba74c019b15d365278d2e91bb8c48d7b4d42c9e" - integrity sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ== +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== dependencies: picomatch "^2.2.1" +receptacle@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/receptacle/-/receptacle-1.3.2.tgz#a7994c7efafc7a01d0e2041839dab6c4951360d2" + integrity 
sha512-HrsFvqZZheusncQRiEE7GatOAETrARKV/lnfYicIm8lbvp/JQOdADOfhjBd2DajvoszEyxSM6RlAAIZgEoeu/A== + dependencies: + ms "^2.1.1" + +redeyed@~2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-2.1.1.tgz#8984b5815d99cb220469c99eeeffe38913e6cc0b" + integrity sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ== + dependencies: + esprima "~4.0.0" + redux-cli-logger@^2.0.1: version "2.1.0" resolved "https://registry.yarnpkg.com/redux-cli-logger/-/redux-cli-logger-2.1.0.tgz#7e546502a4b08c7fac4fe2faee2326a6326cb4a1" @@ -10101,6 +10420,11 @@ restore-cursor@^3.1.0: onetime "^5.1.0" signal-exit "^3.0.2" +retimer@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/retimer/-/retimer-3.0.0.tgz#98b751b1feaf1af13eb0228f8ea68b8f9da530df" + integrity sha512-WKE0j11Pa0ZJI5YIk0nflGI7SQsfl2ljihVy7ogh7DeQSeYAUi0ubZ/yEueGtDfUPk6GH5LRw1hBdLq4IwUBWA== + retry@0.13.1: version "0.13.1" resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658" @@ -10152,21 +10476,6 @@ rn-host-detect@^1.1.5: resolved "https://registry.yarnpkg.com/rn-host-detect/-/rn-host-detect-1.2.0.tgz#8b0396fc05631ec60c1cb8789e5070cdb04d0da0" integrity sha512-btNg5kzHcjZZ7t7mvvV/4wNJ9e3MPgrWivkRgWURzXL0JJ0pwWlU4zrbmdlz3HHzHOxhBhHB4D+/dbMFfu4/4A== -rsa-pem-to-jwk@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/rsa-pem-to-jwk/-/rsa-pem-to-jwk-1.1.3.tgz#245e76bdb7e7234cfee7ca032d31b54c38fab98e" - integrity sha512-ZlVavEvTnD8Rzh/pdB8NH4VF5GNEtF6biGQcTtC4GKFMsbZR08oHtOYefbhCN+JnJIuMItiCDCMycdcMrw6blA== - dependencies: - object-assign "^2.0.0" - rsa-unpack "0.0.6" - -rsa-unpack@0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/rsa-unpack/-/rsa-unpack-0.0.6.tgz#f50ebd56a628378e631f297161026ce9ab4eddba" - integrity sha512-HRrl8GHjjPziPFRDJPq/v5OxZ3IPdksV5h3cime/oHgcgM1k1toO5OdtzClgBqRf5dF6IgptOB0g/zFb0w5zQw== - dependencies: - optimist "~0.3.5" - 
run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" @@ -10233,20 +10542,6 @@ scrypt-js@^3.0.0, scrypt-js@^3.0.1: resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== -secp256k1@^3.6.2: - version "3.8.0" - resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-3.8.0.tgz#28f59f4b01dbee9575f56a47034b7d2e3b3b352d" - integrity sha512-k5ke5avRZbtl9Tqx/SA7CbY3NF6Ro+Sj9cZxezFzuBlLDmyqPiL8hJJ+EmzD8Ig4LUDByHJ3/iPOVoRixs/hmw== - dependencies: - bindings "^1.5.0" - bip66 "^1.1.5" - bn.js "^4.11.8" - create-hash "^1.2.0" - drbg.js "^1.0.1" - elliptic "^6.5.2" - nan "^2.14.0" - safe-buffer "^5.1.2" - secp256k1@^4.0.1: version "4.0.3" resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-4.0.3.tgz#c4559ecd1b8d3c1827ed2d1b94190d69ce267303" @@ -10278,6 +10573,13 @@ semver@7.3.5: dependencies: lru-cache "^6.0.0" +semver@7.4.0: + version "7.4.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.4.0.tgz#8481c92feffc531ab1e012a8ffc15bdd3a0f4318" + integrity sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw== + dependencies: + lru-cache "^6.0.0" + semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" @@ -10297,6 +10599,13 @@ semver@^7.3.4: dependencies: lru-cache "^6.0.0" +semver@^7.3.7: + version "7.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.0.tgz#ed8c5dc8efb6c629c88b23d41dc9bf40c1d96cd0" + integrity sha512-+XC0AD/R7Q2mPSRuy2Id0+CGTZ98+8f+KvwirxOKIEyid+XSx6HbC63p+O4IndTHuX5Z+JxQ0TghCkO5Cg/2HA== + dependencies: + lru-cache "^6.0.0" + semver@~5.4.1: version "5.4.1" resolved 
"https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" @@ -10395,6 +10704,13 @@ shallowequal@^1.0.2: resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg== + dependencies: + shebang-regex "^1.0.0" + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -10402,6 +10718,11 @@ shebang-command@^2.0.0: dependencies: shebang-regex "^3.0.0" +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ== + shebang-regex@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" @@ -10421,18 +10742,11 @@ signal-exit@^3.0.0: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== -signal-exit@^3.0.2: +signal-exit@^3.0.2, signal-exit@^3.0.3: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== -signed-varint@^2.0.1: - version "2.0.1" - resolved 
"https://registry.yarnpkg.com/signed-varint/-/signed-varint-2.0.1.tgz#50a9989da7c98c2c61dad119bc97470ef8528129" - integrity sha512-abgDPg1106vuZZOvw7cFwdCABddfJRz5akcCcchzTbhyhYnsG31y4AlZEgp315T7W3nQq5P4xeOm186ZiPVFzw== - dependencies: - varint "~5.0.0" - signedsource@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/signedsource/-/signedsource-1.0.0.tgz#1ddace4981798f93bd833973803d80d52e93ad6a" @@ -10526,6 +10840,14 @@ source-map-support@^0.5.19, source-map-support@^0.5.3: buffer-from "^1.0.0" source-map "^0.6.0" +source-map-support@^0.5.20: + version "0.5.21" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + source-map-url@^0.4.0: version "0.4.1" resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.1.tgz#0af66605a745a5a2f91cf1bbf8a7afbc283dec56" @@ -10582,13 +10904,6 @@ split-ca@^1.0.0: resolved "https://registry.yarnpkg.com/split-ca/-/split-ca-1.0.1.tgz#6c83aff3692fa61256e0cd197e05e9de157691a6" integrity sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ== -split2@^3.1.0: - version "3.2.2" - resolved "https://registry.yarnpkg.com/split2/-/split2-3.2.2.tgz#bf2cf2a37d838312c249c89206fd7a17dd12365f" - integrity sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg== - dependencies: - readable-stream "^3.0.0" - sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -10622,11 +10937,6 @@ sshpk@^1.7.0: safer-buffer "^2.0.2" tweetnacl "~0.14.0" -stable@~0.1.8: - version "0.1.8" - resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" - integrity 
sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== - statuses@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" @@ -10647,19 +10957,23 @@ stream-shift@^1.0.0: resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== -stream-to-pull-stream@^1.7.2: - version "1.7.3" - resolved "https://registry.yarnpkg.com/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz#4161aa2d2eb9964de60bfa1af7feaf917e874ece" - integrity sha512-6sNyqJpr5dIOQdgNy/xcDWwDuzAsAwVzhzrWlAPAQ7Lkjx/rv0wgvxEyKwTq6FmNd5rjTrELt/CLmaSw7crMGg== +stream-to-it@^0.2.2: + version "0.2.4" + resolved "https://registry.yarnpkg.com/stream-to-it/-/stream-to-it-0.2.4.tgz#d2fd7bfbd4a899b4c0d6a7e6a533723af5749bd0" + integrity sha512-4vEbkSs83OahpmBybNJXlJd7d6/RxzkkSdT3I0mnGt79Xd2Kk+e1JqbvAvsQfCeKj3aKb0QIWkyK3/n0j506vQ== dependencies: - looper "^3.0.0" - pull-stream "^3.2.3" + get-iterator "^1.0.2" streamsearch@0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" integrity sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo= +streamsearch@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" + integrity sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg== + strict-uri-encode@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" @@ -10691,6 +11005,15 @@ string-width@^3.0.0, string-width@^3.1.0: is-fullwidth-code-point "^2.0.0" strip-ansi "^5.1.0" +string-width@^4.0.0, string-width@^4.2.3: + version "4.2.3" + resolved 
"https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^4.1.0, string-width@^4.2.0: version "4.2.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.2.tgz#dafd4f9559a7585cfba529c6a0a4f73488ebd4c5" @@ -10716,7 +11039,7 @@ string.prototype.trimstart@^1.0.4: call-bind "^1.0.2" define-properties "^1.1.3" -string_decoder@^1.1.1, string_decoder@^1.2.0: +string_decoder@^1.1.1: version "1.3.0" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== @@ -10756,7 +11079,7 @@ strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0: +strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10850,13 +11173,28 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7.1.0: +supports-color@^7.0.0, supports-color@^7.1.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== dependencies: has-flag "^4.0.0" +supports-color@^8.1.1: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity 
sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +supports-hyperlinks@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" + integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== + dependencies: + has-flag "^4.0.0" + supports-color "^7.0.0" + swap-case@^1.1.0: version "1.1.2" resolved "https://registry.yarnpkg.com/swap-case/-/swap-case-1.1.2.tgz#c39203a4587385fad3c850a0bd1bcafa081974e3" @@ -10954,17 +11292,6 @@ tar-stream@^1.1.2: to-buffer "^1.1.1" xtend "^4.0.0" -tar-stream@^2.0.1: - version "2.2.0" - resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-2.2.0.tgz#acad84c284136b060dc3faa64474aa9aebd77287" - integrity sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ== - dependencies: - bl "^4.0.3" - end-of-stream "^1.4.1" - fs-constants "^1.0.0" - inherits "^2.0.3" - readable-stream "^3.1.1" - tar@^4, tar@^4.0.2: version "4.4.19" resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" @@ -11038,7 +11365,7 @@ through2@3.0.1: dependencies: readable-stream "2 || 3" -through2@3.0.2, through2@^3.0.0, through2@^3.0.1: +through2@3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/through2/-/through2-3.0.2.tgz#99f88931cfc761ec7678b41d5d7336b5b6a07bf4" integrity sha512-enaDQ4MUyP2W6ZyT6EsMzqBPZaM/avg8iuo+l2d3QCs0J+6RaqkHV/2/lOwDTueBHeJ/2LG9lrLW3d5rWPucuQ== @@ -11071,6 +11398,15 @@ timed-out@^4.0.0, timed-out@^4.0.1: resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= +timeout-abort-controller@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/timeout-abort-controller/-/timeout-abort-controller-2.0.0.tgz#d6a59209132e520413092dd4b4d71eaaf5887feb" + integrity sha512-2FAPXfzTPYEgw27bQGTHc0SzrbmnU2eso4qo172zMLZzaGqeu09PFa5B2FCUHM1tflgRqPgn5KQgp6+Vex4uNA== + dependencies: + abort-controller "^3.0.0" + native-abort-controller "^1.0.4" + retimer "^3.0.0" + tiny-queue@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/tiny-queue/-/tiny-queue-0.2.1.tgz#25a67f2c6e253b2ca941977b5ef7442ef97a6046" @@ -11084,10 +11420,10 @@ title-case@^2.1.0: no-case "^2.2.0" upper-case "^1.0.3" -tmp-promise@3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.2.tgz#6e933782abff8b00c3119d63589ca1fb9caaa62a" - integrity sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA== +tmp-promise@3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.3.tgz#60a1a1cc98c988674fcbfd23b6e3367bdeac4ce7" + integrity sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ== dependencies: tmp "^0.2.0" @@ -11204,11 +11540,35 @@ ts-invariant@^0.6.0: "@ungap/global-this" "^0.4.2" tslib "^1.9.3" +ts-node@^10.9.1: + version "10.9.1" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b" + integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw== + dependencies: + "@cspotcode/source-map-support" "^0.8.0" + "@tsconfig/node10" "^1.0.7" + "@tsconfig/node12" "^1.0.7" + "@tsconfig/node14" "^1.0.0" + "@tsconfig/node16" "^1.0.2" + acorn "^8.4.1" + acorn-walk "^8.1.1" + arg "^4.1.0" + create-require "^1.1.0" + diff "^4.0.1" + make-error "^1.1.1" + v8-compile-cache-lib "^3.0.1" + yn "3.1.1" + tslib@^1.10.0, tslib@^1.14.1, tslib@^1.9.3: version "1.14.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" 
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== +tslib@^2.0.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.0.tgz#42bfed86f5787aeb41d031866c8f402429e0fddf" + integrity sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg== + tslib@^2.0.3, tslib@~2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" @@ -11236,11 +11596,6 @@ tweetnacl@^0.14.3, tweetnacl@~0.14.0: resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== -tweetnacl@^1.0.0: - version "1.0.3" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596" - integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== - type-check@~0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" @@ -11248,6 +11603,11 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== + type-is@^1.6.16, type-is@~1.6.18: version "1.6.18" resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" @@ -11302,6 +11662,13 @@ ua-parser-js@^0.7.18: resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.24.tgz#8d3ecea46ed4f1f1d63ec25f17d8568105dc027c" integrity 
sha512-yo+miGzQx5gakzVK3QFfN0/L9uVhosXBBO7qmnk7c2iw1IhL212wfA3zbnI54B0obGwC/5NWub/iT9sReMx+Fw== +uint8arrays@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/uint8arrays/-/uint8arrays-3.1.1.tgz#2d8762acce159ccd9936057572dade9459f65ae0" + integrity sha512-+QJa8QRnbdXVpHYjLoTpJIdCTiw9Ir62nocClWuXIq2JIh4Uta0cQsTSpFL678p2CN8B+XSApwcU+pQEqVpKWg== + dependencies: + multiformats "^9.4.2" + ultron@~1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.1.1.tgz#9fe1536a10a664a65266a1e3ccf85fd36302bc9c" @@ -11337,11 +11704,6 @@ underscore@^1.8.3: resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.12.0.tgz#4814940551fc80587cef7840d1ebb0f16453be97" integrity sha512-21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ== -unique-by@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unique-by/-/unique-by-1.0.0.tgz#5220c86ba7bc572fb713ad74651470cb644212bd" - integrity sha512-rJRXK5V0zL6TiSzhoGNpJp5dr+TZBLoPJFC06rLn17Ug++7Aa0Qnve5v+skXeQxx6/sI7rBsSesa6MAcmFi8Ew== - unique-stream@^2.0.2: version "2.3.1" resolved "https://registry.yarnpkg.com/unique-stream/-/unique-stream-2.3.1.tgz#c65d110e9a4adf9a6c5948b28053d9a8d04cbeac" @@ -11362,11 +11724,6 @@ universalify@^0.1.0, universalify@^0.1.2: resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== -universalify@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-1.0.0.tgz#b61a1da173e8435b2fe3c67d29b9adf8594bd16d" - integrity sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug== - universalify@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" @@ -11440,13 +11797,10 @@ url-to-options@^1.0.1: 
resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= -ursa-optional@~0.10.0: - version "0.10.2" - resolved "https://registry.yarnpkg.com/ursa-optional/-/ursa-optional-0.10.2.tgz#bd74e7d60289c22ac2a69a3c8dea5eb2817f9681" - integrity sha512-TKdwuLboBn7M34RcvVTuQyhvrA8gYKapuVdm0nBP0mnBc7oECOfUQZrY91cefL3/nm64ZyrejSRrhTVdX7NG/A== - dependencies: - bindings "^1.5.0" - nan "^2.14.2" +urlpattern-polyfill@^8.0.0: + version "8.0.2" + resolved "https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-8.0.2.tgz#99f096e35eff8bf4b5a2aa7d58a1523d6ebc7ce5" + integrity sha512-Qp95D4TPJl1kC9SKigDcqgyM2VDVO4RiJc2d4qe5GrYm+zbIQCWWKAFaJNQ4BhdFeDGwBmAxqJBwWSJDb9T3BQ== utf-8-validate@^5.0.2: version "5.0.4" @@ -11523,6 +11877,11 @@ uuid@^8.0.0, uuid@^8.3.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +v8-compile-cache-lib@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" + integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== + vali-date@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/vali-date/-/vali-date-1.0.0.tgz#1b904a59609fb328ef078138420934f6b86709a6" @@ -11541,11 +11900,16 @@ validate-npm-package-license@^3.0.1: spdx-correct "^3.0.0" spdx-expression-parse "^3.0.0" -varint@^5.0.0, varint@~5.0.0: +varint@^5.0.0: version "5.0.2" resolved "https://registry.yarnpkg.com/varint/-/varint-5.0.2.tgz#5b47f8a947eb668b848e034dcfa87d0ff8a7f7a4" integrity sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow== +varint@^6.0.0: + version "6.0.0" + resolved 
"https://registry.yarnpkg.com/varint/-/varint-6.0.0.tgz#9881eb0ce8feaea6512439d19ddf84bf551661d0" + integrity sha512-cXEIW6cfr15lFv563k4GuVuW/fiwjknytD37jIOLSdSWuOI6WnO/oKwmP2FQTU2l01LP8/M5TSAJpzUaGe3uWg== + vary@^1, vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" @@ -11604,6 +11968,11 @@ wcwidth@^1.0.1: dependencies: defaults "^1.0.3" +web-streams-polyfill@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6" + integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q== + web3-bzz@1.2.9: version "1.2.9" resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.2.9.tgz#25f8a373bc2dd019f47bf80523546f98b93c8790" @@ -12104,6 +12473,17 @@ web3@^1.0.0-beta.34: web3-shh "1.3.4" web3-utils "1.3.4" +webcrypto-core@^1.7.7: + version "1.7.7" + resolved "https://registry.yarnpkg.com/webcrypto-core/-/webcrypto-core-1.7.7.tgz#06f24b3498463e570fed64d7cab149e5437b162c" + integrity sha512-7FjigXNsBfopEj+5DV2nhNpfic2vumtjjgPmeDKk45z+MJwXKKfhPB7118Pfzrmh4jqOMST6Ch37iPAHoImg5g== + dependencies: + "@peculiar/asn1-schema" "^2.3.6" + "@peculiar/json-schema" "^1.1.12" + asn1js "^3.0.1" + pvtsutils "^1.3.2" + tslib "^2.4.0" + webidl-conversions@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-2.0.1.tgz#3bf8258f7d318c7443c36f2e169402a1a6703506" @@ -12198,6 +12578,13 @@ which@2.0.2, which@^2.0.0, which@^2.0.1: dependencies: isexe "^2.0.0" +which@^1.2.9: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + wide-align@1.1.3, wide-align@^1.1.0: version "1.1.3" resolved 
"https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" @@ -12205,6 +12592,13 @@ wide-align@1.1.3, wide-align@^1.1.0: dependencies: string-width "^1.0.2 || 2" +widest-line@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" + integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== + dependencies: + string-width "^4.0.0" + window-size@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.2.0.tgz#b4315bb4214a3d7058ebeee892e13fa24d98b075" @@ -12215,10 +12609,10 @@ word-wrap@~1.2.3: resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== -wordwrap@~0.0.2: - version "0.0.3" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" - integrity sha512-1tMA907+V4QmxV7dbRvb4/8MaRALK6q9Abid3ndMYnbyo8piisCmeONVqVSXqQA3KaP4SLt5b7ud6E2sqP8TFw== +wordwrap@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== workerpool@6.0.0: version "6.0.0" @@ -12251,6 +12645,15 @@ wrap-ansi@^6.2.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrappy@1: version "1.0.2" resolved 
"https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" @@ -12397,7 +12800,7 @@ yaeti@^0.0.6: resolved "https://registry.yarnpkg.com/yaeti/-/yaeti-0.0.6.tgz#f26f484d72684cf42bedfb76970aa1608fbf9577" integrity sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc= -yallist@^3.0.0, yallist@^3.0.2, yallist@^3.1.1: +yallist@^3.0.0, yallist@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== @@ -12407,14 +12810,7 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yaml@1.9.2: - version "1.9.2" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.9.2.tgz#f0cfa865f003ab707663e4f04b3956957ea564ed" - integrity sha512-HPT7cGGI0DuRcsO51qC1j9O16Dh1mZ2bnXwsi0jrSpsLz0WxOLSLXfkABVl6bZO629py3CU+OMJtpNHDLB97kg== - dependencies: - "@babel/runtime" "^7.9.2" - -yaml@^1.7.2: +yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: version "1.10.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== @@ -12459,6 +12855,11 @@ yargs-parser@^2.4.0: camelcase "^3.0.0" lodash.assign "^4.0.6" +yargs-parser@^21.0.0: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + yargs-unparser@1.6.1: version "1.6.1" resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.1.tgz#bd4b0ee05b4c94d058929c32cb09e3fce71d3c5f" @@ -12538,6 +12939,11 @@ 
yargs@^15.3.1: y18n "^4.0.0" yargs-parser "^18.1.2" +yn@3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" + integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== + yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index 568863a6fa6..25d28726108 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -234,7 +234,7 @@ async fn file_data_sources() { let stop_block = test_ptr(5); let err = ctx.start_and_sync_to_error(stop_block.clone()).await; let message = "entity type `IpfsFile1` is not on the 'entities' list for data source `File2`. \ - Hint: Add `IpfsFile1` to the 'entities' list, which currently is: `IpfsFile`.\twasm backtrace:\t 0: 0x365d - !src/mapping/handleFile1\t in handler `handleFile1` at block #5 ()".to_string(); + Hint: Add `IpfsFile1` to the 'entities' list, which currently is: `IpfsFile`.\twasm backtrace:\t 0: 0x3649 - !src/mapping/handleFile1\t in handler `handleFile1` at block #5 ()".to_string(); let expected_err = SubgraphError { subgraph_id: ctx.deployment.hash.clone(), message, @@ -345,8 +345,8 @@ async fn template_static_filters_false_positives() { assert_eq!( poi.unwrap(), [ - 172, 174, 50, 50, 108, 187, 89, 216, 16, 123, 40, 207, 250, 97, 247, 138, 180, 67, 20, - 5, 114, 187, 237, 104, 187, 122, 220, 9, 131, 67, 50, 237 + 253, 249, 50, 171, 127, 117, 77, 13, 79, 132, 88, 246, 223, 214, 225, 39, 112, 19, 73, + 97, 193, 132, 103, 19, 191, 5, 28, 14, 232, 137, 76, 9 ], ); } From 5c5ec10a874108e9d9c1d4323ddc7774a79925e5 Mon Sep 17 00:00:00 2001 From: Sebastian T F Date: Wed, 31 May 2023 18:09:11 +0530 Subject: [PATCH 0266/2104] Fix typo in bug.yml (#4657) --- .github/ISSUE_TEMPLATE/bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 944e74845ef..4fe935160de 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -16,7 +16,7 @@ body: id: graph-node-logs attributes: label: Relevant log output - description: Please copy and paste any relevant log output (either graph-node or hosted service logs). This will be automatically formatted into code, so no need for backticks. Leave black if it doesn't apply. + description: Please copy and paste any relevant log output (either graph-node or hosted service logs). This will be automatically formatted into code, so no need for backticks. Leave blank if it doesn't apply. render: Shell - type: markdown attributes: From a85e1df62da44fe00cebc68801d7f80b3fa4d38f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 19 May 2023 15:55:19 -0700 Subject: [PATCH 0267/2104] graph, node: Move serde_regex and toml into graph --- Cargo.lock | 40 +++++++++++++++---------------- graph/Cargo.toml | 2 ++ graph/src/lib.rs | 2 ++ node/Cargo.toml | 2 -- node/src/config.rs | 12 +++++----- store/postgres/src/chain_store.rs | 4 ++-- 6 files changed, 32 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa757e690ab..eaf98b0b07a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1580,6 +1580,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_plain", + "serde_regex", "serde_yaml", "slog", "slog-async", @@ -1594,6 +1595,7 @@ dependencies = [ "tokio", "tokio-retry", "tokio-stream", + "toml 0.7.4", "tonic", "tonic-build", "url", @@ -1791,10 +1793,8 @@ dependencies = [ "lazy_static", "prometheus", "serde", - "serde_regex", "shellexpand", "termcolor", - "toml 0.7.1", "url", ] @@ -2907,15 +2907,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" -[[package]] -name = "nom8" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -3975,9 +3966,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" dependencies = [ "serde", ] @@ -4716,9 +4707,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772c1426ab886e7362aedf4abc9c0d1348a979517efedfc25862944d10137af0" +checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" dependencies = [ "serde", "serde_spanned", @@ -4728,24 +4719,24 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.1" +version = "0.19.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90a238ee2e6ede22fb95350acc78e21dc40da00bb66c0334bde83de4ed89424e" +checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" dependencies = [ "indexmap", - "nom8", "serde", "serde_spanned", "toml_datetime", + "winnow", ] [[package]] @@ -5743,6 +5734,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.4.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" diff --git a/graph/Cargo.toml b/graph/Cargo.toml index ad1b3fd6c13..e2f21eae40d 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -33,6 +33,7 @@ semver = { version = "1.0.16", features = ["serde"] } serde = { version = "1.0.126", features = ["rc"] } serde_derive = "1.0.125" serde_json = { version = "1.0", features = ["arbitrary_precision"] } +serde_regex = "1.1.0" serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } @@ -47,6 +48,7 @@ tiny-keccak = "1.5.0" tokio = { version = "1.28.1", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-retry = "0.3.0" +toml = "0.7.4" url = "2.3.1" prometheus = "0.13.3" priority-queue = "0.7.0" diff --git a/graph/src/lib.rs b/graph/src/lib.rs index ad123b7100c..264bab1c221 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -89,6 +89,7 @@ pub mod prelude { pub use serde; pub use serde_derive::{Deserialize, Serialize}; pub use serde_json; + pub use serde_regex; pub use serde_yaml; pub use slog::{self, crit, debug, error, info, o, trace, warn, Logger}; pub use std::convert::TryFrom; @@ -100,6 +101,7 @@ pub mod prelude { pub use thiserror; pub use tiny_keccak; pub use tokio; + pub use toml; pub use tonic; pub use web3; diff --git a/node/Cargo.toml b/node/Cargo.toml index b432572d790..8183a336256 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -36,8 +36,6 @@ graph-server-websocket = { path = "../server/websocket" } graph-server-metrics = { path = "../server/metrics" } graph-store-postgres = { path = "../store/postgres" } serde = { version = "1.0.126", features = ["derive", "rc"] } 
-serde_regex = "1.1.0" -toml = "0.7.1" shellexpand = "3.1.0" termcolor = "1.2.0" diesel = "1.4.8" diff --git a/node/src/config.rs b/node/src/config.rs index 12b9a4c2cc8..f23ad6fe843 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -10,7 +10,7 @@ use graph::{ de::{self, value, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, }, - serde_json, Logger, NodeId, StoreError, + serde_json, serde_regex, toml, Logger, NodeId, StoreError, }, }; use graph_chain_ethereum::{self as ethereum, NodeCapabilities}; @@ -1155,7 +1155,7 @@ mod tests { use graph::blockchain::BlockchainKind; use graph::firehose::SubgraphLimit; use graph::prelude::regex::Regex; - use graph::prelude::NodeId; + use graph::prelude::{toml, NodeId}; use http::{HeaderMap, HeaderValue}; use std::collections::BTreeSet; use std::fs::read_to_string; @@ -1460,7 +1460,7 @@ mod tests { details = { type = "firehose", url = "http://localhost:9000" } match = [ { name = "some_node_.*", limit = 10 }, - { name = "other_node_.*", limit = 0 } ] + { name = "other_node_.*", limit = 0 } ] "#, ) .unwrap(); @@ -1498,7 +1498,7 @@ mod tests { details = { type = "substreams", url = "http://localhost:9000" } match = [ { name = "some_node_.*", limit = 101 }, - { name = "other_node_.*", limit = 0 } ] + { name = "other_node_.*", limit = 0 } ] "#, ) .unwrap(); @@ -1536,7 +1536,7 @@ mod tests { details = { type = "substreams", url = "http://localhost:9000" } match = [ { name = "some_node_.*", limit = 10 }, - { name = "other_node_.*", limit = 0 } ] + { name = "other_node_.*", limit = 0 } ] "#, ) .unwrap(); @@ -1574,7 +1574,7 @@ mod tests { details = { type = "substreams", url = "http://localhost:9000" } match = [ { name = "some_node_.*", limit = 101 }, - { name = "other_node_.*", limit = 0 } ] + { name = "other_node_.*", limit = 0 } ] "#, ) .unwrap(); diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 93ffd27bef9..300a4e79b39 100644 --- a/store/postgres/src/chain_store.rs +++ 
b/store/postgres/src/chain_store.rs @@ -973,7 +973,7 @@ mod data { use public::eth_call_meta as meta; cache::table - .find(id.as_ref()) + .find::<&[u8]>(id.as_ref()) .inner_join(meta::table) .select(( cache::return_value, @@ -1101,7 +1101,7 @@ mod data { Storage::Shared => { use public::eth_call_meta as meta; - update(meta::table.find(contract_address.as_ref())) + update(meta::table.find::<&[u8]>(contract_address.as_ref())) .set(meta::accessed_at.eq(sql("CURRENT_DATE"))) .execute(conn) } From 947d8253bbe649c75a946dfa36e7a98c1ac2e638 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 19 May 2023 16:18:18 -0700 Subject: [PATCH 0268/2104] all: Introduce subgraph-specific settings It is now possible to set the history_blocks for a subgraph from a config file that matches on subgraph names. Settings are matched in the order in which they appear in the config file. `graphman config check` checks that config file for validity if one is set in the environment --- core/src/subgraph/registrar.rs | 8 ++ graph/src/components/subgraph/mod.rs | 2 + graph/src/components/subgraph/settings.rs | 94 +++++++++++++++++++++++ graph/src/env/mod.rs | 6 ++ node/src/main.rs | 17 ++++ node/src/manager/commands/config.rs | 24 +++++- node/src/manager/commands/run.rs | 2 + tests/src/fixture/mod.rs | 2 + 8 files changed, 151 insertions(+), 4 deletions(-) create mode 100644 graph/src/components/subgraph/settings.rs diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 7f706fcd622..2490b2a39e4 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -6,6 +6,7 @@ use graph::blockchain::Blockchain; use graph::blockchain::BlockchainKind; use graph::blockchain::BlockchainMap; use graph::components::store::{DeploymentId, DeploymentLocator, SubscriptionManager}; +use graph::components::subgraph::Settings; use graph::data::subgraph::schema::DeploymentCreate; use graph::data::subgraph::Graft; use graph::prelude::{ @@ -24,6 +25,7 @@ pub struct 
SubgraphRegistrar { node_id: NodeId, version_switching_mode: SubgraphVersionSwitchingMode, assignment_event_stream_cancel_guard: CancelGuard, // cancels on drop + settings: Arc, } impl SubgraphRegistrar @@ -41,6 +43,7 @@ where chains: Arc, node_id: NodeId, version_switching_mode: SubgraphVersionSwitchingMode, + settings: Arc, ) -> Self { let logger = logger_factory.component_logger("SubgraphRegistrar", None); let logger_factory = logger_factory.with_parent(logger.clone()); @@ -58,6 +61,7 @@ where node_id, version_switching_mode, assignment_event_stream_cancel_guard: CancelGuard::new(), + settings, } } @@ -297,6 +301,10 @@ where SubgraphRegistrarError::ResolveError(SubgraphManifestResolveError::ResolveError(e)) })?; + // Give priority to deployment specific history_blocks value. + let history_blocks = + history_blocks.or(self.settings.for_name(&name).map(|c| c.history_blocks)); + let deployment_locator = match kind { BlockchainKind::Arweave => { create_subgraph_version::( diff --git a/graph/src/components/subgraph/mod.rs b/graph/src/components/subgraph/mod.rs index 6976de1e2d2..4cb9eb91142 100644 --- a/graph/src/components/subgraph/mod.rs +++ b/graph/src/components/subgraph/mod.rs @@ -4,6 +4,7 @@ mod instance_manager; mod proof_of_indexing; mod provider; mod registrar; +mod settings; pub use crate::prelude::Entity; @@ -16,3 +17,4 @@ pub use self::proof_of_indexing::{ }; pub use self::provider::SubgraphAssignmentProvider; pub use self::registrar::{SubgraphRegistrar, SubgraphVersionSwitchingMode}; +pub use self::settings::{Setting, Settings}; diff --git a/graph/src/components/subgraph/settings.rs b/graph/src/components/subgraph/settings.rs new file mode 100644 index 00000000000..a7512614583 --- /dev/null +++ b/graph/src/components/subgraph/settings.rs @@ -0,0 +1,94 @@ +//! 
Facilities for dealing with subgraph-specific settings +use std::fs::read_to_string; + +use crate::{ + anyhow, + prelude::{regex::Regex, SubgraphName}, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum Predicate { + #[serde(alias = "name", with = "serde_regex")] + Name(Regex), +} + +impl Predicate { + fn matches(&self, name: &SubgraphName) -> bool { + match self { + Predicate::Name(rx) => rx.is_match(name.as_str()), + } + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Setting { + #[serde(alias = "match")] + pred: Predicate, + pub history_blocks: i32, +} + +impl Setting { + fn matches(&self, name: &SubgraphName) -> bool { + self.pred.matches(name) + } +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct Settings { + #[serde(alias = "setting")] + settings: Vec, +} + +impl Settings { + pub fn from_file(path: &str) -> Result { + Self::from_str(&read_to_string(path)?) + } + + pub fn from_str(toml: &str) -> Result { + toml::from_str::(toml).map_err(anyhow::Error::from) + } + + pub fn for_name(&self, name: &SubgraphName) -> Option<&Setting> { + self.settings.iter().find(|setting| setting.matches(name)) + } +} + +#[cfg(test)] +mod test { + use super::{Predicate, Settings}; + + #[test] + fn parses_correctly() { + let content = r#" + [[setting]] + match = { name = ".*" } + history_blocks = 10000 + + [[setting]] + match = { name = "xxxxx" } + history_blocks = 10000 + + [[setting]] + match = { name = ".*!$" } + history_blocks = 10000 + "#; + + let section = Settings::from_str(content).unwrap(); + assert_eq!(section.settings.len(), 3); + + let rule1 = match §ion.settings[0].pred { + Predicate::Name(name) => name, + }; + assert_eq!(rule1.as_str(), ".*"); + + let rule2 = match §ion.settings[1].pred { + Predicate::Name(name) => name, + }; + assert_eq!(rule2.as_str(), "xxxxx"); + let rule1 = match §ion.settings[2].pred { + Predicate::Name(name) => name, + }; + assert_eq!(rule1.as_str(), 
".*!$"); + } +} diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index 7e7f071e144..08e1d3ed9c1 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -172,6 +172,9 @@ pub struct EnvVars { /// Set by the environment variable `ETHEREUM_REORG_THRESHOLD`. The default /// value is 250 blocks. pub reorg_threshold: BlockNumber, + /// Set by the env var `GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS` which should point + /// to a file with subgraph-specific settings + pub subgraph_settings: Option, } impl EnvVars { @@ -229,6 +232,7 @@ impl EnvVars { external_ws_base_url: inner.external_ws_base_url, static_filters_threshold: inner.static_filters_threshold, reorg_threshold: inner.reorg_threshold, + subgraph_settings: inner.subgraph_settings, }) } @@ -347,6 +351,8 @@ struct Inner { // JSON-RPC specific. #[envconfig(from = "ETHEREUM_REORG_THRESHOLD", default = "250")] reorg_threshold: BlockNumber, + #[envconfig(from = "GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS")] + subgraph_settings: Option, } #[derive(Clone, Debug)] diff --git a/node/src/main.rs b/node/src/main.rs index cd42d414f9c..58b3699754e 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -9,6 +9,7 @@ use graph::blockchain::{ BasicBlockchainBuilder, Blockchain, BlockchainBuilder, BlockchainKind, BlockchainMap, }; use graph::components::store::BlockStore; +use graph::components::subgraph::Settings; use graph::data::graphql::effort::LoadManager; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; @@ -137,6 +138,21 @@ async fn main() { } Ok(config) => config, }; + + let subgraph_settings = match env_vars.subgraph_settings { + Some(ref path) => { + info!(logger, "Reading subgraph configuration file `{}`", path); + match Settings::from_file(path) { + Ok(rules) => rules, + Err(e) => { + eprintln!("configuration error in subgraph settings {}: {}", path, e); + std::process::exit(1); + } + } + } + None => Settings::default(), + }; + if opt.check_config { match config.to_json() { Ok(txt) => println!("{}", 
txt), @@ -481,6 +497,7 @@ async fn main() { blockchain_map, node_id.clone(), version_switching_mode, + Arc::new(subgraph_settings), )); graph::spawn( subgraph_registrar diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index fa8b1013ac0..4687b4f0a4f 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -2,7 +2,9 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ anyhow::bail, + components::subgraph::Settings, endpoint::EndpointMetrics, + env::EnvVars, itertools::Itertools, prelude::{ anyhow::{anyhow, Error}, @@ -39,13 +41,27 @@ pub fn check(config: &Config, print: bool) -> Result<(), Error> { Ok(txt) => { if print { println!("{}", txt); - } else { - println!("Successfully validated configuration"); + return Ok(()); } - Ok(()) } - Err(e) => Err(anyhow!("error serializing config: {}", e)), + Err(e) => bail!("error serializing config: {}", e), } + + let env_vars = EnvVars::from_env().unwrap(); + if let Some(path) = &env_vars.subgraph_settings { + match Settings::from_file(path) { + Ok(_) => { + println!("Successfully validated subgraph settings from {path}"); + } + Err(e) => { + eprintln!("configuration error in subgraph settings {}: {}", path, e); + std::process::exit(1); + } + } + }; + + println!("Successfully validated configuration"); + Ok(()) } pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Error> { diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index ee630685485..f94b09119c4 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -17,6 +17,7 @@ use graph::blockchain::client::ChainClient; use graph::blockchain::{BlockchainKind, BlockchainMap}; use graph::cheap_clone::CheapClone; use graph::components::store::{BlockStore as _, DeploymentLocator}; +use graph::components::subgraph::Settings; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; use graph::firehose::FirehoseEndpoints; 
@@ -198,6 +199,7 @@ pub async fn run( blockchain_map, node_id.clone(), SubgraphVersionSwitchingMode::Instant, + Arc::new(Settings::default()), )); let (name, hash) = if subgraph.contains(':') { diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 3dc053aab73..10c715dcf1a 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -18,6 +18,7 @@ use graph::blockchain::{ use graph::cheap_clone::CheapClone; use graph::components::metrics::MetricsRegistry; use graph::components::store::{BlockStore, DeploymentLocator}; +use graph::components::subgraph::Settings; use graph::data::graphql::effort::LoadManager; use graph::data::query::{Query, QueryTarget}; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; @@ -408,6 +409,7 @@ pub async fn setup( blockchain_map.clone(), node_id.clone(), SubgraphVersionSwitchingMode::Instant, + Arc::new(Settings::default()), )); SubgraphRegistrar::create_subgraph(subgraph_registrar.as_ref(), subgraph_name.clone()) From b62e75c662b00a550e1d06886acb842bf89a3412 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 19 May 2023 16:56:20 -0700 Subject: [PATCH 0269/2104] node: Add `graphman config setting` --- node/src/bin/manager.rs | 10 ++++++++++ node/src/manager/commands/config.rs | 28 +++++++++++++++++++++++++--- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 0e6b6cd26e8..61e03e067f2 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -383,6 +383,15 @@ pub enum ConfigCommand { features: String, network: String, }, + /// Show subgraph-specific settings + /// + /// GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS can add a file that contains + /// subgraph-specific settings. 
This command determines which settings + /// would apply when a subgraph is deployed and prints the result + Setting { + /// The subgraph name for which to print settings + name: String, + }, } #[derive(Clone, Debug, Subcommand)] @@ -1079,6 +1088,7 @@ async fn main() -> anyhow::Result<()> { commands::config::provider(logger, &ctx.config, registry, features, network) .await } + Setting { name } => commands::config::setting(&name), } } Remove { name } => commands::remove::run(ctx.subgraph_store(), &name), diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index 4687b4f0a4f..7f595e97e5d 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -1,14 +1,14 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::{ - anyhow::bail, - components::subgraph::Settings, + anyhow::{bail, Context}, + components::subgraph::{Setting, Settings}, endpoint::EndpointMetrics, env::EnvVars, itertools::Itertools, prelude::{ anyhow::{anyhow, Error}, - MetricsRegistry, NodeId, + MetricsRegistry, NodeId, SubgraphName, }, slog::Logger, }; @@ -158,3 +158,25 @@ pub async fn provider( ); Ok(()) } + +pub fn setting(name: &str) -> Result<(), Error> { + let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; + let env_vars = EnvVars::from_env().unwrap(); + if let Some(path) = &env_vars.subgraph_settings { + let settings = Settings::from_file(path) + .with_context(|| format!("syntax error in subgraph settings `{}`", path))?; + match settings.for_name(&name) { + Some(Setting { history_blocks, .. 
}) => { + println!("setting for `{name}` will use history_blocks = {history_blocks}"); + } + None => { + println!("no specific setting for `{name}`, defaults will be used"); + } + } + } else { + println!("No subgraph-specific settings will be applied because"); + println!("GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS is not set"); + }; + + Ok(()) +} From 8bb377570f7e3d42dcf97e48ac51906717e78406 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 31 May 2023 11:28:57 -0700 Subject: [PATCH 0270/2104] store: Produce more informative error in Layout.find_many --- store/postgres/src/relational.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index cad6e6b319a..02d657fb3c1 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -559,9 +559,15 @@ impl Layout { entity_id: entity_data.id(), causality_region: CausalityRegion::from_entity(&entity_data), }; - let overwrite = entities.insert(key, entity_data).is_some(); - if overwrite { - return Err(constraint_violation!("duplicate entity in result set")); + if entities.contains_key(&key) { + return Err(constraint_violation!( + "duplicate entity {}[{}] in result set, block = {}", + key.entity_type, + key.entity_id, + block + )); + } else { + entities.insert(key, entity_data); } } Ok(entities) From aa9db72aeb3f5cd61b180e52559e82ffaa58dddc Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 31 May 2023 14:42:23 -0700 Subject: [PATCH 0271/2104] graph: Create an explicit IdType for entity ids --- graph/src/data/store/mod.rs | 7 ++++++ graph/src/schema/input_schema.rs | 37 +++++++++++++++++--------------- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 89e12584dd7..3044ec13699 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -172,6 +172,13 @@ impl ValueType { } } +/// The types that can be 
used for the `id` of an entity +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum IdType { + String, + Bytes, +} + // Note: Do not modify fields without also making a backward compatible change to the StableHash impl (below) /// An attribute value is represented as an enum with variants for all supported value types. #[derive(Clone, Deserialize, Serialize, PartialEq, Eq)] diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 51646cf2417..8fd3c4c8ee7 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -2,7 +2,7 @@ use std::collections::{BTreeMap, HashSet}; use std::str::FromStr; use std::sync::Arc; -use anyhow::{anyhow, Error}; +use anyhow::{anyhow, Context, Error}; use store::Entity; use crate::cheap_clone::CheapClone; @@ -182,41 +182,44 @@ impl InputSchema { } } - /// Construct a value for the entity type's id attribute - pub fn id_value(&self, key: &EntityKey) -> Result { + pub fn id_type(&self, entity_type: &EntityType) -> Result { let base_type = self .inner .schema .document - .get_object_type_definition(key.entity_type.as_str()) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, - key.entity_id, - key.entity_type - ) - })? + .get_object_type_definition(entity_type.as_str()) + .ok_or_else(|| anyhow!("unknown entity type `{}`", entity_type))? 
.field("id") .unwrap() .field_type .get_base_type(); match base_type { - "ID" | "String" => Ok(store::Value::String(key.entity_id.to_string())), - "Bytes" => Ok(store::Value::Bytes(scalar::Bytes::from_str( - &key.entity_id, - )?)), + "ID" | "String" => Ok(store::IdType::String), + "Bytes" => Ok(store::IdType::Bytes), s => { return Err(anyhow!( "Entity type {} uses illegal type {} for id column", - key.entity_type, + entity_type, s )) } } } + /// Construct a value for the entity type's id attribute + pub fn id_value(&self, key: &EntityKey) -> Result { + let id_type = self + .id_type(&key.entity_type) + .with_context(|| format!("error determining id_type for {:?}", key))?; + match id_type { + store::IdType::String => Ok(store::Value::String(key.entity_id.to_string())), + store::IdType::Bytes => Ok(store::Value::Bytes(scalar::Bytes::from_str( + &key.entity_id, + )?)), + } + } + pub fn is_immutable(&self, entity_type: &EntityType) -> bool { self.inner.immutable_types.contains(entity_type) } From 019fc4f31fa739877ee8eea288302cef7bbb82a8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 31 May 2023 15:00:07 -0700 Subject: [PATCH 0272/2104] substreams: Make sure Bytes id get prefixed with 0x Fixes https://github.com/graphprotocol/graph-node/issues/4661 --- chain/substreams/src/trigger.rs | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index a382b37e5fa..68593bd9db3 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -9,7 +9,10 @@ use graph::{ store::{DeploymentLocator, EntityKey, EntityType, SubgraphFork}, subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, }, - data::{store::scalar::Bytes, value::Word}, + data::{ + store::{scalar::Bytes, IdType}, + value::Word, + }, data_source::{self, CausalityRegion}, prelude::{ anyhow, async_trait, BigDecimal, BigInt, BlockHash, BlockNumber, BlockState, @@ 
-194,10 +197,30 @@ where return Err(MappingError::Unknown(anyhow!("Detected UNSET entity operation, either a server error or there's a new type of operation and we're running an outdated protobuf"))); } Operation::Create | Operation::Update => { - let entity_type: &str = &entity_change.entity; - let entity_id: String = entity_change.id.clone(); + let schema = state.entity_cache.schema.as_ref(); + let entity_type = EntityType::new(entity_change.entity.to_string()); + // Make sure that the `entity_id` gets set to a value + // that is safe for roundtrips through the database. In + // particular, if the type of the id is `Bytes`, we have + // to make sure that the `entity_id` starts with `0x` as + // that will be what the key for such an entity have + // when it is read from the database. + // + // Needless to say, this is a very ugly hack, and the + // real fix is what's described in [this + // issue](https://github.com/graphprotocol/graph-node/issues/4663) + let entity_id: String = match schema.id_type(&entity_type)? 
{ + IdType::String => entity_change.id.clone(), + IdType::Bytes => { + if entity_change.id.starts_with("0x") { + entity_change.id.clone() + } else { + format!("0x{}", entity_change.id) + } + } + }; let key = EntityKey { - entity_type: EntityType::new(entity_type.to_string()), + entity_type: entity_type.clone(), entity_id: entity_id.clone().into(), causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data }; @@ -220,7 +243,7 @@ where write_poi_event( proof_of_indexing, &ProofOfIndexingEvent::SetEntity { - entity_type, + entity_type: entity_type.as_str(), id: &entity_id, // TODO: This should be an entity so we do not have to build the intermediate HashMap data: &data, From 8b88d020e8e42bb6cd7293f08aa7f05392742beb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 31 May 2023 15:08:27 -0700 Subject: [PATCH 0273/2104] substreams: Avoid a clone when constructing the EntityKey --- chain/substreams/src/trigger.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 68593bd9db3..01ee2e0a643 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -219,13 +219,8 @@ where } } }; - let key = EntityKey { - entity_type: entity_type.clone(), - entity_id: entity_id.clone().into(), - causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data - }; - let mut data: HashMap = HashMap::from_iter(vec![]); + let mut data: HashMap = HashMap::from_iter(vec![]); for field in entity_change.fields.iter() { let new_value: &codec::value::Typed = match &field.new_value { Some(codec::Value { @@ -236,7 +231,7 @@ where let value: Value = decode_value(new_value)?; *data - .entry(Word::from(field.name.clone())) + .entry(Word::from(field.name.as_str())) .or_insert(Value::Null) = value; } @@ -252,6 +247,12 @@ where logger, ); + let key = EntityKey { + entity_type: entity_type, + entity_id: 
Word::from(entity_id), + causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data + }; + let id = state.entity_cache.schema.id_value(&key)?; data.insert(Word::from("id"), id); From d611db102827bec7939a9f10a17d57812ac035ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 17:47:43 +0000 Subject: [PATCH 0274/2104] build(deps): bump quote from 1.0.27 to 1.0.28 Bumps [quote](https://github.com/dtolnay/quote) from 1.0.27 to 1.0.28. - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.27...1.0.28) --- updated-dependencies: - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eaf98b0b07a..3176564b3dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3485,9 +3485,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] From 2d16012dcfbf9f88bd8174c6dd8aee018cf42f18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 22:01:13 +0000 Subject: [PATCH 0275/2104] build(deps): bump chrono from 0.4.24 to 0.4.25 Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.24 to 0.4.25. 
- [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.24...v0.4.25) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Cargo.lock | 12 +++++++++--- graph/Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3176564b3dd..0e1b1bd54a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -45,6 +45,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -482,13 +488,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "fdbc37d37da9e5bce8173f3a41b71d9bf3c674deebbaceacd0ebdabde76efb03" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "serde", "time 0.1.44", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index e2f21eae40d..31e6a867c62 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -13,7 +13,7 @@ bytes = "1.0.1" cid = "0.10.1" diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2", "chrono"] } diesel_derives = "1.4" -chrono = "0.4.24" +chrono = "0.4.25" envconfig = "0.10.0" Inflector = "0.11.3" isatty = "0.1.9" From 0ce43c2fcb1521270c05e6c939931260082fdf5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 May 2023 
17:23:17 +0000 Subject: [PATCH 0276/2104] build(deps): bump tokio from 1.28.1 to 1.28.2 Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.1 to 1.28.2. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.28.1...tokio-1.28.2) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e1b1bd54a8..5703990c246 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4531,9 +4531,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.28.1" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 31e6a867c62..921a76ba5f2 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -45,7 +45,7 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.6.3" tiny-keccak = "1.5.0" -tokio = { version = "1.28.1", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.28.2", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-retry = "0.3.0" toml = "0.7.4" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 54912491cff..e4d7c829c4a 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,7 +22,7 @@ hyper = "0.14" serde = "1.0" serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } -tokio = { 
version = "1.28.1", features = ["rt", "macros", "process"] } +tokio = { version = "1.28.2", features = ["rt", "macros", "process"] } uuid = { version = "1.3.2", features = ["v4"] } [dev-dependencies] From 1577dd3e59b610c295399b63af57ce74abe993cd Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 24 May 2023 08:49:27 +0530 Subject: [PATCH 0277/2104] store: Add paused_at and assigned_at to subgraph_deployment_assignmen table --- .../down.sql | 4 ++++ .../up.sql | 4 ++++ store/postgres/src/primary.rs | 2 ++ 3 files changed, 10 insertions(+) create mode 100644 store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/down.sql create mode 100644 store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/up.sql diff --git a/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/down.sql b/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/down.sql new file mode 100644 index 00000000000..0bb6211f8b3 --- /dev/null +++ b/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/down.sql @@ -0,0 +1,4 @@ +-- Define the 'down' migration to remove the 'paused_at' and 'assigned_at' fields from 'subgraph_deployment_assignment' table +ALTER TABLE subgraphs.subgraph_deployment_assignment +DROP COLUMN paused_at, +DROP COLUMN assigned_at; \ No newline at end of file diff --git a/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/up.sql b/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/up.sql new file mode 100644 index 00000000000..ee76b78db8d --- /dev/null +++ b/store/postgres/migrations/2023-05-23-1715-update-subgraph-deployment-assignment/up.sql @@ -0,0 +1,4 @@ +-- Define the 'up' migration to add the 'paused_at' and 'assigned_at' fields to 'subgraph_deployment_assignment' table +ALTER TABLE subgraphs.subgraph_deployment_assignment +ADD COLUMN paused_at TIMESTAMPTZ NULL, +ADD COLUMN assigned_at 
TIMESTAMPTZ NULL; \ No newline at end of file diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 90bdfae26a0..9c5ea002509 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -93,6 +93,8 @@ table! { subgraphs.subgraph_deployment_assignment { id -> Integer, node_id -> Text, + paused_at -> Nullable, + assigned_at -> Nullable, } } From 3e3dc023b4919e2c253c983d3d662bda08032896 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 24 May 2023 15:31:15 +0530 Subject: [PATCH 0278/2104] store: postgres - add pause_subgraph and resume_subgraph methods --- store/postgres/src/primary.rs | 38 +++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 9c5ea002509..4c77798fd91 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -981,6 +981,44 @@ impl<'a> Connection<'a> { } } + pub fn pause_subgraph(&self, site: &Site) -> Result<(), StoreError> { + use subgraph_deployment_assignment as a; + + let conn = self.conn.as_ref(); + + let updates = update(a::table.filter(a::id.eq(site.id))) + .set(a::paused_at.eq(sql("now()"))) + .execute(conn)?; + match updates { + 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), + 1 => Ok(()), + _ => { + // `id` is the primary key of the subgraph_deployment_assignment table, + // and we can therefore only update no or one entry + unreachable!() + } + } + } + + pub fn resume_subgraph(&self, site: &Site) -> Result<(), StoreError> { + use subgraph_deployment_assignment as a; + + let conn = self.conn.as_ref(); + + let updates = update(a::table.filter(a::id.eq(site.id))) + .set(a::paused_at.eq(sql("null"))) + .execute(conn)?; + match updates { + 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), + 1 => Ok(()), + _ => { + // `id` is the primary key of the subgraph_deployment_assignment table, + // and we can therefore only update no or 
one entry + unreachable!() + } + } + } + pub fn reassign_subgraph( &self, site: &Site, From ba422c9040dba85476133a18c66f2bb95065e852 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 24 May 2023 17:59:35 +0530 Subject: [PATCH 0279/2104] node: Add graphman commands to pause and resume subgraphs --- node/src/bin/manager.rs | 18 ++++++++++++++++++ node/src/manager/commands/assign.rs | 28 ++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 61e03e067f2..f1071a5fdf9 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -158,6 +158,16 @@ pub enum Command { /// The deployment (see `help info`) deployment: DeploymentSearch, }, + /// Pause a deployment + Pause { + /// The deployment (see `help info`) + deployment: DeploymentSearch, + }, + /// Resume a deployment + Resume { + /// The deployment (see `help info`) + deployment: DeploymentSearch, + }, /// Rewind a subgraph to a specific block Rewind { /// Force rewinding even if the block hash is not found in the local @@ -1101,6 +1111,14 @@ async fn main() -> anyhow::Result<()> { let sender = ctx.notification_sender(); commands::assign::reassign(ctx.primary_pool(), &sender, &deployment, node) } + Pause { deployment } => { + let sender = ctx.notification_sender(); + commands::assign::pause_or_resume(ctx.primary_pool(), &sender, &deployment, true) + } + Resume { deployment } => { + let sender = ctx.notification_sender(); + commands::assign::pause_or_resume(ctx.primary_pool(), &sender, &deployment, false) + } Rewind { force, sleep, diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index c9b69e693bc..4907f6292da 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -69,3 +69,31 @@ pub fn reassign( } Ok(()) } + +pub fn pause_or_resume( + primary: ConnectionPool, + sender: &NotificationSender, + search: &DeploymentSearch, + pause: bool, +) -> Result<(), 
Error> { + let locator = search.locate_unique(&primary)?; + + let conn = primary.get()?; + let conn = catalog::Connection::new(conn); + + let site = conn + .locate_site(locator.clone())? + .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; + + if pause { + println!("pausing {locator}"); + conn.pause_subgraph(&site)?; + println!("paused {locator}") + } else { + println!("resuming {locator}"); + conn.resume_subgraph(&site)?; + println!("resumed {locator}") + } + + Ok(()) +} From c41a021126ca4774c6cb59d1cd5817a0aaf3011d Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Wed, 24 May 2023 20:22:37 +0530 Subject: [PATCH 0280/2104] core,graph,node,store: send StoreEvents for Pause and Resume and handle it --- core/src/subgraph/registrar.rs | 11 ++++- graph/src/components/store/traits.rs | 9 ++++ node/src/manager/commands/assign.rs | 26 ++++++++---- store/postgres/src/primary.rs | 62 ++++++++++++++++++++++++++-- store/postgres/src/subgraph_store.rs | 12 ++++++ 5 files changed, 105 insertions(+), 15 deletions(-) diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 2490b2a39e4..184b7b6c2f5 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -171,13 +171,20 @@ where match operation { EntityChangeOperation::Set => { store - .assigned_node(&deployment) + .assignment_status(&deployment) .map_err(|e| { anyhow!("Failed to get subgraph assignment entity: {}", e) }) .map(|assigned| -> Box + Send> { - if let Some(assigned) = assigned { + if let Some((assigned,is_paused)) = assigned { if assigned == node_id { + + if is_paused{ + // Subgraph is paused, so we don't start it + debug!(logger, "Deployment assignee is this node, but it is paused, so we don't start it"; "assigned_to" => assigned, "node_id" => &node_id,"paused" => is_paused); + return Box::new(stream::empty()); + } + // Start subgraph on this node debug!(logger, "Deployment assignee is this node, broadcasting add event"; "assigned_to" => assigned, 
"node_id" => &node_id); Box::new(stream::once(Ok(AssignmentEvent::Add { diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 01875b9bfa2..a9e06e264a7 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -95,6 +95,15 @@ pub trait SubgraphStore: Send + Sync + 'static { fn assigned_node(&self, deployment: &DeploymentLocator) -> Result, StoreError>; + /// Returns Option<(node_id,is_paused)> where `node_id` is the node that + /// the subgraph is assigned to, and `is_paused` is true if the + /// subgraph is paused. + /// Returns None if the deployment does not exist. + fn assignment_status( + &self, + deployment: &DeploymentLocator, + ) -> Result, StoreError>; + fn assignments(&self, node: &NodeId) -> Result, StoreError>; /// Return `true` if a subgraph `name` exists, regardless of whether the diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index 4907f6292da..dd6e9212ad5 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -85,15 +85,23 @@ pub fn pause_or_resume( .locate_site(locator.clone())? .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; - if pause { - println!("pausing {locator}"); - conn.pause_subgraph(&site)?; - println!("paused {locator}") - } else { - println!("resuming {locator}"); - conn.resume_subgraph(&site)?; - println!("resumed {locator}") - } + let change = match conn.assignment_status(&site)? { + Some((_, paused)) => { + if paused == pause { + println!("deployment {locator} is already {paused}"); + vec![] + } else { + println!("pausing {locator}"); + conn.pause_subgraph(&site)? + } + } + None => { + println!("resuming {locator}"); + conn.resume_subgraph(&site)? 
+ } + }; + println!("Operation completed"); + conn.send_store_event(sender, &StoreEvent::new(change))?; Ok(()) } diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 4c77798fd91..f2e5cb42df2 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -405,6 +405,7 @@ pub fn make_dummy_site(deployment: DeploymentHash, namespace: Namespace, network /// mirrored through `Mirror::refresh_tables` and must be queries, i.e., /// read-only mod queries { + use diesel::data_types::PgTimestamp; use diesel::dsl::{any, exists, sql}; use diesel::pg::PgConnection; use diesel::prelude::{ @@ -626,6 +627,36 @@ mod queries { .transpose() } + /// Returns Option<(node_id,is_paused)> where `node_id` is the node that + /// the subgraph is assigned to, and `is_paused` is true if the + /// subgraph is paused. + /// Returns None if the deployment does not exist. + pub(super) fn assignment_status( + conn: &PgConnection, + site: &Site, + ) -> Result, StoreError> { + a::table + .filter(a::id.eq(site.id)) + .select((a::node_id, a::paused_at)) + .first::<(String, Option)>(conn) + .optional()? 
+ .map(|(node, ts)| { + let node_id = NodeId::new(&node).map_err(|()| { + constraint_violation!( + "invalid node id `{}` in assignment for `{}`", + node, + site.deployment + ) + })?; + + match ts { + Some(_) => Ok((node_id, true)), + None => Ok((node_id, false)), + } + }) + .transpose() + } + pub(super) fn version_info( conn: &PgConnection, version: &str, @@ -981,7 +1012,7 @@ impl<'a> Connection<'a> { } } - pub fn pause_subgraph(&self, site: &Site) -> Result<(), StoreError> { + pub fn pause_subgraph(&self, site: &Site) -> Result, StoreError> { use subgraph_deployment_assignment as a; let conn = self.conn.as_ref(); @@ -991,7 +1022,11 @@ impl<'a> Connection<'a> { .execute(conn)?; match updates { 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), - 1 => Ok(()), + 1 => { + let change = + EntityChange::for_assignment(site.into(), EntityChangeOperation::Removed); + Ok(vec![change]) + } _ => { // `id` is the primary key of the subgraph_deployment_assignment table, // and we can therefore only update no or one entry @@ -1000,7 +1035,7 @@ impl<'a> Connection<'a> { } } - pub fn resume_subgraph(&self, site: &Site) -> Result<(), StoreError> { + pub fn resume_subgraph(&self, site: &Site) -> Result, StoreError> { use subgraph_deployment_assignment as a; let conn = self.conn.as_ref(); @@ -1010,7 +1045,10 @@ impl<'a> Connection<'a> { .execute(conn)?; match updates { 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), - 1 => Ok(()), + 1 => { + let change = EntityChange::for_assignment(site.into(), EntityChangeOperation::Set); + Ok(vec![change]) + } _ => { // `id` is the primary key of the subgraph_deployment_assignment table, // and we can therefore only update no or one entry @@ -1148,6 +1186,14 @@ impl<'a> Connection<'a> { queries::assigned_node(self.conn.as_ref(), site) } + /// Returns Option<(node_id,is_paused)> where `node_id` is the node that + /// the subgraph is assigned to, and `is_paused` is true if the + /// subgraph is paused. 
+ /// Returns None if the deployment does not exist. + pub fn assignment_status(&self, site: &Site) -> Result, StoreError> { + queries::assignment_status(self.conn.as_ref(), site) + } + /// Create a copy of the site `src` in the shard `shard`, but mark it as /// not active. If there already is a site in `shard`, return that /// instead. @@ -1761,6 +1807,14 @@ impl Mirror { self.read(|conn| queries::assigned_node(conn, site)) } + /// Returns Option<(node_id,is_paused)> where `node_id` is the node that + /// the subgraph is assigned to, and `is_paused` is true if the + /// subgraph is paused. + /// Returns None if the deployment does not exist. + pub fn assignment_status(&self, site: &Site) -> Result, StoreError> { + self.read(|conn| queries::assignment_status(conn, site)) + } + pub fn find_active_site(&self, subgraph: &DeploymentHash) -> Result, StoreError> { self.read(|conn| queries::find_active_site(conn, subgraph)) } diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 0ce48b4ac88..0c338ee42ad 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1286,6 +1286,18 @@ impl SubgraphStoreTrait for SubgraphStore { self.mirror.assigned_node(site.as_ref()) } + /// Returns Option<(node_id,is_paused)> where `node_id` is the node that + /// the subgraph is assigned to, and `is_paused` is true if the + /// subgraph is paused. + /// Returns None if the deployment does not exist. 
+ fn assignment_status( + &self, + deployment: &DeploymentLocator, + ) -> Result, StoreError> { + let site = self.find_site(deployment.id.into())?; + self.mirror.assignment_status(site.as_ref()) + } + fn assignments(&self, node: &NodeId) -> Result, StoreError> { self.mirror .assignments(node) From 018952922c7c16e8caebd1f5dbd913122f81dc86 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Thu, 25 May 2023 13:55:16 +0530 Subject: [PATCH 0281/2104] graph,store,core - add active_assigments() for unpaused subgraphs and wire it in subgraph startup --- core/src/subgraph/registrar.rs | 2 +- graph/src/components/store/traits.rs | 3 +++ node/src/manager/commands/assign.rs | 20 ++++++++++++-------- store/postgres/src/primary.rs | 20 ++++++++++++++++++++ store/postgres/src/subgraph_store.rs | 6 ++++++ 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 184b7b6c2f5..d048feddf80 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -226,7 +226,7 @@ where let logger = self.logger.clone(); let node_id = self.node_id.clone(); - future::result(self.store.assignments(&self.node_id)) + future::result(self.store.active_assignments(&self.node_id)) .map_err(|e| anyhow!("Error querying subgraph assignments: {}", e)) .and_then(move |deployments| { // This operation should finish only after all subgraphs are diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index a9e06e264a7..9ca494a4f62 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -106,6 +106,9 @@ pub trait SubgraphStore: Send + Sync + 'static { fn assignments(&self, node: &NodeId) -> Result, StoreError>; + /// Returns assignments that are not paused + fn active_assignments(&self, node: &NodeId) -> Result, StoreError>; + /// Return `true` if a subgraph `name` exists, regardless of whether the /// subgraph has any deployments attached to it 
fn subgraph_exists(&self, name: &SubgraphName) -> Result; diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index dd6e9212ad5..74b1961fb68 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -74,7 +74,7 @@ pub fn pause_or_resume( primary: ConnectionPool, sender: &NotificationSender, search: &DeploymentSearch, - pause: bool, + should_pause: bool, ) -> Result<(), Error> { let locator = search.locate_unique(&primary)?; @@ -86,18 +86,22 @@ pub fn pause_or_resume( .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; let change = match conn.assignment_status(&site)? { - Some((_, paused)) => { - if paused == pause { - println!("deployment {locator} is already {paused}"); - vec![] - } else { + Some((_, is_paused)) => { + if should_pause { + if is_paused { + println!("deployment {locator} is already paused"); + return Ok(()); + } println!("pausing {locator}"); conn.pause_subgraph(&site)? + } else { + println!("resuming {locator}"); + conn.resume_subgraph(&site)? } } None => { - println!("resuming {locator}"); - conn.resume_subgraph(&site)? + println!("deployment {locator} not found"); + return Ok(()); } }; println!("Operation completed"); diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index f2e5cb42df2..29aec86d670 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -588,6 +588,22 @@ mod queries { .collect::, _>>() } + // All assignments for a node that are currently not paused + pub(super) fn active_assignments( + conn: &PgConnection, + node: &NodeId, + ) -> Result, StoreError> { + ds::table + .inner_join(a::table.on(a::id.eq(ds::id))) + .filter(a::node_id.eq(node.as_str())) + .filter(a::paused_at.is_null()) + .select(ds::all_columns) + .load::(conn)? 
+ .into_iter() + .map(Site::try_from) + .collect::, _>>() + } + pub(super) fn fill_assignments( conn: &PgConnection, infos: &mut [status::Info], @@ -1803,6 +1819,10 @@ impl Mirror { self.read(|conn| queries::assignments(conn, node)) } + pub fn active_assignments(&self, node: &NodeId) -> Result, StoreError> { + self.read(|conn| queries::active_assignments(conn, node)) + } + pub fn assigned_node(&self, site: &Site) -> Result, StoreError> { self.read(|conn| queries::assigned_node(conn, site)) } diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 0c338ee42ad..a44f59bb718 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1304,6 +1304,12 @@ impl SubgraphStoreTrait for SubgraphStore { .map(|sites| sites.iter().map(|site| site.into()).collect()) } + fn active_assignments(&self, node: &NodeId) -> Result, StoreError> { + self.mirror + .active_assignments(node) + .map(|sites| sites.iter().map(|site| site.into()).collect()) + } + fn subgraph_exists(&self, name: &SubgraphName) -> Result { self.mirror.subgraph_exists(name) } From 7f136f6f4745b38080f3052eb7004ecf54a7cdb1 Mon Sep 17 00:00:00 2001 From: Krishnanand V P Date: Fri, 26 May 2023 23:17:42 +0530 Subject: [PATCH 0282/2104] NEWS.md : add graphman commands pause and resume --- NEWS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 176faf76aef..1ae4cf93ba1 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,7 +4,8 @@ - `graphman rewind` has changed, block-number and block-hash are now flags instead of arguments - `graphman rewind` now has an extra flag `--start-block` which will rewind to the startBlock set in manifest or to the genesis block if no startBlock is set - +- `graphman` now has two new commands `pause` and `resume` that can be used to pause and resume a deployment + +- **Derived fields getter**: Derived fields can now be accessed from within the mapping code during indexing. 
([#4434](https://github.com/graphprotocol/graph-node/pull/4434))
+- **Sorting interfaces by child entity**: Interfaces can now be sorted by non-derived child entities. ([#4058](https://github.com/graphprotocol/graph-node/pull/4058))
+- **File data sources can now be spawned from handlers of other file data sources**: This enables the use of file data sources for scenarios where a file data source needs to be spawned from another one. One practical application of this feature is in handling NFT metadata. In such cases, the metadata itself is stored as a file on IPFS and contains embedded IPFS CID for the actual file for the NFT. ([#4713](https://github.com/graphprotocol/graph-node/pull/4713))
+- Allow redeployment of grafted subgraphs even when graft_base is not available: This will allow renaming of already synced grafted subgraphs even when the graft base is not available, which previously failed due to `graft-base` validation errors. ([#4695](https://github.com/graphprotocol/graph-node/pull/4695))
+- `history_blocks` is now available in the index-node API. ([#4662](https://github.com/graphprotocol/graph-node/pull/4662))
+- Added a new `subgraph features` table in `primary` to easily track information like `apiVersion`, `specVersion`, `features`, and data source kinds used by subgraphs. ([#4679](https://github.com/graphprotocol/graph-node/pull/4679))
+- `subgraphFeatures` endpoint now includes data from `subgraph_features` table.
+- `ens_name_by_hash` is now undeprecated: This reintroduces support for fetching ENS names by their hash, dependent on the availability of the underlying [Rainbow Table](https://github.com/graphprotocol/ens-rainbow) ([#4751](https://github.com/graphprotocol/graph-node/pull/4751)).
+- Deterministically failed subgraphs now return valid POIs for subsequent blocks after the block at which they failed.
([#4774](https://github.com/graphprotocol/graph-node/pull/4774)) +- `eth-call` logs now include block hash and block number: This enables easier debugging of eth-call issues. ([#4718](https://github.com/graphprotocol/graph-node/pull/4718)) +- Enabled support for substreams on already supported networks. ([#4767](https://github.com/graphprotocol/graph-node/pull/4767)) +- Add new GraphQL scalar type `Int8`. This new scalar type allows subgraph developers to represent 8-bit signed integers. ([#4511](https://github.com/graphprotocol/graph-node/pull/4511)) +- Add support for overriding module params for substreams-based subgraphs when params are provided in the subgraph manifest. ([#4759](https://github.com/graphprotocol/graph-node/pull/4759)) + +### Breaking changes + +- Duplicate provider labels are not allowed in graph-node config anymore + +### Bug fixes + +- Fixed `PublicProofsOfIndexing` returning the error `Null value resolved for non-null field proofOfIndexing` when fetching POIs for blocks that are not in the cache ([#4768](https://github.com/graphprotocol/graph-node/pull/4768)) +- Fixed an issue where Block stream would fail when switching back to an RPC-based block ingestor from a Firehose ingestor. ([#4790](https://github.com/graphprotocol/graph-node/pull/4790)) +- Fixed an issue where derived loaders were not working with entities with Bytes as IDs ([#4773](https://github.com/graphprotocol/graph-node/pull/4773)) +- Firehose connection test now retries for 30 secs before setting the provider status to `Broken` ([#4754](https://github.com/graphprotocol/graph-node/pull/4754)) +- Fixed the `nonFatalErrors` field not populating in the index node API. ([#4615](https://github.com/graphprotocol/graph-node/pull/4615)) +- Fixed `graph-node` panicking on the first startup when both Firehose and RPC providers are configured together. 
([#4680](https://github.com/graphprotocol/graph-node/pull/4680))
+- Fixed block ingestor failing to startup with the error `net version for chain mainnet has changed from 0 to 1` when switching from Firehose to an RPC provider. ([#4692](https://github.com/graphprotocol/graph-node/pull/4692))
+- Fixed Firehose endpoints getting rate-limited due to duplicated providers during connection pool initialization. ([#4778](https://github.com/graphprotocol/graph-node/pull/4778))
+- Fixed a determinism issue where stale entities were being returned when using `get_many` and `get_derived` ([#4801](https://github.com/graphprotocol/graph-node/pull/4801))
+
+### Graphman
+
+- Added two new `graphman` commands `pause` and `resume`: Instead of reassigning to a non-existent node these commands can now be used for pausing and resuming subgraphs. ([#4642](https://github.com/graphprotocol/graph-node/pull/4642))
+- Added a new `graphman` command `restart` to restart a subgraph. ([#4742](https://github.com/graphprotocol/graph-node/pull/4742))
+
+**Full Changelog**: https://github.com/graphprotocol/graph-node/compare/v0.31.0...c350e4f35c49bcf8a8b521851f790234ba2c0295
+
+
 ## v0.31.0
 
 ### What's new
+
 - **Fulltext searches can now be combined with `where` filtering**, further narrowing down search results. [#4442](https://github.com/graphprotocol/graph-node/pull/4442)
 - Tweaked how RPC provider limiting rules are interpreted from configurations. In particular, node IDs that don't match any rules of a provider won't have access to said provider instead of having access to it for an unlimited number of subgraphs. Read the [docs](https://github.com/graphprotocol/graph-node/pull/4353/files) for more information. [#4353](https://github.com/graphprotocol/graph-node/pull/4353)
 - Introduced WASM host function `store.get_in_block`, which is a much faster variant of `store.get` limited to entities created or updated in the current block.
[#4540](https://github.com/graphprotocol/graph-node/pull/4540) @@ -33,6 +78,7 @@ Derived fields getter by @flametuner in https://github.com/grahprotocol/graph-no - Removed support for `GRAPH_ETHEREUM_IS_FIREHOSE_PREFERRED`, `REVERSIBLE_ORDER_BY_OFF`, and `GRAPH_STORE_CONNECTION_TRY_ALWAYS` env. variables. [#4375](https://github.om/graphprotocol/graph-node/pull/4375), [#4436](https://github.com/graphprotocol/graph-node/pull/4436) ### Bug fixes + - Fixed a bug that would cause subgraphs to fail with a `subgraph writer poisoned by previous error` message following certain database errors. [#4533](https://github.com/graphprotocol/graph-node/pull/4533) - Fixed a bug that would cause subgraphs to fail with a `store error: no connection to the server` message when database connection e.g. gets killed. [#4435](https://github.com/graphprotocol/graph-node/pull/4435) - The `subgraph_reassign` JSON-RPC method doesn't fail anymore when multiple deployment copies are found: only the active copy is reassigned, the others are ignored. [#4395](https://github.com/graphprotocol/graph-node/pull/4395) @@ -44,6 +90,7 @@ Derived fields getter by @flametuner in https://github.com/grahprotocol/graph-no - Fixed faulty `startBlock` selection logic in substreams. [#4463](https://github.com/graphprotocol/graph-node/pull/4463) ### Graphman + - The behavior for `graphman prune` has changed: running just `graphman prune` will mark the subgraph for ongoing pruning in addition to performing an initial pruning. To avoid ongoing pruning, use `graphman prune --once` ([docs](./docs/implementation/pruning.md)). [#4429](https://github.com/graphprotocol/graph-node/pull/4429) - The env. var. `GRAPH_STORE_HISTORY_COPY_THRESHOLD` –which serves as a configuration setting for `graphman prune`– has been renamed to `GRAPH_STORE_HISTORY_REBUILD_THRESHOLD`. [#4505](https://github.com/graphprotocol/graph-node/pull/4505) - You can now list all existing deployments via `graphman info --all`. 
[#4347](https://github.com/graphprotocol/graph-node/pull/4347) @@ -53,6 +100,7 @@ Derived fields getter by @flametuner in https://github.com/grahprotocol/graph-no - `graphman reassign` now emits a warning when it suspects a typo in node IDs. [#4377](https://github.com/graphprotocol/graph-node/pull/4377) ### Metrics and logging + - Subgraph syncing time metric `deployment_sync_secs` now stops updating once the subgraph has synced. [#4489](https://github.com/graphprotocol/graph-node/pull/4489) - New `endpoint_request` metric to track error rates of different providers. [#4490](https://github.com/graphprotocol/graph-node/pull/4490), [#4504](https://github.com/graphprotocol/graph-node/pull/4504), [#4430](https://github.com/graphprotocol/graph-node/pull/4430) - New metrics `chain_head_cache_num_blocks`, `chain_head_cache_oldest_block`, `chain_head_cache_latest_block`, `chain_head_cache_hits`, and `chain_head_cache_misses` to monitor the effectiveness of `graph-node`'s in-memory chain head caches. [#4440](https://github.com/graphprotocol/graph-node/pull/4440) @@ -92,7 +140,7 @@ New `graph-node` installations now **mandate** PostgreSQL to use C locale and UT - Lots of visual and filtering improvements to [#4232](https://github.com/graphprotocol/graph-node/pull/4232) - More aggressive in-memory caching of blocks close the chain head, potentially alleviating database load. [#4215](https://github.com/graphprotocol/graph-node/pull/4215) - New counter Prometheus metric `query_validation_error_counter`, labelled by deployment ID and error code. [#4230](https://github.com/graphprotocol/graph-node/pull/4230) -graph_elasticsearch_logs_sent + graph_elasticsearch_logs_sent - Turned "Flushing logs to Elasticsearch" log into a Prometheus metric (`graph_elasticsearch_logs_sent`) to reduce log noise. 
[#4333](https://github.com/graphprotocol/graph-node/pull/4333) - New materialized view `info.chain_sizes`, which works the same way as the already existing `info.subgraph_sizes` and `info.table_sizes`. [#4318](https://github.com/graphprotocol/graph-node/pull/4318) - New `graphman stats` subcommands `set-target` and `target` to manage statistics targets for specific deployments (i.e. how much data PostgreSQL samples when analyzing a table). [#4092](https://github.com/graphprotocol/graph-node/pull/4092) @@ -181,41 +229,44 @@ Dependency upgrades: ### What's new -* Grafted subgraphs can now add their own data sources. [#3989](https://github.com/graphprotocol/graph-node/pull/3989), [#4027](https://github.com/graphprotocol/graph-node/pull/4027), [#4030](https://github.com/graphprotocol/graph-node/pull/4030) -* Add support for filtering by nested interfaces. [#3677](https://github.com/graphprotocol/graph-node/pull/3677) -* Add support for message handlers in Cosmos [#3975](https://github.com/graphprotocol/graph-node/pull/3975) -* Dynamic data sources for Firehose-backed subgraphs. [#4075](https://github.com/graphprotocol/graph-node/pull/4075) -* Various logging improvements. [#4078](https://github.com/graphprotocol/graph-node/pull/4078), [#4084](https://github.com/graphprotocol/graph-node/pull/4084), [#4031](https://github.com/graphprotocol/graph-node/pull/4031), [#4144](https://github.com/graphprotocol/graph-node/pull/4144), [#3990](https://github.com/graphprotocol/graph-node/pull/3990) -* Some DB queries now have GCP Cloud Insight -compliant tags that show where the query originated from. [#4079](https://github.com/graphprotocol/graph-node/pull/4079) -* New configuration variable `GRAPH_STATIC_FILTERS_THRESHOLD` to conditionally enable static filtering based on the number of dynamic data sources. [#4008](https://github.com/graphprotocol/graph-node/pull/4008) -* New configuration variable `GRAPH_STORE_BATCH_TARGET_DURATION`. 
[#4133](https://github.com/graphprotocol/graph-node/pull/4133) +- Grafted subgraphs can now add their own data sources. [#3989](https://github.com/graphprotocol/graph-node/pull/3989), [#4027](https://github.com/graphprotocol/graph-node/pull/4027), [#4030](https://github.com/graphprotocol/graph-node/pull/4030) +- Add support for filtering by nested interfaces. [#3677](https://github.com/graphprotocol/graph-node/pull/3677) +- Add support for message handlers in Cosmos [#3975](https://github.com/graphprotocol/graph-node/pull/3975) +- Dynamic data sources for Firehose-backed subgraphs. [#4075](https://github.com/graphprotocol/graph-node/pull/4075) +- Various logging improvements. [#4078](https://github.com/graphprotocol/graph-node/pull/4078), [#4084](https://github.com/graphprotocol/graph-node/pull/4084), [#4031](https://github.com/graphprotocol/graph-node/pull/4031), [#4144](https://github.com/graphprotocol/graph-node/pull/4144), [#3990](https://github.com/graphprotocol/graph-node/pull/3990) +- Some DB queries now have GCP Cloud Insight -compliant tags that show where the query originated from. [#4079](https://github.com/graphprotocol/graph-node/pull/4079) +- New configuration variable `GRAPH_STATIC_FILTERS_THRESHOLD` to conditionally enable static filtering based on the number of dynamic data sources. [#4008](https://github.com/graphprotocol/graph-node/pull/4008) +- New configuration variable `GRAPH_STORE_BATCH_TARGET_DURATION`. [#4133](https://github.com/graphprotocol/graph-node/pull/4133) #### Docker image -* The official Docker image now runs on Debian 11 "Bullseye". [#4081](https://github.com/graphprotocol/graph-node/pull/4081) -* We now ship [`envsubst`](https://github.com/a8m/envsubst) with the official Docker image, allowing you to easily run templating logic on your configuration files. [#3974](https://github.com/graphprotocol/graph-node/pull/3974) + +- The official Docker image now runs on Debian 11 "Bullseye". 
[#4081](https://github.com/graphprotocol/graph-node/pull/4081) +- We now ship [`envsubst`](https://github.com/a8m/envsubst) with the official Docker image, allowing you to easily run templating logic on your configuration files. [#3974](https://github.com/graphprotocol/graph-node/pull/3974) #### Graphman We have a new documentation page for `graphman`, check it out [here](https://github.com/graphprotocol/graph-node/blob/2da697b1af17b1c947679d1b1a124628146545a6/docs/graphman.md)! -* Subgraph pruning with `graphman`! [#3898](https://github.com/graphprotocol/graph-node/pull/3898), [#4125](https://github.com/graphprotocol/graph-node/pull/4125), [#4153](https://github.com/graphprotocol/graph-node/pull/4153), [#4152](https://github.com/graphprotocol/graph-node/pull/4152), [#4156](https://github.com/graphprotocol/graph-node/pull/4156), [#4041](https://github.com/graphprotocol/graph-node/pull/4041) -* New command `graphman drop` to hastily delete a subgraph deployment. [#4035](https://github.com/graphprotocol/graph-node/pull/4035) -* New command `graphman chain call-cache` for clearing the call cache for a given chain. [#4066](https://github.com/graphprotocol/graph-node/pull/4066) -* Add `--delete-duplicates` flag to `graphman check-blocks` by @tilacog in https://github.com/graphprotocol/graph-node/pull/3988 +- Subgraph pruning with `graphman`! [#3898](https://github.com/graphprotocol/graph-node/pull/3898), [#4125](https://github.com/graphprotocol/graph-node/pull/4125), [#4153](https://github.com/graphprotocol/graph-node/pull/4153), [#4152](https://github.com/graphprotocol/graph-node/pull/4152), [#4156](https://github.com/graphprotocol/graph-node/pull/4156), [#4041](https://github.com/graphprotocol/graph-node/pull/4041) +- New command `graphman drop` to hastily delete a subgraph deployment. [#4035](https://github.com/graphprotocol/graph-node/pull/4035) +- New command `graphman chain call-cache` for clearing the call cache for a given chain. 
[#4066](https://github.com/graphprotocol/graph-node/pull/4066) +- Add `--delete-duplicates` flag to `graphman check-blocks` by @tilacog in https://github.com/graphprotocol/graph-node/pull/3988 #### Performance -* Restarting a node now takes much less time because `postgres_fdw` user mappings are only rebuilt upon schema changes. If necessary, you can also use the new commands `graphman database migrate` and `graphman database remap` to respectively apply schema migrations or run remappings manually. [#4009](https://github.com/graphprotocol/graph-node/pull/4009), [#4076](https://github.com/graphprotocol/graph-node/pull/4076) -* Database replicas now won't fall behind as much when copying subgraph data. [#3966](https://github.com/graphprotocol/graph-node/pull/3966) [#3986](https://github.com/graphprotocol/graph-node/pull/3986) -* Block handlers optimization with Firehose >= 1.1.0. [#3971](https://github.com/graphprotocol/graph-node/pull/3971) -* Reduced the amount of data that a non-primary shard has to mirror from the primary shard. [#4015](https://github.com/graphprotocol/graph-node/pull/4015) -* We now use advisory locks to lock deployments' tables against concurrent writes. [#4010](https://github.com/graphprotocol/graph-node/pull/4010) + +- Restarting a node now takes much less time because `postgres_fdw` user mappings are only rebuilt upon schema changes. If necessary, you can also use the new commands `graphman database migrate` and `graphman database remap` to respectively apply schema migrations or run remappings manually. [#4009](https://github.com/graphprotocol/graph-node/pull/4009), [#4076](https://github.com/graphprotocol/graph-node/pull/4076) +- Database replicas now won't fall behind as much when copying subgraph data. [#3966](https://github.com/graphprotocol/graph-node/pull/3966) [#3986](https://github.com/graphprotocol/graph-node/pull/3986) +- Block handlers optimization with Firehose >= 1.1.0. 
[#3971](https://github.com/graphprotocol/graph-node/pull/3971) +- Reduced the amount of data that a non-primary shard has to mirror from the primary shard. [#4015](https://github.com/graphprotocol/graph-node/pull/4015) +- We now use advisory locks to lock deployments' tables against concurrent writes. [#4010](https://github.com/graphprotocol/graph-node/pull/4010) #### Bug fixes -* Fixed a bug that would cause some failed subgraphs to never restart. [#3959](https://github.com/graphprotocol/graph-node/pull/3959) -* Fixed a bug that would cause bad POIs for Firehose-backed subgraphs when processing `CREATE` calls. [#4085](https://github.com/graphprotocol/graph-node/pull/4085) -* Fixed a bug which would cause failure to redeploy a subgraph immediately after deletion. [#4044](https://github.com/graphprotocol/graph-node/pull/4044) -* Firehose connections are now load-balanced. [#4083](https://github.com/graphprotocol/graph-node/pull/4083) -* Determinism fixes. **See above.** [#4055](https://github.com/graphprotocol/graph-node/pull/4055), [#4149](https://github.com/graphprotocol/graph-node/pull/4149) + +- Fixed a bug that would cause some failed subgraphs to never restart. [#3959](https://github.com/graphprotocol/graph-node/pull/3959) +- Fixed a bug that would cause bad POIs for Firehose-backed subgraphs when processing `CREATE` calls. [#4085](https://github.com/graphprotocol/graph-node/pull/4085) +- Fixed a bug which would cause failure to redeploy a subgraph immediately after deletion. [#4044](https://github.com/graphprotocol/graph-node/pull/4044) +- Firehose connections are now load-balanced. [#4083](https://github.com/graphprotocol/graph-node/pull/4083) +- Determinism fixes. 
**See above.** [#4055](https://github.com/graphprotocol/graph-node/pull/4055), [#4149](https://github.com/graphprotocol/graph-node/pull/4149) #### Dependency updates @@ -501,12 +552,11 @@ These are some of the features that will probably be helpful for indexers 😊 - A token can be set via `GRAPH_POI_ACCESS_TOKEN` to limit access to the POI route - The new `graphman` commands 🙂 - ### Api Version 0.0.7 and Spec Version 0.0.5 + This release brings API Version 0.0.7 in mappings, which allows Ethereum event handlers to require transaction receipts to be present in the `Event` object. Refer to [PR #3373](https://github.com/graphprotocol/graph-node/pull/3373) for instructions on how to enable that. - ## 0.25.2 This release includes two changes: @@ -528,7 +578,9 @@ We strongly recommend updating to this version as quickly as possible. ## 0.25.0 ### Api Version 0.0.6 + This release ships support for API version 0.0.6 in mappings: + - Added `nonce` field for `Transaction` objects. - Added `baseFeePerGas` field for `Block` objects ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). @@ -536,12 +588,12 @@ This release ships support for API version 0.0.6 in mappings: All cached block data must be refetched to account for the new `Block` and `Trasaction` struct versions, so this release includes a `graph-node` startup check that will: + 1. Truncate all block cache tables. 2. Bump the `db_version` value from `2` to `3`. _(Table truncation is a fast operation and no downtime will occur because of that.)_ - ### Ethereum - 'Out of gas' errors on contract calls are now considered deterministic errors, @@ -553,10 +605,12 @@ _(Table truncation is a fast operation and no downtime will occur because of tha is now hardcoded to 50 million. ### Multiblockchain + - Initial support for NEAR subgraphs. - Added `FirehoseBlockStream` implementation of `BlockStream` (#2716) ### Misc + - Rust docker image is now based on Debian Buster. - Optimizations to the PostgreSQL notification queue. 
- Improve PostgreSQL robustness in multi-sharded setups. (#2815) @@ -571,7 +625,6 @@ _(Table truncation is a fast operation and no downtime will occur because of tha - Handle revert cases from Hardhat and Ganache (#2984) - Fix bug on experimental prefetching optimization feature (#2899) - ## 0.24.2 This release only adds a fix for an issue where certain GraphQL queries @@ -608,7 +661,9 @@ For instance, the following query... ```graphql { - subgraphFeatures(subgraphId: "QmW9ajg2oTyPfdWKyUkxc7cTJejwdyCbRrSivfryTfFe5D") { + subgraphFeatures( + subgraphId: "QmW9ajg2oTyPfdWKyUkxc7cTJejwdyCbRrSivfryTfFe5D" + ) { features errors } @@ -622,10 +677,7 @@ For instance, the following query... "data": { "subgraphFeatures": { "errors": [], - "features": [ - "nonFatalErrors", - "ipfsOnEthereumContracts" - ] + "features": ["nonFatalErrors", "ipfsOnEthereumContracts"] } } } @@ -665,14 +717,17 @@ and the long awaited AssemblyScript version upgrade! resolving issue [#2409](https://github.com/graphprotocol/graph-node/issues/2409). Done in [#2511](https://github.com/graphprotocol/graph-node/pull/2511). ### Logs + - The log `"Skipping handler because the event parameters do not match the event signature."` was downgraded from info to trace level. - Some block ingestor error logs were upgrded from debug to info level [#2666](https://github.com/graphprotocol/graph-node/pull/2666). ### Metrics + - `query_semaphore_wait_ms` is now by shard, and has the `pool` and `shard` labels. - `deployment_failed` metric added, it is `1` if the subgraph has failed and `0` otherwise. ### Other + - Upgrade to tokio 1.0 and futures 0.3 [#2679](https://github.com/graphprotocol/graph-node/pull/2679), the first major contribution by StreamingFast! - Support Celo block reward events [#2670](https://github.com/graphprotocol/graph-node/pull/2670). - Reduce the maximum WASM stack size and make it configurable [#2719](https://github.com/graphprotocol/graph-node/pull/2719). 
@@ -707,14 +762,17 @@ In the meantime, here are the changes for this release: - Using `ethereum.call` in mappings in globals is deprecated ### Graphman + Graphman is a CLI tool to manage your subgraphs. It is now included in the Docker container [#2289](https://github.com/graphprotocol/graph-node/pull/2289). And new commands have been added: + - `graphman copy` can copy subgraphs across DB shards [#2313](https://github.com/graphprotocol/graph-node/pull/2313). - `graphman rewind` to rewind a deployment to a given block [#2373](https://github.com/graphprotocol/graph-node/pull/2373). - `graphman query` to log info about a GraphQL query [#2206](https://github.com/graphprotocol/graph-node/pull/2206). - `graphman create` to create a subgraph name [#2419](https://github.com/graphprotocol/graph-node/pull/2419). ### Metrics + - The `deployment_blocks_behind` metric has been removed, and a `deployment_head` metric has been added. To see how far a deployment is behind, use the difference between `ethereum_chain_head_number` and @@ -724,6 +782,7 @@ Graphman is a CLI tool to manage your subgraphs. It is now included in the Docke ## 0.22.0 ### Feature: Block store sharding + This release makes it possible to [shard the block and call cache](./docs/config.md) for chain data across multiple independent Postgres databases. **This feature is considered experimental. We encourage users to try this out in a test environment, but do not recommend it yet for production @@ -731,17 +790,20 @@ use.** In particular, the details of how sharding is configured may change in ba ways in the future. ### Feature: Non-fatal errors update + Non-fatal errors (see release 0.20 for details) is documented and can now be enabled on graph-cli. Various related bug fixes have been made #2121 #2136 #2149 #2160. ### Improvements + - Add bitwise operations and string constructor to BigInt #2151. - docker: Allow custom ethereum poll interval #2139. 
- Deterministic error work in preparation for gas #2112 ### Bug fixes + - Fix not contains filter #2146. -- Resolve __typename in _meta field #2118 +- Resolve \_\_typename in \_meta field #2118 - Add CORS for all HTTP responses #2196 ## 0.21.1 From f9caa1ba854aa538cd3707416eb9404f1204b3d3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:18:05 -0700 Subject: [PATCH 0374/2104] graph: Remove unused trait QueryLoadManager --- graph/src/components/graphql.rs | 7 +------ graph/src/data/graphql/effort.rs | 14 +------------- graph/src/lib.rs | 4 +--- 3 files changed, 3 insertions(+), 22 deletions(-) diff --git a/graph/src/components/graphql.rs b/graph/src/components/graphql.rs index ed7738308c2..9aa7072b69b 100644 --- a/graph/src/components/graphql.rs +++ b/graph/src/components/graphql.rs @@ -1,6 +1,6 @@ use futures::prelude::*; -use crate::data::query::{CacheStatus, Query, QueryTarget}; +use crate::data::query::{Query, QueryTarget}; use crate::data::subscription::{Subscription, SubscriptionError, SubscriptionResult}; use crate::data::{graphql::effort::LoadManager, query::QueryResults}; use crate::prelude::DeploymentHash; @@ -52,8 +52,3 @@ pub trait GraphQLMetrics: Send + Sync + 'static { fn observe_query_validation(&self, duration: Duration, id: &DeploymentHash); fn observe_query_validation_error(&self, error_codes: Vec<&str>, id: &DeploymentHash); } - -#[async_trait] -pub trait QueryLoadManager: Send + Sync { - fn record_work(&self, shape_hash: u64, duration: Duration, cache_status: CacheStatus); -} diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index d4f18862e4a..c0438d1e9e6 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -12,7 +12,7 @@ use crate::components::store::PoolWaitStats; use crate::data::graphql::shape_hash::shape_hash; use crate::data::query::{CacheStatus, QueryExecutionError}; use crate::prelude::q; -use crate::prelude::{async_trait, debug, info, o, warn, 
Logger, QueryLoadManager, ENV_VARS}; +use crate::prelude::{debug, info, o, warn, Logger, ENV_VARS}; use crate::util::stats::MovingStats; struct QueryEffort { @@ -490,15 +490,3 @@ impl LoadManager { kill_rate } } - -#[async_trait] -impl QueryLoadManager for LoadManager { - fn record_work(&self, shape_hash: u64, duration: Duration, cache_status: CacheStatus) { - if let Some(counter) = self.query_counters.get(&cache_status) { - counter.inc() - } - if !ENV_VARS.load_management_is_disabled() { - self.effort.add(shape_hash, duration, &self.effort_gauge); - } - } -} diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 54715109d27..f11fe2c84e2 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -115,9 +115,7 @@ pub mod prelude { EthereumBlock, EthereumBlockWithCalls, EthereumCall, LightEthereumBlock, LightEthereumBlockExt, }; - pub use crate::components::graphql::{ - GraphQLMetrics, GraphQlRunner, QueryLoadManager, SubscriptionResultFuture, - }; + pub use crate::components::graphql::{GraphQLMetrics, GraphQlRunner, SubscriptionResultFuture}; pub use crate::components::link_resolver::{JsonStreamValue, JsonValueStream, LinkResolver}; pub use crate::components::metrics::{ stopwatch::StopwatchMetrics, subgraph::*, Collector, Counter, CounterVec, Gauge, GaugeVec, From 97a3c3ac26b4922805c86a2f282e4629c5987cd5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:21:23 -0700 Subject: [PATCH 0375/2104] graph, store: Expose shard and deployment id from QueryStore --- graph/src/components/store/traits.rs | 7 +++++++ store/postgres/src/query_store.rs | 10 +++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index ad1e25bf63a..636561d5173 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -557,6 +557,13 @@ pub trait QueryStore: Send + Sync { /// A permit should be acquired before starting query execution. 
async fn query_permit(&self) -> Result; + + /// Report the name of the shard in which the subgraph is stored. This + /// should only be used for reporting and monitoring + fn shard(&self) -> &str; + + /// Return the deployment id that is queried by this `QueryStore` + fn deployment_id(&self) -> DeploymentId; } /// A view of the store that can provide information about the indexing status diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index 1932cd61e9e..ab339a54f0f 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -1,5 +1,5 @@ use crate::deployment_store::{DeploymentStore, ReplicaId}; -use graph::components::store::QueryStore as QueryStoreTrait; +use graph::components::store::{DeploymentId, QueryStore as QueryStoreTrait}; use graph::data::query::Trace; use graph::data::value::Object; use graph::prelude::*; @@ -127,4 +127,12 @@ impl QueryStoreTrait for QueryStore { async fn query_permit(&self) -> Result { self.store.query_permit(self.replica_id).await } + + fn shard(&self) -> &str { + self.site.shard.as_str() + } + + fn deployment_id(&self) -> DeploymentId { + self.site.id.into() + } } From 5fd299bcc356b3092f4bde1a625a515f48ea8e8b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 9 Aug 2023 16:14:27 -0700 Subject: [PATCH 0376/2104] graphql: Move recording query work into the resolver --- graphql/src/execution/resolver.rs | 10 +++++++++- graphql/src/query/mod.rs | 9 ++------- graphql/src/runner.rs | 3 ++- graphql/src/store/resolver.rs | 19 +++++++++++++++---- graphql/src/subscription/mod.rs | 19 ++++++++++++++++++- server/index-node/src/service.rs | 3 --- store/test-store/src/store.rs | 4 ++-- .../test-store/tests/graphql/introspection.rs | 2 -- store/test-store/tests/graphql/query.rs | 1 + 9 files changed, 49 insertions(+), 21 deletions(-) diff --git a/graphql/src/execution/resolver.rs b/graphql/src/execution/resolver.rs index a95267f0850..6bbfe7995a3 100644 --- 
a/graphql/src/execution/resolver.rs +++ b/graphql/src/execution/resolver.rs @@ -1,5 +1,7 @@ +use std::time::Duration; + use graph::components::store::UnitStream; -use graph::data::query::Trace; +use graph::data::query::{CacheStatus, Trace}; use graph::prelude::{async_trait, s, tokio, Error, QueryExecutionError}; use graph::schema::ApiSchema; use graph::{ @@ -9,6 +11,8 @@ use graph::{ use crate::execution::{ast as a, ExecutionContext}; +use super::Query; + /// A GraphQL resolver that can resolve entities, enum values, scalar types and interfaces/unions. #[async_trait] pub trait Resolver: Sized + Send + Sync + 'static { @@ -122,4 +126,8 @@ pub trait Resolver: Sized + Send + Sync + 'static { fn post_process(&self, _result: &mut QueryResult) -> Result<(), Error> { Ok(()) } + + fn record_work(&self, _query: &Query, _elapsed: Duration, _cache_status: CacheStatus) { + // by default, record nothing + } } diff --git a/graphql/src/query/mod.rs b/graphql/src/query/mod.rs index 707b87936a0..9cbe4a405e2 100644 --- a/graphql/src/query/mod.rs +++ b/graphql/src/query/mod.rs @@ -2,8 +2,6 @@ use graph::prelude::{BlockPtr, CheapClone, QueryExecutionError, QueryResult}; use std::sync::Arc; use std::time::Instant; -use graph::data::graphql::effort::LoadManager; - use crate::execution::{ast as a, *}; /// Utilities for working with GraphQL query ASTs. 
@@ -26,8 +24,6 @@ pub struct QueryExecutionOptions { /// Maximum value for the `skip` argument pub max_skip: u32, - pub load_manager: Arc, - /// Whether to include an execution trace in the result pub trace: bool, } @@ -76,9 +72,8 @@ where .await; let elapsed = start.elapsed(); let cache_status = ctx.cache_status.load(); - options - .load_manager - .record_work(query.shape_hash, elapsed, cache_status); + ctx.resolver + .record_work(query.as_ref(), elapsed, cache_status); query.log_cache_status( &selection_set, block_ptr.map(|b| b.number).unwrap_or(0), diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 30019deb492..8e84b1a5741 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -156,6 +156,7 @@ where error_policy, query.schema.id().clone(), metrics.cheap_clone(), + self.load_manager.cheap_clone(), ) .await?; max_block = max_block.max(resolver.block_number()); @@ -168,7 +169,6 @@ where deadline: ENV_VARS.graphql.query_timeout.map(|t| Instant::now() + t), max_first: max_first.unwrap_or(ENV_VARS.graphql.max_first), max_skip: max_skip.unwrap_or(ENV_VARS.graphql.max_skip), - load_manager: self.load_manager.clone(), trace, }, ) @@ -268,6 +268,7 @@ where max_first: ENV_VARS.graphql.max_first, max_skip: ENV_VARS.graphql.max_skip, graphql_metrics: self.graphql_metrics.clone(), + load_manager: self.load_manager.cheap_clone(), }, ) } diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index c5aeffa0d04..a73cc325238 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -2,17 +2,18 @@ use std::collections::BTreeMap; use std::result; use std::sync::Arc; -use graph::components::store::*; +use graph::components::store::{SubscriptionManager, UnitStream}; +use graph::data::graphql::effort::LoadManager; use graph::data::graphql::{object, ObjectOrInterface}; -use graph::data::query::Trace; +use graph::data::query::{CacheStatus, Trace}; use graph::data::value::{Object, Word}; use graph::prelude::*; use 
graph::schema::{ast as sast, ApiSchema, META_FIELD_TYPE}; use graph::schema::{ErrorPolicy, BLOCK_FIELD_TYPE}; -use crate::execution::ast as a; +use crate::execution::{ast as a, Query}; use crate::metrics::GraphQLMetrics; -use crate::prelude::*; +use crate::prelude::{ExecutionContext, Resolver}; use crate::query::ext::BlockConstraint; use crate::store::query::collect_entities_from_query_field; @@ -28,6 +29,7 @@ pub struct StoreResolver { has_non_fatal_errors: bool, error_policy: ErrorPolicy, graphql_metrics: Arc, + load_manager: Arc, } #[derive(Clone, Debug)] @@ -64,6 +66,7 @@ impl StoreResolver { store: Arc, subscription_manager: Arc, graphql_metrics: Arc, + load_manager: Arc, ) -> Self { StoreResolver { logger: logger.new(o!("component" => "StoreResolver")), @@ -76,6 +79,7 @@ impl StoreResolver { has_non_fatal_errors: false, error_policy: ErrorPolicy::Deny, graphql_metrics, + load_manager, } } @@ -93,6 +97,7 @@ impl StoreResolver { error_policy: ErrorPolicy, deployment: DeploymentHash, graphql_metrics: Arc, + load_manager: Arc, ) -> Result { let store_clone = store.cheap_clone(); let block_ptr = Self::locate_block(store_clone.as_ref(), bc, state).await?; @@ -110,6 +115,7 @@ impl StoreResolver { has_non_fatal_errors, error_policy, graphql_metrics, + load_manager, }; Ok(resolver) } @@ -385,4 +391,9 @@ impl Resolver for StoreResolver { } Ok(()) } + + fn record_work(&self, query: &Query, elapsed: Duration, cache_status: CacheStatus) { + self.load_manager + .record_work(query.shape_hash, elapsed, cache_status); + } } diff --git a/graphql/src/subscription/mod.rs b/graphql/src/subscription/mod.rs index c12d9d904bf..5c31ae293bc 100644 --- a/graphql/src/subscription/mod.rs +++ b/graphql/src/subscription/mod.rs @@ -2,6 +2,7 @@ use std::result::Result; use std::time::{Duration, Instant}; use graph::components::store::UnitStream; +use graph::data::graphql::effort::LoadManager; use graph::schema::ApiSchema; use graph::{components::store::SubscriptionManager, prelude::*, 
schema::ErrorPolicy}; @@ -38,6 +39,8 @@ pub struct SubscriptionExecutionOptions { pub max_skip: u32, pub graphql_metrics: Arc, + + pub load_manager: Arc, } pub fn execute_subscription( @@ -88,6 +91,7 @@ fn create_source_event_stream( options.store.clone(), options.subscription_manager.cheap_clone(), options.graphql_metrics.cheap_clone(), + options.load_manager.cheap_clone(), ); let ctx = ExecutionContext { logger: options.logger.cheap_clone(), @@ -155,6 +159,7 @@ fn map_source_to_response_stream( max_first, max_skip, graphql_metrics, + load_manager, } = options; trigger_stream @@ -169,6 +174,7 @@ fn map_source_to_response_stream( max_first, max_skip, graphql_metrics.cheap_clone(), + load_manager.cheap_clone(), ) .boxed() }) @@ -184,6 +190,7 @@ async fn execute_subscription_event( max_first: u32, max_skip: u32, metrics: Arc, + load_manager: Arc, ) -> Arc { async fn make_resolver( store: Arc, @@ -191,6 +198,7 @@ async fn execute_subscription_event( subscription_manager: Arc, query: &Arc, metrics: Arc, + load_manager: Arc, ) -> Result { let state = store.deployment_state().await?; StoreResolver::at_block( @@ -202,11 +210,20 @@ async fn execute_subscription_event( ErrorPolicy::Deny, query.schema.id().clone(), metrics, + load_manager, ) .await } - let resolver = match make_resolver(store, &logger, subscription_manager, &query, metrics).await + let resolver = match make_resolver( + store, + &logger, + subscription_manager, + &query, + metrics, + load_manager, + ) + .await { Ok(resolver) => resolver, Err(e) => return Arc::new(e.into()), diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index a880bd6b33a..77d3fd62e83 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -141,8 +141,6 @@ where Err(e) => return Ok(QueryResults::from(QueryResult::from(e))), }; - let load_manager = self.graphql_runner.load_manager(); - // Run the query using the index node resolver let query_clone = query.cheap_clone(); let 
logger = self.logger.cheap_clone(); @@ -159,7 +157,6 @@ where deadline: None, max_first: std::u32::MAX, max_skip: std::u32::MAX, - load_manager, trace: false, }; let result = execute_query(query_clone.cheap_clone(), None, None, options).await; diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 22d8b83ce18..8485682f552 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -536,7 +536,8 @@ async fn execute_subgraph_query_internal( bc, error_policy, query.schema.id().clone(), - graphql_metrics() + graphql_metrics(), + LOAD_MANAGER.clone() ) .await ); @@ -548,7 +549,6 @@ async fn execute_subgraph_query_internal( QueryExecutionOptions { resolver, deadline, - load_manager: LOAD_MANAGER.clone(), max_first: std::u32::MAX, max_skip: std::u32::MAX, trace, diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index b79c17fe4e2..97176704227 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -12,7 +12,6 @@ use graph_graphql::prelude::{ a, execute_query, ExecutionContext, Query as PreparedQuery, QueryExecutionOptions, Resolver, }; use test_store::graphql_metrics; -use test_store::LOAD_MANAGER; /// Mock resolver used in tests that don't need a resolver. 
#[derive(Clone)] @@ -568,7 +567,6 @@ async fn introspection_query(schema: Schema, query: &str) -> QueryResult { deadline: None, max_first: std::u32::MAX, max_skip: std::u32::MAX, - load_manager: LOAD_MANAGER.clone(), trace: false, }; diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index c053b9654bd..8eff8ad8e4b 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -625,6 +625,7 @@ async fn run_subscription( max_first: std::u32::MAX, max_skip: std::u32::MAX, graphql_metrics: graphql_metrics(), + load_manager: LOAD_MANAGER.clone(), }; let schema = STORE .subgraph_store() From e969a8c2239594a2c8bfc7b0d73ccf39d41aa911 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:28:30 -0700 Subject: [PATCH 0377/2104] graph, gaphql, server: Remove GraphQlRunner.load_manager --- graph/src/components/graphql.rs | 4 +--- graphql/src/runner.rs | 4 ---- server/http/src/service.rs | 9 +-------- server/http/tests/server.rs | 5 ----- 4 files changed, 2 insertions(+), 20 deletions(-) diff --git a/graph/src/components/graphql.rs b/graph/src/components/graphql.rs index 9aa7072b69b..2e160706989 100644 --- a/graph/src/components/graphql.rs +++ b/graph/src/components/graphql.rs @@ -1,8 +1,8 @@ use futures::prelude::*; +use crate::data::query::QueryResults; use crate::data::query::{Query, QueryTarget}; use crate::data::subscription::{Subscription, SubscriptionError, SubscriptionResult}; -use crate::data::{graphql::effort::LoadManager, query::QueryResults}; use crate::prelude::DeploymentHash; use async_trait::async_trait; @@ -41,8 +41,6 @@ pub trait GraphQlRunner: Send + Sync + 'static { target: QueryTarget, ) -> Result; - fn load_manager(&self) -> Arc; - fn metrics(&self) -> Arc; } diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 8e84b1a5741..cc6675e8e94 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -273,10 +273,6 @@ where ) } - fn 
load_manager(&self) -> Arc { - self.load_manager.clone() - } - fn metrics(&self) -> Arc { self.graphql_metrics.clone() } diff --git a/server/http/src/service.rs b/server/http/src/service.rs index a1880d46a0f..f32d67cf0e4 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -474,10 +474,7 @@ mod tests { use hyper::service::Service; use hyper::{Body, Method, Request}; - use graph::data::{ - graphql::effort::LoadManager, - query::{QueryResults, QueryTarget}, - }; + use graph::data::query::{QueryResults, QueryTarget}; use graph::prelude::*; use crate::test_utils; @@ -528,10 +525,6 @@ mod tests { unreachable!(); } - fn load_manager(&self) -> Arc { - unimplemented!() - } - fn metrics(&self) -> Arc { Arc::new(TestGraphQLMetrics) } diff --git a/server/http/tests/server.rs b/server/http/tests/server.rs index a8b53c3c70d..e3ccd761970 100644 --- a/server/http/tests/server.rs +++ b/server/http/tests/server.rs @@ -3,7 +3,6 @@ use hyper::{Body, Client, Request}; use std::time::Duration; use graph::data::{ - graphql::effort::LoadManager, query::{QueryResults, QueryTarget}, value::{Object, Word}, }; @@ -74,10 +73,6 @@ impl GraphQlRunner for TestGraphQlRunner { unreachable!(); } - fn load_manager(&self) -> Arc { - unimplemented!() - } - fn metrics(&self) -> Arc { Arc::new(TestGraphQLMetrics) } From 9199f53dd565056adef7fc07e8dfd93ed0b3d810 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 9 Aug 2023 16:35:06 -0700 Subject: [PATCH 0378/2104] graph: Do not store window and bin size redundantly --- graph/src/data/graphql/effort.rs | 8 ++------ graph/src/util/stats.rs | 4 ++-- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index c0438d1e9e6..4c2470ddc03 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -22,8 +22,6 @@ struct QueryEffort { /// Track the effort for queries (identified by their ShapeHash) over a /// time window. 
struct QueryEffortInner { - window_size: Duration, - bin_size: Duration, effort: HashMap, total: MovingStats, } @@ -65,16 +63,14 @@ impl QueryEffort { impl QueryEffortInner { fn new(window_size: Duration, bin_size: Duration) -> Self { Self { - window_size, - bin_size, effort: HashMap::default(), total: MovingStats::new(window_size, bin_size), } } fn add(&mut self, shape_hash: u64, duration: Duration) { - let window_size = self.window_size; - let bin_size = self.bin_size; + let window_size = self.total.window_size; + let bin_size = self.total.bin_size; let now = Instant::now(); self.effort .entry(shape_hash) diff --git a/graph/src/util/stats.rs b/graph/src/util/stats.rs index b5e04c57a6a..ac608b56dcb 100644 --- a/graph/src/util/stats.rs +++ b/graph/src/util/stats.rs @@ -55,8 +55,8 @@ impl Bin { /// a `window_size` of 5 minutes and a bin size of one second would use /// 300 bins. Each bin has constant size pub struct MovingStats { - window_size: Duration, - bin_size: Duration, + pub window_size: Duration, + pub bin_size: Duration, /// The buffer with measurements. 
The back has the most recent entries, /// and the front has the oldest entries bins: VecDeque, From e48df356ba1817d33e70ac5192830d1e8b8b5f91 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:45:50 -0700 Subject: [PATCH 0379/2104] graph, graphql: Pass shard and deployment id into LoadManager --- graph/src/data/graphql/effort.rs | 20 +++++++++++++++++--- graphql/src/runner.rs | 4 ++++ graphql/src/store/resolver.rs | 9 +++++++-- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index 4c2470ddc03..76a1dc12946 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -8,7 +8,7 @@ use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use crate::components::metrics::{Counter, Gauge, MetricsRegistry}; -use crate::components::store::PoolWaitStats; +use crate::components::store::{DeploymentId, PoolWaitStats}; use crate::data::graphql::shape_hash::shape_hash; use crate::data::query::{CacheStatus, QueryExecutionError}; use crate::prelude::q; @@ -263,7 +263,14 @@ impl LoadManager { /// Record that we spent `duration` amount of work for the query /// `shape_hash`, where `cache_status` indicates whether the query /// was cached or had to actually run - pub fn record_work(&self, shape_hash: u64, duration: Duration, cache_status: CacheStatus) { + pub fn record_work( + &self, + _shard: &str, + _deployment: DeploymentId, + shape_hash: u64, + duration: Duration, + cache_status: CacheStatus, + ) { self.query_counters .get(&cache_status) .map(GenericCounter::inc); @@ -318,7 +325,14 @@ impl LoadManager { /// case, we also do not take any locks when asked to update statistics, /// or to check whether we are overloaded; these operations amount to /// noops. 
- pub fn decide(&self, wait_stats: &PoolWaitStats, shape_hash: u64, query: &str) -> Decision { + pub fn decide( + &self, + wait_stats: &PoolWaitStats, + _shard: &str, + _deployment: DeploymentId, + shape_hash: u64, + query: &str, + ) -> Decision { use Decision::*; if self.blocked_queries.contains(&shape_hash) { diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index cc6675e8e94..5e885092657 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -136,6 +136,8 @@ where self.load_manager .decide( &store.wait_stats().map_err(QueryExecutionError::from)?, + store.shard(), + store.deployment_id(), query.shape_hash, query.query_text.as_ref(), ) @@ -248,6 +250,8 @@ where .load_manager .decide( &store.wait_stats().map_err(QueryExecutionError::from)?, + store.shard(), + store.deployment_id(), query.shape_hash, query.query_text.as_ref(), ) diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index a73cc325238..ec0b5ff9d08 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -393,7 +393,12 @@ impl Resolver for StoreResolver { } fn record_work(&self, query: &Query, elapsed: Duration, cache_status: CacheStatus) { - self.load_manager - .record_work(query.shape_hash, elapsed, cache_status); + self.load_manager.record_work( + self.store.shard(), + self.store.deployment_id(), + query.shape_hash, + elapsed, + cache_status, + ); } } From f0a5a6d0d65857b5a59fa8dab977caa48627c0ec Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:52:35 -0700 Subject: [PATCH 0380/2104] all: Track and decide query effort by shard --- graph/src/data/graphql/effort.rs | 25 +++++++++++++++++++------ node/src/bin/manager.rs | 2 +- node/src/main.rs | 2 ++ store/test-store/src/store.rs | 1 + tests/src/fixture/mod.rs | 2 +- 5 files changed, 24 insertions(+), 8 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index 76a1dc12946..eee496fed8a 100644 --- 
a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -184,7 +184,7 @@ impl Decision { pub struct LoadManager { logger: Logger, - effort: QueryEffort, + effort: HashMap, /// List of query shapes that have been statically blocked through /// configuration blocked_queries: HashSet, @@ -202,6 +202,7 @@ pub struct LoadManager { impl LoadManager { pub fn new( logger: &Logger, + shards: Vec, blocked_queries: Vec>, registry: Arc, ) -> Self { @@ -248,9 +249,15 @@ impl LoadManager { }) .collect::>(); + let effort = HashMap::from_iter( + shards + .into_iter() + .map(|shard| (shard, QueryEffort::default())), + ); + Self { logger, - effort: QueryEffort::default(), + effort, blocked_queries, jailed_queries: RwLock::new(HashSet::new()), kill_state: RwLock::new(KillState::new()), @@ -265,7 +272,7 @@ impl LoadManager { /// was cached or had to actually run pub fn record_work( &self, - _shard: &str, + shard: &str, _deployment: DeploymentId, shape_hash: u64, duration: Duration, @@ -275,7 +282,9 @@ impl LoadManager { .get(&cache_status) .map(GenericCounter::inc); if !ENV_VARS.load_management_is_disabled() { - self.effort.add(shape_hash, duration, &self.effort_gauge); + self.effort + .get(shard) + .map(|effort| effort.add(shape_hash, duration, &self.effort_gauge)); } } @@ -328,7 +337,7 @@ impl LoadManager { pub fn decide( &self, wait_stats: &PoolWaitStats, - _shard: &str, + shard: &str, _deployment: DeploymentId, shape_hash: u64, query: &str, @@ -356,7 +365,11 @@ impl LoadManager { return Proceed; } - let (query_effort, total_effort) = self.effort.current_effort(shape_hash); + let (query_effort, total_effort) = self + .effort + .get(shard) + .map(|effort| effort.current_effort(shape_hash)) + .unwrap_or((None, Duration::ZERO)); // When `total_effort` is `Duratino::ZERO`, we haven't done any work. 
All are // welcome if total_effort.is_zero() { diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index dd631c55dc8..80b50226a63 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -944,7 +944,7 @@ impl Context { let store = self.store(); let subscription_manager = Arc::new(PanicSubscriptionManager); - let load_manager = Arc::new(LoadManager::new(&logger, vec![], registry.clone())); + let load_manager = Arc::new(LoadManager::new(&logger, vec![], vec![], registry.clone())); Arc::new(GraphQlRunner::new( &logger, diff --git a/node/src/main.rs b/node/src/main.rs index b882f2ee970..f6161cf4d72 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -444,8 +444,10 @@ async fn main() { let blockchain_map = Arc::new(blockchain_map); + let shards: Vec<_> = config.stores.keys().cloned().collect(); let load_manager = Arc::new(LoadManager::new( &logger, + shards, expensive_queries, metrics_registry.clone(), )); diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 8485682f552..82ce2590d5f 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -57,6 +57,7 @@ lazy_static! 
{ pub static ref METRICS_REGISTRY: Arc = Arc::new(MetricsRegistry::mock()); pub static ref LOAD_MANAGER: Arc = Arc::new(LoadManager::new( &LOGGER, + CONFIG.stores.keys().cloned().collect(), Vec::new(), METRICS_REGISTRY.clone(), )); diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 1b220df7d62..5fe7fd42bb1 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -389,7 +389,7 @@ pub async fn setup( // Graphql runner let subscription_manager = Arc::new(PanicSubscriptionManager {}); - let load_manager = LoadManager::new(&logger, Vec::new(), mock_registry.clone()); + let load_manager = LoadManager::new(&logger, Vec::new(), Vec::new(), mock_registry.clone()); let graphql_runner = Arc::new(GraphQlRunner::new( &logger, stores.network_store.clone(), From ed89073d65988af37fda72982e23703a76f30420 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 15:58:17 -0700 Subject: [PATCH 0381/2104] graph: Track query effort by deployment id and shape We used to conflate queries for different deployments that had the same shape hash. With this change, that's no longer the case --- graph/src/data/graphql/effort.rs | 40 +++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index eee496fed8a..6c103262be1 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -15,14 +15,26 @@ use crate::prelude::q; use crate::prelude::{debug, info, o, warn, Logger, ENV_VARS}; use crate::util::stats::MovingStats; +#[derive(PartialEq, Eq, Hash, Debug)] +struct QueryRef { + id: DeploymentId, + shape_hash: u64, +} + +impl QueryRef { + fn new(id: DeploymentId, shape_hash: u64) -> Self { + QueryRef { id, shape_hash } + } +} + struct QueryEffort { inner: Arc>, } -/// Track the effort for queries (identified by their ShapeHash) over a -/// time window. 
+/// Track the effort for queries (identified by their deployment id and +/// shape hash) over a time window. struct QueryEffortInner { - effort: HashMap, + effort: HashMap, total: MovingStats, } @@ -41,9 +53,9 @@ impl QueryEffort { } } - pub fn add(&self, shape_hash: u64, duration: Duration, gauge: &Gauge) { + pub fn add(&self, qref: QueryRef, duration: Duration, gauge: &Gauge) { let mut inner = self.inner.write().unwrap(); - inner.add(shape_hash, duration); + inner.add(qref, duration); gauge.set(inner.total.average().unwrap_or(Duration::ZERO).as_millis() as f64); } @@ -52,10 +64,10 @@ impl QueryEffort { /// at all, return `ZERO_DURATION` as the total effort. If we have no /// data for the particular query, return `None` as the effort /// for the query - pub fn current_effort(&self, shape_hash: u64) -> (Option, Duration) { + pub fn current_effort(&self, qref: &QueryRef) -> (Option, Duration) { let inner = self.inner.read().unwrap(); let total_effort = inner.total.duration(); - let query_effort = inner.effort.get(&shape_hash).map(|stats| stats.duration()); + let query_effort = inner.effort.get(qref).map(|stats| stats.duration()); (query_effort, total_effort) } } @@ -68,12 +80,12 @@ impl QueryEffortInner { } } - fn add(&mut self, shape_hash: u64, duration: Duration) { + fn add(&mut self, qref: QueryRef, duration: Duration) { let window_size = self.total.window_size; let bin_size = self.total.bin_size; let now = Instant::now(); self.effort - .entry(shape_hash) + .entry(qref) .or_insert_with(|| MovingStats::new(window_size, bin_size)) .add_at(now, duration); self.total.add_at(now, duration); @@ -273,7 +285,7 @@ impl LoadManager { pub fn record_work( &self, shard: &str, - _deployment: DeploymentId, + deployment: DeploymentId, shape_hash: u64, duration: Duration, cache_status: CacheStatus, @@ -282,9 +294,10 @@ impl LoadManager { .get(&cache_status) .map(GenericCounter::inc); if !ENV_VARS.load_management_is_disabled() { + let qref = QueryRef::new(deployment, shape_hash); 
self.effort .get(shard) - .map(|effort| effort.add(shape_hash, duration, &self.effort_gauge)); + .map(|effort| effort.add(qref, duration, &self.effort_gauge)); } } @@ -338,7 +351,7 @@ impl LoadManager { &self, wait_stats: &PoolWaitStats, shard: &str, - _deployment: DeploymentId, + deployment: DeploymentId, shape_hash: u64, query: &str, ) -> Decision { @@ -365,10 +378,11 @@ impl LoadManager { return Proceed; } + let qref = QueryRef::new(deployment, shape_hash); let (query_effort, total_effort) = self .effort .get(shard) - .map(|effort| effort.current_effort(shape_hash)) + .map(|effort| effort.current_effort(&qref)) .unwrap_or((None, Duration::ZERO)); // When `total_effort` is `Duratino::ZERO`, we haven't done any work. All are // welcome From 5fd651efdb3c1d72e862918657281cea4c1b0b76 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 16:02:52 -0700 Subject: [PATCH 0382/2104] graph: Rename QueryEffort to ShardEffort --- graph/src/data/graphql/effort.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index 6c103262be1..921655d6d70 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -27,29 +27,30 @@ impl QueryRef { } } -struct QueryEffort { - inner: Arc>, +/// Statistics about the query effort for a single database shard +struct ShardEffort { + inner: Arc>, } /// Track the effort for queries (identified by their deployment id and /// shape hash) over a time window. 
-struct QueryEffortInner { +struct ShardEffortInner { effort: HashMap, total: MovingStats, } /// Create a `QueryEffort` that uses the window and bin sizes configured in /// the environment -impl Default for QueryEffort { +impl Default for ShardEffort { fn default() -> Self { Self::new(ENV_VARS.load_window_size, ENV_VARS.load_bin_size) } } -impl QueryEffort { +impl ShardEffort { pub fn new(window_size: Duration, bin_size: Duration) -> Self { Self { - inner: Arc::new(RwLock::new(QueryEffortInner::new(window_size, bin_size))), + inner: Arc::new(RwLock::new(ShardEffortInner::new(window_size, bin_size))), } } @@ -72,7 +73,7 @@ impl QueryEffort { } } -impl QueryEffortInner { +impl ShardEffortInner { fn new(window_size: Duration, bin_size: Duration) -> Self { Self { effort: HashMap::default(), @@ -196,7 +197,7 @@ impl Decision { pub struct LoadManager { logger: Logger, - effort: HashMap, + effort: HashMap, /// List of query shapes that have been statically blocked through /// configuration blocked_queries: HashSet, @@ -264,7 +265,7 @@ impl LoadManager { let effort = HashMap::from_iter( shards .into_iter() - .map(|shard| (shard, QueryEffort::default())), + .map(|shard| (shard, ShardEffort::default())), ); Self { From 41328236f9f92585ec571e1e003b3950ca178b96 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 16:12:41 -0700 Subject: [PATCH 0383/2104] graph: Jail queries by deployment and shape hash --- graph/src/data/graphql/effort.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/effort.rs index 921655d6d70..98f8851de38 100644 --- a/graph/src/data/graphql/effort.rs +++ b/graph/src/data/graphql/effort.rs @@ -199,13 +199,15 @@ pub struct LoadManager { logger: Logger, effort: HashMap, /// List of query shapes that have been statically blocked through - /// configuration + /// configuration. 
We should really also include the deployment, but + /// that would require a change to the format of the file from which + /// these queries are read blocked_queries: HashSet, /// List of query shapes that have caused more than `JAIL_THRESHOLD` /// proportion of the work while the system was overloaded. Currently, /// there is no way for a query to get out of jail other than /// restarting the process - jailed_queries: RwLock>, + jailed_queries: RwLock>, kill_state: RwLock, effort_gauge: Box, query_counters: HashMap, @@ -365,7 +367,9 @@ impl LoadManager { return Proceed; } - if self.jailed_queries.read().unwrap().contains(&shape_hash) { + let qref = QueryRef::new(deployment, shape_hash); + + if self.jailed_queries.read().unwrap().contains(&qref) { return if ENV_VARS.load_simulate { Proceed } else { @@ -379,7 +383,6 @@ impl LoadManager { return Proceed; } - let qref = QueryRef::new(deployment, shape_hash); let (query_effort, total_effort) = self .effort .get(shard) @@ -406,11 +409,12 @@ impl LoadManager { // effort in an overload situation gets killed warn!(self.logger, "Jailing query"; "query" => query, + "sgd" => format!("sgd{}", qref.id), "wait_ms" => wait_ms.as_millis(), "query_effort_ms" => query_effort, "total_effort_ms" => total_effort, "ratio" => format!("{:.4}", query_effort/total_effort)); - self.jailed_queries.write().unwrap().insert(shape_hash); + self.jailed_queries.write().unwrap().insert(qref); return if ENV_VARS.load_simulate { Proceed } else { @@ -428,6 +432,7 @@ impl LoadManager { if ENV_VARS.load_simulate { debug!(self.logger, "Declining query"; "query" => query, + "sgd" => format!("sgd{}", qref.id), "wait_ms" => wait_ms.as_millis(), "query_weight" => format!("{:.2}", query_effort / total_effort), "kill_rate" => format!("{:.4}", kill_rate), From 3212df9f97ab86ff43618d6c5061796b847d38be Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 11 Aug 2023 16:50:53 -0700 Subject: [PATCH 0384/2104] all: Rename graph/src/data/graphql/effort.rs to 
load_manager.rs --- graph/src/data/graphql/{effort.rs => load_manager.rs} | 0 graph/src/data/graphql/mod.rs | 2 +- graphql/src/runner.rs | 2 +- graphql/src/store/resolver.rs | 2 +- graphql/src/subscription/mod.rs | 2 +- node/src/bin/manager.rs | 2 +- node/src/main.rs | 2 +- store/test-store/src/store.rs | 2 +- tests/src/fixture/mod.rs | 2 +- 9 files changed, 8 insertions(+), 8 deletions(-) rename graph/src/data/graphql/{effort.rs => load_manager.rs} (100%) diff --git a/graph/src/data/graphql/effort.rs b/graph/src/data/graphql/load_manager.rs similarity index 100% rename from graph/src/data/graphql/effort.rs rename to graph/src/data/graphql/load_manager.rs diff --git a/graph/src/data/graphql/mod.rs b/graph/src/data/graphql/mod.rs index b41df572f0d..1bb2c691411 100644 --- a/graph/src/data/graphql/mod.rs +++ b/graph/src/data/graphql/mod.rs @@ -23,7 +23,7 @@ pub use self::values::{ pub mod shape_hash; -pub mod effort; +pub mod load_manager; pub mod object_or_interface; pub use object_or_interface::ObjectOrInterface; diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 5e885092657..5bc1666ced1 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -14,7 +14,7 @@ use graph::{ SubscriptionError, SubscriptionResult, ENV_VARS, }, }; -use graph::{data::graphql::effort::LoadManager, prelude::QueryStoreManager}; +use graph::{data::graphql::load_manager::LoadManager, prelude::QueryStoreManager}; use graph::{ data::query::{QueryResults, QueryTarget}, prelude::QueryStore, diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index ec0b5ff9d08..d59978b35e5 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -3,7 +3,7 @@ use std::result; use std::sync::Arc; use graph::components::store::{SubscriptionManager, UnitStream}; -use graph::data::graphql::effort::LoadManager; +use graph::data::graphql::load_manager::LoadManager; use graph::data::graphql::{object, ObjectOrInterface}; use 
graph::data::query::{CacheStatus, Trace}; use graph::data::value::{Object, Word}; diff --git a/graphql/src/subscription/mod.rs b/graphql/src/subscription/mod.rs index 5c31ae293bc..d916732aa42 100644 --- a/graphql/src/subscription/mod.rs +++ b/graphql/src/subscription/mod.rs @@ -2,7 +2,7 @@ use std::result::Result; use std::time::{Duration, Instant}; use graph::components::store::UnitStream; -use graph::data::graphql::effort::LoadManager; +use graph::data::graphql::load_manager::LoadManager; use graph::schema::ApiSchema; use graph::{components::store::SubscriptionManager, prelude::*, schema::ErrorPolicy}; diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 80b50226a63..ece55b44c5d 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -5,7 +5,7 @@ use graph::bail; use graph::endpoint::EndpointMetrics; use graph::log::logger_with_levels; use graph::prelude::{MetricsRegistry, BLOCK_NUMBER_MAX}; -use graph::{data::graphql::effort::LoadManager, prelude::chrono, prometheus::Registry}; +use graph::{data::graphql::load_manager::LoadManager, prelude::chrono, prometheus::Registry}; use graph::{ prelude::{ anyhow::{self, Context as AnyhowContextTrait}, diff --git a/node/src/main.rs b/node/src/main.rs index f6161cf4d72..fb7a6623aa6 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -11,7 +11,7 @@ use graph::blockchain::{ use graph::components::link_resolver::{ArweaveClient, FileSizeLimit}; use graph::components::store::BlockStore; use graph::components::subgraph::Settings; -use graph::data::graphql::effort::LoadManager; +use graph::data::graphql::load_manager::LoadManager; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; use graph::firehose::{FirehoseEndpoints, FirehoseNetworks}; diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 82ce2590d5f..0e9b537e5fe 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -1,6 +1,6 @@ use diesel::{self, PgConnection}; use 
graph::blockchain::mock::MockDataSource; -use graph::data::graphql::effort::LoadManager; +use graph::data::graphql::load_manager::LoadManager; use graph::data::query::QueryResults; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError}; diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 5fe7fd42bb1..829f0f8ea6f 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -20,7 +20,7 @@ use graph::components::link_resolver::{ArweaveClient, ArweaveResolver, FileSizeL use graph::components::metrics::MetricsRegistry; use graph::components::store::{BlockStore, DeploymentLocator}; use graph::components::subgraph::Settings; -use graph::data::graphql::effort::LoadManager; +use graph::data::graphql::load_manager::LoadManager; use graph::data::query::{Query, QueryTarget}; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; use graph::env::EnvVars; From d57831dd3e08d88c2ddce1e92a8dbf05a6f8caa9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 15 Aug 2023 14:20:36 -0700 Subject: [PATCH 0385/2104] graph: Report query_effort_ms and query_kill_rate by shard --- graph/src/data/graphql/load_manager.rs | 29 ++++++++++++++++---------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/graph/src/data/graphql/load_manager.rs b/graph/src/data/graphql/load_manager.rs index 98f8851de38..5b2b81ba97c 100644 --- a/graph/src/data/graphql/load_manager.rs +++ b/graph/src/data/graphql/load_manager.rs @@ -7,7 +7,7 @@ use std::iter::FromIterator; use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; -use crate::components::metrics::{Counter, Gauge, MetricsRegistry}; +use crate::components::metrics::{Counter, GaugeVec, MetricsRegistry}; use crate::components::store::{DeploymentId, PoolWaitStats}; use crate::data::graphql::shape_hash::shape_hash; use crate::data::query::{CacheStatus, QueryExecutionError}; @@ -15,6 +15,8 @@ use crate::prelude::q; use 
crate::prelude::{debug, info, o, warn, Logger, ENV_VARS}; use crate::util::stats::MovingStats; +const SHARD_LABEL: [&str; 1] = ["shard"]; + #[derive(PartialEq, Eq, Hash, Debug)] struct QueryRef { id: DeploymentId, @@ -54,10 +56,12 @@ impl ShardEffort { } } - pub fn add(&self, qref: QueryRef, duration: Duration, gauge: &Gauge) { + pub fn add(&self, shard: &str, qref: QueryRef, duration: Duration, gauge: &GaugeVec) { let mut inner = self.inner.write().unwrap(); inner.add(qref, duration); - gauge.set(inner.total.average().unwrap_or(Duration::ZERO).as_millis() as f64); + gauge + .with_label_values(&[shard]) + .set(inner.total.average().unwrap_or(Duration::ZERO).as_millis() as f64); } /// Return what we know right now about the effort for the query @@ -209,9 +213,9 @@ pub struct LoadManager { /// restarting the process jailed_queries: RwLock>, kill_state: RwLock, - effort_gauge: Box, + effort_gauge: Box, query_counters: HashMap, - kill_rate_gauge: Box, + kill_rate_gauge: Box, } impl LoadManager { @@ -236,18 +240,19 @@ impl LoadManager { }; info!(logger, "Creating LoadManager in {} mode", mode,); + let shard_label: Vec<_> = SHARD_LABEL.into_iter().map(String::from).collect(); let effort_gauge = registry - .new_gauge( + .new_gauge_vec( "query_effort_ms", "Moving average of time spent running queries", - HashMap::new(), + shard_label.clone(), ) .expect("failed to create `query_effort_ms` counter"); let kill_rate_gauge = registry - .new_gauge( + .new_gauge_vec( "query_kill_rate", "The rate at which the load manager kills queries", - HashMap::new(), + shard_label, ) .expect("failed to create `query_kill_rate` counter"); let query_counters = CacheStatus::iter() @@ -300,7 +305,7 @@ impl LoadManager { let qref = QueryRef::new(deployment, shape_hash); self.effort .get(shard) - .map(|effort| effort.add(qref, duration, &self.effort_gauge)); + .map(|effort| effort.add(shard, qref, duration, &self.effort_gauge)); } } @@ -529,7 +534,9 @@ impl LoadManager { Skip => { /* do nothing */ 
} } } - self.kill_rate_gauge.set(kill_rate); + self.kill_rate_gauge + .with_label_values(&[shard]) + .set(kill_rate); kill_rate } } From 837948a0b193c9ec75908c981984d568ae9ae160 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 17 Aug 2023 13:31:05 -0700 Subject: [PATCH 0386/2104] graph: Make load management decisions separately for each shard --- graph/src/data/graphql/load_manager.rs | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/graph/src/data/graphql/load_manager.rs b/graph/src/data/graphql/load_manager.rs index 5b2b81ba97c..5e314d1607a 100644 --- a/graph/src/data/graphql/load_manager.rs +++ b/graph/src/data/graphql/load_manager.rs @@ -212,7 +212,8 @@ pub struct LoadManager { /// there is no way for a query to get out of jail other than /// restarting the process jailed_queries: RwLock>, - kill_state: RwLock, + /// Per shard state of whether we are killing queries or not + kill_state: HashMap>, effort_gauge: Box, query_counters: HashMap, kill_rate_gauge: Box, @@ -270,9 +271,15 @@ impl LoadManager { .collect::>(); let effort = HashMap::from_iter( + shards + .iter() + .map(|shard| (shard.clone(), ShardEffort::default())), + ); + + let kill_state = HashMap::from_iter( shards .into_iter() - .map(|shard| (shard, ShardEffort::default())), + .map(|shard| (shard, RwLock::new(KillState::new()))), ); Self { @@ -280,7 +287,7 @@ impl LoadManager { effort, blocked_queries, jailed_queries: RwLock::new(HashSet::new()), - kill_state: RwLock::new(KillState::new()), + kill_state, effort_gauge, query_counters, kill_rate_gauge, @@ -383,7 +390,7 @@ impl LoadManager { } let (overloaded, wait_ms) = self.overloaded(wait_stats); - let (kill_rate, last_update) = self.kill_state(); + let (kill_rate, last_update) = self.kill_state(shard); if !overloaded && kill_rate == 0.0 { return Proceed; } @@ -430,7 +437,7 @@ impl LoadManager { // Kill random queries in case we have no queries, or not enough queries // that cause at least 20% of the 
effort - let kill_rate = self.update_kill_rate(kill_rate, last_update, overloaded, wait_ms); + let kill_rate = self.update_kill_rate(shard, kill_rate, last_update, overloaded, wait_ms); let decline = thread_rng().gen_bool((kill_rate * query_effort / total_effort).min(1.0).max(0.0)); if decline { @@ -458,13 +465,14 @@ impl LoadManager { (overloaded, store_avg.unwrap_or(Duration::ZERO)) } - fn kill_state(&self) -> (f64, Instant) { - let state = self.kill_state.read().unwrap(); + fn kill_state(&self, shard: &str) -> (f64, Instant) { + let state = self.kill_state.get(shard).unwrap().read().unwrap(); (state.kill_rate, state.last_update) } fn update_kill_rate( &self, + shard: &str, mut kill_rate: f64, last_update: Instant, overloaded: bool, @@ -498,7 +506,7 @@ impl LoadManager { kill_rate = (kill_rate - KILL_RATE_STEP_DOWN).max(0.0); } let event = { - let mut state = self.kill_state.write().unwrap(); + let mut state = self.kill_state.get(shard).unwrap().write().unwrap(); state.kill_rate = kill_rate; state.last_update = now; state.log_event(now, kill_rate, overloaded) From 5cc12fc9e0f94ac14906dfbfead59a01e0fc9822 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Mon, 21 Aug 2023 15:30:09 +0530 Subject: [PATCH 0387/2104] store: remove entry from `subgraph_features` table when a subgraph is removed --- store/postgres/src/primary.rs | 8 ++++++-- store/test-store/tests/postgres/subgraph.rs | 5 ++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 6a75b63c4b1..5f640722565 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -1334,10 +1334,11 @@ impl<'a> Connection<'a> { .map(|_| ()) } - /// Remove all subgraph versions and the entry in `deployment_schemas` for - /// subgraph `id` in a transaction + /// Remove all subgraph versions, the entry in `deployment_schemas` and the entry in + /// `subgraph_features` for subgraph `id` in a transaction pub fn drop_site(&self, 
site: &Site) -> Result<(), StoreError> { use deployment_schemas as ds; + use subgraph_features as f; use subgraph_version as v; use unused_deployments as u; @@ -1355,6 +1356,9 @@ impl<'a> Connection<'a> { if !exists { delete(v::table.filter(v::deployment.eq(site.deployment.as_str()))) .execute(conn)?; + + // Remove the entry in `subgraph_features` + delete(f::table.filter(f::id.eq(site.deployment.as_str()))).execute(conn)?; } update(u::table.filter(u::id.eq(site.id))) diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index 13a09d0faac..a6b04dc4c77 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -524,7 +524,10 @@ fn subgraph_features() { ); assert_eq!(1, data_source_kinds.len()); - test_store::remove_subgraph(&id) + test_store::remove_subgraph(&id); + let features = get_subgraph_features(id.to_string()); + // Subgraph was removed, so we expect the entry to be removed from `subgraph_features` table + assert!(features.is_none()); }) } From 194141115b9f2652e5ab46d5ca6dc18b361b2965 Mon Sep 17 00:00:00 2001 From: Krishnanand V P <44740264+incrypto32@users.noreply.github.com> Date: Wed, 30 Aug 2023 16:19:26 +0530 Subject: [PATCH 0388/2104] Validate field exists in schema before setting entity (#4807) --- chain/substreams/src/trigger.rs | 5 +- graph/src/components/store/entity_cache.rs | 7 +- graph/src/components/store/err.rs | 4 + graph/src/data/store/mod.rs | 166 +++++++++----- graph/src/schema/ast.rs | 2 +- graph/src/schema/input_schema.rs | 39 +++- graph/src/util/intern.rs | 29 +++ runtime/test/src/test.rs | 253 +++++++++++++-------- runtime/wasm/src/host_exports.rs | 3 +- store/postgres/src/fork.rs | 6 +- 10 files changed, 358 insertions(+), 156 deletions(-) diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 01ee2e0a643..d3aeb91c020 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ 
-256,7 +256,10 @@ where let id = state.entity_cache.schema.id_value(&key)?; data.insert(Word::from("id"), id); - let entity = state.entity_cache.make_entity(data)?; + let entity = state.entity_cache.make_entity(data).map_err(|err| { + MappingError::Unknown(anyhow!("Failed to make entity: {}", err)) + })?; + state.entity_cache.set(key, entity)?; } Operation::Delete => { diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 0f8d0caed34..cf20c3d4b41 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; use crate::components::store::{self as s, Entity, EntityKey, EntityOperation}; -use crate::data::store::IntoEntityIterator; +use crate::data::store::{EntityValidationError, IntoEntityIterator}; use crate::prelude::ENV_VARS; use crate::schema::InputSchema; use crate::util::intern::Error as InternError; @@ -117,7 +117,10 @@ impl EntityCache { } /// Make a new entity. 
The entity is not part of the cache - pub fn make_entity(&self, iter: I) -> Result { + pub fn make_entity( + &self, + iter: I, + ) -> Result { self.schema.make_entity(iter) } diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index 4a689463c85..3cfa74bef5a 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -1,4 +1,5 @@ use super::{BlockNumber, DeploymentHash, DeploymentSchemaVersion}; +use crate::data::store::EntityValidationError; use crate::prelude::QueryExecutionError; use crate::util::intern::Error as InternError; @@ -11,6 +12,8 @@ use tokio::task::JoinError; pub enum StoreError { #[error("store error: {0:#}")] Unknown(Error), + #[error("Entity validation failed: {0}")] + EntityValidationError(EntityValidationError), #[error( "tried to set entity of type `{0}` with ID \"{1}\" but an entity of type `{2}`, \ which has an interface in common with `{0}`, exists with the same ID" @@ -84,6 +87,7 @@ impl Clone for StoreError { fn clone(&self) -> Self { match self { Self::Unknown(arg0) => Self::Unknown(anyhow!("{}", arg0)), + Self::EntityValidationError(arg0) => Self::EntityValidationError(arg0.clone()), Self::ConflictingId(arg0, arg1, arg2) => { Self::ConflictingId(arg0.clone(), arg1.clone(), arg2.clone()) } diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index d80362dc39d..e0e84e36c52 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,7 +1,7 @@ use crate::{ components::store::{DeploymentLocator, EntityKey, EntityType}, data::graphql::ObjectTypeExt, - prelude::{anyhow::Context, lazy_static, q, r, s, CacheWeight, QueryExecutionError}, + prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, schema::InputSchema, util::intern::AtomPool, @@ -19,6 +19,7 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; use strum_macros::IntoStaticStr; +use thiserror::Error; use super::{ 
graphql::{ext::DirectiveFinder, TypeExt as _}, @@ -647,6 +648,59 @@ pub trait TryIntoEntityIterator: IntoIterator impl>> TryIntoEntityIterator for T {} +#[derive(Debug, Error, PartialEq, Eq, Clone)] +pub enum EntityValidationError { + #[error("The provided entity has fields not defined in the schema for entity `{entity}`")] + FieldsNotDefined { entity: String }, + + #[error("Entity {entity}[{id}]: unknown entity type `{entity}`")] + UnknownEntityType { entity: String, id: String }, + + #[error("Entity {entity}[{entity_id}]: field `{field}` is of type {expected_type}, but the value `{value}` contains a {actual_type} at index {index}")] + MismatchedElementTypeInList { + entity: String, + entity_id: String, + field: String, + expected_type: String, + value: String, + actual_type: String, + index: usize, + }, + + #[error("Entity {entity}[{entity_id}]: the value `{value}` for field `{field}` must have type {expected_type} but has type {actual_type}")] + InvalidFieldType { + entity: String, + entity_id: String, + value: String, + field: String, + expected_type: String, + actual_type: String, + }, + + #[error("Entity {entity}[{entity_id}]: missing value for non-nullable field `{field}`")] + MissingValueForNonNullableField { + entity: String, + entity_id: String, + field: String, + }, + + #[error("Entity {entity}[{entity_id}]: field `{field}` is derived and cannot be set")] + CannotSetDerivedField { + entity: String, + entity_id: String, + field: String, + }, + + #[error("Unknown key `{0}`. It probably is not part of the schema")] + UnknownKey(String), + + #[error("Internal error: no id attribute for entity `{entity}`")] + MissingIDAttribute { entity: String }, + + #[error("Unsupported type for `id` attribute")] + UnsupportedTypeForIDAttribute, +} + /// The `entity!` macro is a convenient way to create entities in tests. It /// can not be used in production code since it panics when creating the /// entity goes wrong. @@ -680,15 +734,14 @@ macro_rules! 
entity { } impl Entity { - pub fn make(pool: Arc, iter: I) -> Result { + pub fn make( + pool: Arc, + iter: I, + ) -> Result { let mut obj = Object::new(pool); for (key, value) in iter { - obj.insert(key, value).map_err(|e| { - anyhow!( - "Unknown key `{}`. It probably is not part of the schema", - e.not_interned() - ) - })?; + obj.insert(key, value) + .map_err(|e| EntityValidationError::UnknownKey(e.not_interned()))?; } let entity = Entity(obj); entity.check_id()?; @@ -731,15 +784,14 @@ impl Entity { v } - fn check_id(&self) -> Result<(), Error> { + fn check_id(&self) -> Result<(), EntityValidationError> { match self.get("id") { - None => Err(anyhow!( - "internal error: no id attribute for entity `{:?}`", - self.0 - )), + None => Err(EntityValidationError::MissingIDAttribute { + entity: format!("{:?}", self.0), + }), Some(Value::String(_)) => Ok(()), Some(Value::Bytes(_)) => Ok(()), - _ => Err(anyhow!("Entity has non-string `id` attribute")), + _ => Err(EntityValidationError::UnsupportedTypeForIDAttribute), } } @@ -801,7 +853,11 @@ impl Entity { /// Validate that this entity matches the object type definition in the /// schema. 
An entity that passes these checks can be stored /// successfully in the subgraph's database schema - pub fn validate(&self, schema: &InputSchema, key: &EntityKey) -> Result<(), anyhow::Error> { + pub fn validate( + &self, + schema: &InputSchema, + key: &EntityKey, + ) -> Result<(), EntityValidationError> { fn scalar_value_type(schema: &InputSchema, field_type: &s::Type) -> ValueType { use s::TypeDefinition as t; match field_type { @@ -851,13 +907,22 @@ impl Entity { // type for them, and validation would therefore fail return Ok(()); } - let object_type = schema.find_object_type(&key.entity_type).with_context(|| { - format!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, key.entity_id, key.entity_type - ) + + let object_type = schema.find_object_type(&key.entity_type).ok_or_else(|| { + EntityValidationError::UnknownEntityType { + entity: key.entity_type.to_string(), + id: key.entity_id.to_string(), + } })?; + for field in self.0.atoms() { + if !schema.has_field(&key.entity_type, field) { + return Err(EntityValidationError::FieldsNotDefined { + entity: key.entity_type.clone().into_string(), + }); + } + } + for field in &object_type.fields { let is_derived = field.is_derived(); match (self.get(&field.name), is_derived) { @@ -870,50 +935,47 @@ impl Entity { if let Value::List(elts) = value { for (index, elt) in elts.iter().enumerate() { if !elt.is_assignable(&scalar_type, false) { - anyhow::bail!( - "Entity {}[{}]: field `{}` is of type {}, but the value `{}` \ - contains a {} at index {}", - key.entity_type, - key.entity_id, - field.name, - &field.field_type, - value, - elt.type_name(), - index + return Err( + EntityValidationError::MismatchedElementTypeInList { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + field: field.name.to_string(), + expected_type: field.field_type.to_string(), + value: value.to_string(), + actual_type: elt.type_name().to_string(), + index, + }, ); } } } } if 
!value.is_assignable(&scalar_type, field.field_type.is_list()) { - anyhow::bail!( - "Entity {}[{}]: the value `{}` for field `{}` must have type {} but has type {}", - key.entity_type, - key.entity_id, - value, - field.name, - &field.field_type, - value.type_name() - ); + return Err(EntityValidationError::InvalidFieldType { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + value: value.to_string(), + field: field.name.to_string(), + expected_type: field.field_type.to_string(), + actual_type: value.type_name().to_string(), + }); } } (None, false) => { if field.field_type.is_non_null() { - anyhow::bail!( - "Entity {}[{}]: missing value for non-nullable field `{}`", - key.entity_type, - key.entity_id, - field.name, - ); + return Err(EntityValidationError::MissingValueForNonNullableField { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + field: field.name.to_string(), + }); } } (Some(_), true) => { - anyhow::bail!( - "Entity {}[{}]: field `{}` is derived and can not be set", - key.entity_type, - key.entity_id, - field.name, - ); + return Err(EntityValidationError::CannotSetDerivedField { + entity: key.entity_type.to_string(), + entity_id: key.entity_id.to_string(), + field: field.name.to_string(), + }); } (None, true) => { // derived fields should not be set @@ -1102,7 +1164,7 @@ fn entity_validation() { thing.set("cruft", "wat").unwrap(); check( thing, - "Entity Thing[t8]: field `cruft` is derived and can not be set", + "Entity Thing[t8]: field `cruft` is derived and cannot be set", ); } diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 8c588e63273..5b82b793edb 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -513,6 +513,6 @@ fn entity_validation() { thing.set("cruft", "wat").unwrap(); check( thing, - "Entity Thing[t8]: field `cruft` is derived and can not be set", + "Entity Thing[t8]: field `cruft` is derived and cannot be set", ); } diff --git 
a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 22d3bfd7899..de26dd30149 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; @@ -9,12 +9,14 @@ use crate::cheap_clone::CheapClone; use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::store::{self, scalar, IntoEntityIterator, TryIntoEntityIterator}; +use crate::data::store::{ + self, scalar, EntityValidationError, IntoEntityIterator, TryIntoEntityIterator, +}; use crate::data::subgraph::schema::POI_DIGEST; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; -use crate::util::intern::AtomPool; +use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; use super::{ApiSchema, Schema, SchemaValidationError}; @@ -33,6 +35,8 @@ pub struct InputSchema { pub struct Inner { schema: Schema, immutable_types: HashSet, + // Maps each entity type to its field names + field_names: HashMap>, pool: Arc, } @@ -57,10 +61,26 @@ impl InputSchema { let pool = Arc::new(atom_pool(&schema.document)); + let field_names = HashMap::from_iter( + schema + .document + .get_object_type_definitions() + .into_iter() + .map(|obj_type| { + let fields: Vec<_> = obj_type + .fields + .iter() + .map(|field| pool.lookup(&field.name).unwrap()) + .collect(); + (EntityType::from(obj_type), fields) + }), + ); + Self { inner: Arc::new(Inner { schema, immutable_types, + field_names, pool, }), } @@ -335,7 +355,10 @@ impl InputSchema { self.inner.schema.validate() } - pub fn make_entity(&self, iter: I) -> Result { + pub fn make_entity( + &self, + iter: I, + ) -> Result { 
Entity::make(self.inner.pool.clone(), iter) } @@ -348,6 +371,14 @@ impl InputSchema { ) -> Result { Entity::try_make(self.inner.pool.clone(), iter) } + + pub fn has_field(&self, entity_type: &EntityType, field: Atom) -> bool { + self.inner + .field_names + .get(entity_type) + .map(|fields| fields.contains(&field)) + .unwrap_or(false) + } } /// Create a new pool that contains the names of all the types defined diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index f7d0ee4b728..31da953efc2 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -287,6 +287,10 @@ impl Object { fn same_pool(&self, other: &Object) -> bool { Arc::ptr_eq(&self.pool, &other.pool) } + + pub fn atoms(&self) -> AtomIter<'_, V> { + AtomIter::new(self) + } } impl Object { @@ -377,6 +381,31 @@ impl Iterator for ObjectOwningIter { } } +pub struct AtomIter<'a, V> { + iter: std::slice::Iter<'a, Entry>, +} + +impl<'a, V> AtomIter<'a, V> { + fn new(object: &'a Object) -> Self { + Self { + iter: object.entries.as_slice().iter(), + } + } +} + +impl<'a, V> Iterator for AtomIter<'a, V> { + type Item = Atom; + + fn next(&mut self) -> Option { + while let Some(entry) = self.iter.next() { + if entry.key != TOMBSTONE_KEY { + return Some(entry.key); + } + } + None + } +} + impl IntoIterator for Object { type Item = as Iterator>::Item; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 751266f3416..7b68bd41dfc 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1217,104 +1217,92 @@ async fn recursion_limit() { .contains("recursion limit reached")); } -/// Test the various ways in which `store_set` sets the `id` of entities and -/// errors when there are issues -#[tokio::test] -async fn test_store_set_id() { - struct Host { - ctx: MappingContext, - host_exports: host_exports::test_support::HostExports, - stopwatch: StopwatchMetrics, - gas: GasCounter, - } - - impl Host { - async fn new() -> Host { - let version = 
ENV_VARS.mappings.max_api_version.clone(); - let wasm_file = wasm_file_path("boolean.wasm", API_VERSION_0_0_5); - - let ds = mock_data_source(&wasm_file, version.clone()); - - let store = STORE.clone(); - let deployment = DeploymentHash::new("hostStoreSetId".to_string()).unwrap(); - let deployment = test_store::create_test_subgraph( - &deployment, - "type User @entity { - id: ID!, - name: String, - } - - type Binary @entity { - id: Bytes! - }", - ) - .await; - - let ctx = mock_context(deployment.clone(), ds, store.subgraph_store(), version); - let host_exports = host_exports::test_support::HostExports::new(&ctx); - - let metrics_registry = Arc::new(MetricsRegistry::mock()); - let stopwatch = StopwatchMetrics::new( - ctx.logger.clone(), - deployment.hash.clone(), - "test", - metrics_registry.clone(), - ); - let gas = GasCounter::new(); - - Host { - ctx, - host_exports, - stopwatch, - gas, - } +struct Host { + ctx: MappingContext, + host_exports: host_exports::test_support::HostExports, + stopwatch: StopwatchMetrics, + gas: GasCounter, +} + +impl Host { + async fn new(schema: &str, deployment_hash: &str, wasm_file: &str) -> Host { + let version = ENV_VARS.mappings.max_api_version.clone(); + let wasm_file = wasm_file_path(wasm_file, API_VERSION_0_0_5); + + let ds = mock_data_source(&wasm_file, version.clone()); + + let store = STORE.clone(); + let deployment = DeploymentHash::new(deployment_hash.to_string()).unwrap(); + let deployment = test_store::create_test_subgraph(&deployment, schema).await; + let ctx = mock_context(deployment.clone(), ds, store.subgraph_store(), version); + let host_exports = host_exports::test_support::HostExports::new(&ctx); + + let metrics_registry = Arc::new(MetricsRegistry::mock()); + let stopwatch = StopwatchMetrics::new( + ctx.logger.clone(), + deployment.hash.clone(), + "test", + metrics_registry.clone(), + ); + let gas = GasCounter::new(); + + Host { + ctx, + host_exports, + stopwatch, + gas, } + } - fn store_set( - &mut self, - 
entity_type: &str, - id: &str, - data: Vec<(&str, &str)>, - ) -> Result<(), HostExportError> { - let data: Vec<_> = data.into_iter().map(|(k, v)| (k, Value::from(v))).collect(); - self.store_setv(entity_type, id, data) - } + fn store_set( + &mut self, + entity_type: &str, + id: &str, + data: Vec<(&str, &str)>, + ) -> Result<(), HostExportError> { + let data: Vec<_> = data.into_iter().map(|(k, v)| (k, Value::from(v))).collect(); + self.store_setv(entity_type, id, data) + } - fn store_setv( - &mut self, - entity_type: &str, - id: &str, - data: Vec<(&str, Value)>, - ) -> Result<(), HostExportError> { - let id = String::from(id); - let data = HashMap::from_iter(data.into_iter().map(|(k, v)| (Word::from(k), v))); - self.host_exports.store_set( - &self.ctx.logger, - &mut self.ctx.state, - &self.ctx.proof_of_indexing, - entity_type.to_string(), - id, - data, - &self.stopwatch, - &self.gas, - ) - } + fn store_setv( + &mut self, + entity_type: &str, + id: &str, + data: Vec<(&str, Value)>, + ) -> Result<(), HostExportError> { + let id = String::from(id); + let data = HashMap::from_iter(data.into_iter().map(|(k, v)| (Word::from(k), v))); + self.host_exports.store_set( + &self.ctx.logger, + &mut self.ctx.state, + &self.ctx.proof_of_indexing, + entity_type.to_string(), + id, + data, + &self.stopwatch, + &self.gas, + ) + } - fn store_get( - &mut self, - entity_type: &str, - id: &str, - ) -> Result>, anyhow::Error> { - let user_id = String::from(id); - self.host_exports.store_get( - &mut self.ctx.state, - entity_type.to_string(), - user_id, - &self.gas, - ) - } + fn store_get( + &mut self, + entity_type: &str, + id: &str, + ) -> Result>, anyhow::Error> { + let user_id = String::from(id); + self.host_exports.store_get( + &mut self.ctx.state, + entity_type.to_string(), + user_id, + &self.gas, + ) } +} +/// Test the various ways in which `store_set` sets the `id` of entities and +/// errors when there are issues +#[tokio::test] +async fn test_store_set_id() { #[track_caller] fn 
err_says(err: E, exp: &str) { let err = err.to_string(); @@ -1326,7 +1314,17 @@ async fn test_store_set_id() { const BID: &str = "0xdeadbeef"; const BINARY: &str = "Binary"; - let mut host = Host::new().await; + let schema = "type User @entity { + id: ID!, + name: String, + } + + type Binary @entity { + id: Bytes!, + name: String, + }"; + + let mut host = Host::new(schema, "hostStoreSetId", "boolean.wasm").await; host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) .expect("setting with same id works"); @@ -1387,5 +1385,72 @@ async fn test_store_set_id() { let err = host .store_setv(BINARY, BID, vec![("id", Value::Int(32))]) .expect_err("id must be Bytes"); - err_says(err, "Entity has non-string `id` attribute"); + err_says(err, "Unsupported type for `id` attribute"); +} + +/// Test setting fields that are not defined in the schema +/// This should return an error +#[tokio::test] +async fn test_store_set_invalid_fields() { + #[track_caller] + fn err_says(err: E, exp: &str) { + let err = err.to_string(); + assert!(err.contains(exp), "expected `{err}` to contain `{exp}`"); + } + + const UID: &str = "u1"; + const USER: &str = "User"; + const BID: &str = "0xdeadbeef"; + const BINARY: &str = "Binary"; + let schema = " + type User @entity { + id: ID!, + name: String + } + + type Binary @entity { + id: Bytes!, + test: String, + test2: String + }"; + + let mut host = Host::new(schema, "hostStoreSetInvalidFields", "boolean.wasm").await; + + host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) + .unwrap(); + + let err = host + .store_set( + USER, + UID, + vec![ + ("id", "u1"), + ("name", "user1"), + ("test", "invalid_field"), + ("test2", "invalid_field"), + ], + ) + .err() + .unwrap(); + + // The order of `test` and `test2` is not guaranteed + // So we just check the string contains them + let err_string = err.to_string(); + dbg!(err_string.as_str()); + assert!(err_string + .contains("The provided entity has fields not defined in the schema for entity 
`User`")); + + let err = host + .store_set( + USER, + UID, + vec![("id", "u1"), ("name", "user1"), ("test3", "invalid_field")], + ) + .err() + .unwrap(); + + err_says( + err, + "Unknown key `test3`. It probably is not part of the schema", + ) } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 3e90c117dba..d6820298c74 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -214,7 +214,8 @@ impl HostExports { let entity = state .entity_cache - .make_entity(data.into_iter().map(|(key, value)| (key, value)))?; + .make_entity(data.into_iter().map(|(key, value)| (key, value))) + .map_err(|e| HostExportError::Deterministic(anyhow!(e)))?; state.entity_cache.set(key, entity)?; diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index ddad71e2514..58c29aebd67 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -220,7 +220,11 @@ query Query ($id: String) {{ map }; - Ok(Some(schema.make_entity(map)?)) + Ok(Some( + schema + .make_entity(map) + .map_err(|e| StoreError::EntityValidationError(e))?, + )) } } From d25f7feaf24e29ec13417431abdbb51a8190d3d4 Mon Sep 17 00:00:00 2001 From: DaMandal0rian <3614052+DaMandal0rian@users.noreply.github.com> Date: Wed, 30 Aug 2023 19:47:52 +0300 Subject: [PATCH 0389/2104] set sslmode paramater and other connection options to be configurable (#4840) --- docker/Dockerfile | 1 + docker/start | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8c0a8e19919..0044c6b7812 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -63,6 +63,7 @@ ENV postgres_host "" ENV postgres_user "" ENV postgres_pass "" ENV postgres_db "" +ENV postgres_args "sslmode=prefer" # The full URL to the IPFS node ENV ipfs "" # The etherum network(s) to connect to. 
Set this to a space-separated diff --git a/docker/start b/docker/start index 02d57748238..f1e4106363e 100755 --- a/docker/start +++ b/docker/start @@ -65,7 +65,7 @@ run_graph_node() { else unset GRAPH_NODE_CONFIG postgres_port=${postgres_port:-5432} - postgres_url="postgresql://$postgres_user:$postgres_pass@$postgres_host:$postgres_port/$postgres_db?sslmode=prefer" + postgres_url="postgresql://$postgres_user:$postgres_pass@$postgres_host:$postgres_port/$postgres_db?$postgres_args" wait_for_ipfs "$ipfs" echo "Waiting for Postgres ($postgres_host:$postgres_port)" From 95a0329c62c74082376d753aceb5bb63a087c102 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 10:17:33 +0100 Subject: [PATCH 0390/2104] build(deps): bump url from 2.4.0 to 2.4.1 (#4835) Bumps [url](https://github.com/servo/rust-url) from 2.4.0 to 2.4.1. - [Release notes](https://github.com/servo/rust-url/releases) - [Commits](https://github.com/servo/rust-url/compare/v2.4.0...v2.4.1) --- updated-dependencies: - dependency-name: url dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- node/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2793f65351..f794c35a436 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5064,9 +5064,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna 0.4.0", diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 72e720882e9..272c5c0b2ee 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -49,7 +49,7 @@ tokio = { version = "1.32.0", features = ["time", "sync", "macros", "test-util", tokio-stream = { version = "0.1.14", features = ["sync"] } tokio-retry = "0.3.0" toml = "0.7.6" -url = "2.4.0" +url = "2.4.1" prometheus = "0.13.3" priority-queue = "0.7.0" tonic = { workspace = true } diff --git a/node/Cargo.toml b/node/Cargo.toml index e4ed4d3e66d..3d67b08682e 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -19,7 +19,7 @@ git-testament = "0.2" graphql-parser = "0.4.0" futures = { version = "0.3.1", features = ["compat"] } lazy_static = "1.2.0" -url = "2.4.0" +url = "2.4.1" graph = { path = "../graph" } graph-core = { path = "../core" } graph-chain-arweave = { path = "../chain/arweave" } From d8803ad196e0b171a12d0f1f0113b6015f5279c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 10:17:48 +0100 Subject: [PATCH 0391/2104] build(deps): bump openssl from 0.10.56 to 0.10.57 (#4833) Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.56 to 0.10.57. 
- [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.56...openssl-v0.10.57) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 34 ++++++++++++++++++++-------------- store/postgres/Cargo.toml | 2 +- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f794c35a436..73e8e0bb7c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -181,7 +181,7 @@ checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.1", "bytes", "futures-util", "http", @@ -303,6 +303,12 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da1976d75adbe5fbc88130ecd119529cf1cc6a93ae1546d8696ee66f0d21af1" +[[package]] +name = "bitflags" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" + [[package]] name = "bitvec" version = "1.0.0" @@ -522,7 +528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", - "bitflags", + "bitflags 1.3.1", "clap_derive", "clap_lex", "indexmap 1.9.3", @@ -978,7 +984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" dependencies = [ "bigdecimal", - "bitflags", + "bitflags 1.3.1", "byteorder", "chrono", "diesel_derives", @@ -2059,7 +2065,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" 
dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.1", "bytes", "headers-core", "http", @@ -2996,11 +3002,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.56" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" +checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags", + "bitflags 2.4.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3028,9 +3034,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.91" +version = "0.9.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" +checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b" dependencies = [ "cc", "libc", @@ -3580,7 +3586,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ - "bitflags", + "bitflags 1.3.1", ] [[package]] @@ -3628,7 +3634,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" dependencies = [ - "bitflags", + "bitflags 1.3.1", "libc", "mach", "winapi", @@ -3741,7 +3747,7 @@ version = "0.37.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aae838e49b3d63e9274e1c01833cc8139d3fec468c3b84688c628f44b1ae11d" dependencies = [ - "bitflags", + "bitflags 1.3.1", "errno 0.3.0", "io-lifetimes", "libc", @@ -3892,7 +3898,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ 
- "bitflags", + "bitflags 1.3.1", "core-foundation", "core-foundation-sys", "libc", @@ -4848,7 +4854,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e980386f06883cf4d0578d6c9178c81f68b45d77d00f2c2c1bc034b3439c2c56" dependencies = [ - "bitflags", + "bitflags 1.3.1", "bytes", "futures-core", "futures-util", diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index c78277b1246..2fcf2f4485c 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -25,7 +25,7 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" -openssl = "0.10.56" +openssl = "0.10.57" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" From 2734f286751cb8e0182fe0a657ed6630dee1113c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 10:18:03 +0100 Subject: [PATCH 0392/2104] build(deps): bump serde_plain from 1.0.1 to 1.0.2 (#4832) Bumps [serde_plain](https://github.com/mitsuhiko/serde-plain) from 1.0.1 to 1.0.2. - [Changelog](https://github.com/mitsuhiko/serde-plain/blob/master/CHANGELOG) - [Commits](https://github.com/mitsuhiko/serde-plain/compare/1.0.1...1.0.2) --- updated-dependencies: - dependency-name: serde_plain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graph/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73e8e0bb7c4..87766ad65f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3957,9 +3957,9 @@ dependencies = [ [[package]] name = "serde_plain" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae" +checksum = "9ce1fc6db65a611022b23a0dec6975d63fb80a302cb3388835ff02c097258d50" dependencies = [ "serde", ] diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 272c5c0b2ee..6726dc881a5 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -65,7 +65,7 @@ itertools = "0.11.0" # Our fork contains patches to make some fields optional for Celo and Fantom compatibility. # Without the "arbitrary_precision" feature, we get the error `data did not match any variant of untagged enum Response`. 
web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-patches-onto-0.18", features = ["arbitrary_precision"] } -serde_plain = "1.0.1" +serde_plain = "1.0.2" [dev-dependencies] clap = { version = "3.2.25", features = ["derive", "env"] } From 25d4c69a8613c15b3a7b9289a11ff6d1b515fb5e Mon Sep 17 00:00:00 2001 From: computeronix <19168174+computeronix@users.noreply.github.com> Date: Thu, 31 Aug 2023 20:11:01 -0400 Subject: [PATCH 0393/2104] Update docker-compose.yml (#4844) update ipfs to ipfs/kubo:v0.14.0 --- docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 86a7d035dd0..701fc68dacf 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -22,7 +22,7 @@ services: ethereum: 'mainnet:http://host.docker.internal:8545' GRAPH_LOG: info ipfs: - image: ipfs/go-ipfs:v0.10.0 + image: ipfs/kubo:v0.14.0 ports: - '5001:5001' volumes: From c31a1f097c0971cc6f01dfcf1d3d6350240a4719 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 23 Aug 2023 19:08:29 -0700 Subject: [PATCH 0394/2104] graph, graphql: Handle defaults for entity range better --- graph/src/components/store/mod.rs | 15 ++++++++++++++- graphql/src/execution/query.rs | 6 +++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 5bd27c65ab0..1afed696c23 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -410,6 +410,10 @@ pub struct EntityRange { } impl EntityRange { + /// The default value for `first` that we use when the user doesn't + /// specify one + pub const FIRST: u32 = 100; + /// Query for the first `n` entities. 
pub fn first(n: u32) -> Self { Self { @@ -419,6 +423,15 @@ impl EntityRange { } } +impl std::default::Default for EntityRange { + fn default() -> Self { + Self { + first: Some(Self::FIRST), + skip: 0, + } + } +} + /// The attribute we want to window by in an `EntityWindow`. We have to /// distinguish between scalar and list attributes since we need to use /// different queries for them, and the JSONB storage scheme can not @@ -591,7 +604,7 @@ impl EntityQuery { collection, filter: None, order: EntityOrder::Default, - range: EntityRange::first(100), + range: EntityRange::default(), logger: None, query_id: None, trace: false, diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index 6fceacaec6d..44fadff6ead 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -16,8 +16,8 @@ use graph::data::graphql::{ext::TypeExt, ObjectOrInterface}; use graph::data::query::QueryExecutionError; use graph::data::query::{Query as GraphDataQuery, QueryVariables}; use graph::prelude::{ - info, o, q, r, s, warn, BlockNumber, CheapClone, DeploymentHash, GraphQLMetrics, Logger, - TryFromValue, ENV_VARS, + info, o, q, r, s, warn, BlockNumber, CheapClone, DeploymentHash, EntityRange, GraphQLMetrics, + Logger, TryFromValue, ENV_VARS, }; use graph::schema::ast::{self as sast}; use graph::schema::ErrorPolicy; @@ -560,7 +560,7 @@ impl<'s> RawQuery<'s> { q::Value::Int(n) => Some(n.as_i64()? 
as u64), _ => None, }) - .unwrap_or(100); + .unwrap_or(EntityRange::FIRST as u64); max_entities .checked_add( max_entities.checked_mul(field_complexity).ok_or(Overflow)?, From 906f5e69881df1975115934a8715801d04f92e1e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 22 Aug 2023 14:04:10 -0700 Subject: [PATCH 0395/2104] graph, graphql, store: Fix ParentLink entries We used to omit entries for parents that have no children from the `ParentLink` which would violate the constraint that the child/children for the ith parent is in the ith place in the list of child ids. That is not a problem for the SQL queries we generate, but can be an issue for other uses of the data structure. --- graph/src/components/store/mod.rs | 2 +- graphql/src/store/prefetch.rs | 49 ++++++++++--------- store/postgres/src/relational_queries.rs | 8 ++- .../tests/postgres/relational_bytes.rs | 2 +- 4 files changed, 36 insertions(+), 25 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 1afed696c23..5f08bcce61c 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -461,7 +461,7 @@ pub enum ParentLink { /// The parent stores the id of one child. 
The ith entry in the /// vector contains the id of the child of the parent with id /// `EntityWindow.ids[i]` - Scalar(Vec), + Scalar(Vec>), } /// How many children a parent can have when the child stores diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 1fe3c6cfcea..48710227a99 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -295,10 +295,12 @@ impl<'a> JoinCond<'a> { // those and the parent ids let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() - .filter_map(|(id, node)| { - node.get(child_field) - .and_then(|value| value.as_str()) - .map(|child_id| (id, child_id.to_owned())) + .map(|(id, node)| { + ( + id, + node.get(child_field) + .and_then(|value| value.as_str().map(|s| s.to_string())), + ) }) .unzip(); @@ -310,25 +312,28 @@ impl<'a> JoinCond<'a> { // parent ids let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() - .filter_map(|(id, node)| { - node.get(child_field) - .and_then(|value| match value { - r::Value::List(values) => { - let values: Vec<_> = values - .iter() - .filter_map(|value| { - value.as_str().map(|value| value.to_owned()) - }) - .collect(); - if values.is_empty() { - None - } else { - Some(values) + .map(|(id, node)| { + ( + id, + node.get(child_field) + .and_then(|value| match value { + r::Value::List(values) => { + let values: Vec<_> = values + .iter() + .filter_map(|value| { + value.as_str().map(|value| value.to_owned()) + }) + .collect(); + if values.is_empty() { + None + } else { + Some(values) + } } - } - _ => None, - }) - .map(|child_ids| (id, child_ids)) + _ => None, + }) + .unwrap_or(Vec::new()), + ) }) .unzip(); (ids, ParentLink::List(child_ids)) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index c1a45cd5ecd..9377b995d2b 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -2102,7 +2102,13 @@ enum ParentIds { impl ParentIds { fn new(link: 
ParentLink) -> Self { match link { - ParentLink::Scalar(child_ids) => ParentIds::Scalar(child_ids), + ParentLink::Scalar(child_ids) => { + // Remove `None` child ids; query generation doesn't require + // that parent and child ids are in strict 1:1 + // correspondence + let child_ids = child_ids.into_iter().filter_map(|c| c).collect(); + ParentIds::Scalar(child_ids) + } ParentLink::List(child_ids) => { // Postgres will only accept child_ids, which is a Vec> // if all Vec are the same length. We therefore pad diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 554abbd591b..d6b41f08061 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -555,7 +555,7 @@ fn query() { ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], link: EntityLink::Parent( THING.clone(), - ParentLink::Scalar(vec![ROOT.to_owned(), ROOT.to_owned()]), + ParentLink::Scalar(vec![Some(ROOT.to_owned()), Some(ROOT.to_owned())]), ), column_names: AttributeNames::All, }]); From 8a08a7387133ff87327f3af3225c49c5277b3a9d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 22 Aug 2023 14:19:03 -0700 Subject: [PATCH 0396/2104] graph: More concise debug output for data::Object and data::Value --- graph/src/data/value.rs | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index 960db616429..c17d52b6d47 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -275,11 +275,18 @@ impl CacheWeight for Object { impl std::fmt::Debug for Object { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) + f.debug_map() + .entries(self.0.into_iter().map(|e| { + ( + e.key.as_ref().map(|w| w.as_str()).unwrap_or("---"), + &e.value, + ) + })) + .finish() } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] pub enum Value { Int(i64), 
Float(f64), @@ -506,3 +513,18 @@ impl From for q::Value { } } } + +impl std::fmt::Debug for Value { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Value::Int(i) => f.debug_tuple("Int").field(i).finish(), + Value::Float(n) => f.debug_tuple("Float").field(n).finish(), + Value::String(s) => write!(f, "{s:?}"), + Value::Boolean(b) => write!(f, "{b}"), + Value::Null => write!(f, "null"), + Value::Enum(e) => write!(f, "{e}"), + Value::List(l) => f.debug_list().entries(l).finish(), + Value::Object(o) => write!(f, "{o:?}"), + } + } +} From 13de37c5f2ff5aa2db2798180d622e8b5cfcf7b9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 22 Aug 2023 14:42:46 -0700 Subject: [PATCH 0397/2104] graph, graphql, store: Introduce a constant for g$parent_id --- graph/src/data/store/mod.rs | 3 +++ graphql/src/store/prefetch.rs | 3 ++- store/postgres/src/relational_queries.rs | 24 +++++++++++++------- store/test-store/tests/graph/entity_cache.rs | 3 ++- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index e0e84e36c52..0551d4d6bce 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -634,6 +634,9 @@ where lazy_static! { /// The name of the id attribute, `"id"` pub static ref ID: Word = Word::from("id"); + /// The name of the parent_id attribute that we inject into query + /// results + pub static ref PARENT_ID: Word = Word::from("g$parent_id"); } /// An entity is represented as a map of attribute names to values. 
diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 48710227a99..454a8817ff9 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -4,6 +4,7 @@ use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; +use graph::data::store::PARENT_ID; use graph::data::value::{Object, Word}; use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; @@ -401,7 +402,7 @@ impl<'a> Join<'a> { let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); for child in children.iter() { match child - .get("g$parent_id") + .get(&*PARENT_ID) .expect("the query that produces 'child' ensures there is always a g$parent_id") { r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 9377b995d2b..7e1d0eb7cf4 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -14,7 +14,7 @@ use diesel::Connection; use graph::components::store::write::WriteChunk; use graph::components::store::{DerivedEntityQuery, EntityKey}; -use graph::data::store::NULL; +use graph::data::store::{NULL, PARENT_ID}; use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -540,7 +540,7 @@ impl EntityData { // Simply ignore keys that do not have an underlying table // column; those will be things like the block_range that // is used internally for versioning - if key == "g$parent_id" { + if key == PARENT_ID.as_str() { if T::WITH_INTERNAL_KEYS { match &parent_type { None => { @@ -553,7 +553,7 @@ impl EntityData { } Some(parent_type) => Some( T::Value::from_column_value(parent_type, json) - .map(|value| (Word::from("g$parent_id"), value)), + .map(|value| (PARENT_ID.clone(), value)), ), } } else { @@ -2555,7 +2555,8 @@ impl<'a> FilterWindow<'a> { ) -> QueryResult<()> { out.push_sql("select '"); 
out.push_sql(self.table.object.as_str()); - out.push_sql("' as entity, c.id, c.vid, p.id::text as g$parent_id"); + out.push_sql("' as entity, c.id, c.vid, p.id::text as "); + out.push_sql(&*PARENT_ID); sort_key.select(&mut out, SelectStatementLevel::InnerStatement)?; self.children(ParentLimit::Outer, block, out) } @@ -3515,14 +3516,20 @@ impl<'a> SortKey<'a> { /// A boolean (use_sort_key_alias) is not a good idea and prone to errors. /// We could make it the standard and always use sort_key$ alias. fn order_by_parent(&self, out: &mut AstPass, use_sort_key_alias: bool) -> QueryResult<()> { + fn order_by_parent_id(out: &mut AstPass) { + out.push_sql("order by "); + out.push_sql(&*PARENT_ID); + out.push_sql(", "); + } + match self { SortKey::None => Ok(()), SortKey::IdAsc(_) => { - out.push_sql("order by g$parent_id, "); + order_by_parent_id(out); out.push_identifier(PRIMARY_KEY_COLUMN) } SortKey::IdDesc(_) => { - out.push_sql("order by g$parent_id, "); + order_by_parent_id(out); out.push_identifier(PRIMARY_KEY_COLUMN)?; out.push_sql(" desc"); Ok(()) @@ -3532,7 +3539,7 @@ impl<'a> SortKey<'a> { value, direction, } => { - out.push_sql("order by g$parent_id, "); + order_by_parent_id(out); SortKey::sort_expr( column, value, @@ -3986,7 +3993,8 @@ impl<'a> FilterQuery<'a> { ) -> QueryResult<()> { Self::select_entity_and_data(window.table, &mut out); out.push_sql(" from (\n"); - out.push_sql("select c.*, p.id::text as g$parent_id"); + out.push_sql("select c.*, p.id::text as "); + out.push_sql(&*PARENT_ID); window.children( ParentLimit::Ranked(&self.sort_key, &self.range), self.block, diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 7c036670788..b03c5d7b6ee 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -3,6 +3,7 @@ use graph::components::store::{ DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, GetScope, LoadRelatedRequest, 
ReadStore, StoredDynamicDataSource, WritableStore, }; +use graph::data::store::PARENT_ID; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; use graph::schema::InputSchema; @@ -741,7 +742,7 @@ fn no_internal_keys() { #[track_caller] fn check(entity: &Entity) { assert_eq!(None, entity.get("__typename")); - assert_eq!(None, entity.get("g$parent_id")); + assert_eq!(None, entity.get(&*PARENT_ID)); } let key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); From 2d6c531f6cdc96507e1b550af773eb3818d5f7a9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 21 Aug 2023 16:12:12 -0700 Subject: [PATCH 0398/2104] graph, graphql: Optimize away unneeded child queries If a query only needs to get the id of children, and we already have them in memory, do not run a query to fetch them again. Instead, construct the appropriate objects in memory. Fixes https://github.com/graphprotocol/graph-node/issues/4261 --- graph/src/components/store/mod.rs | 80 ++++++++++++++++++++++++++++++- graph/src/data/query/error.rs | 10 +++- graphql/src/store/prefetch.rs | 52 +++++++++++++++++--- 3 files changed, 133 insertions(+), 9 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 5f08bcce61c..136ea83d9bf 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -30,7 +30,7 @@ use crate::blockchain::Block; use crate::components::store::write::EntityModification; use crate::data::store::scalar::Bytes; use crate::data::store::*; -use crate::data::value::Word; +use crate::data::value::{Object, Word}; use crate::data_source::CausalityRegion; use crate::schema::InputSchema; use crate::util::intern; @@ -483,6 +483,84 @@ pub enum EntityLink { Parent(EntityType, ParentLink), } +impl EntityLink { + /// Return a list of objects that have only the `id`, parent id, and + /// typename set using the child ids from `self` when `self` is + /// 
`Parent`. If `self` is `Direct`, return `None` + /// + /// The list that is returned is sorted and truncated to `first` many + /// entries. + /// + /// This makes it possible to avoid running a query when all that is + /// needed is the `id` of the children + pub fn to_basic_objects(self, parents: &Vec, first: usize) -> Option> { + use crate::data::value::Value as V; + + fn basic_object(entity_type: &EntityType, parent: &str, child: String) -> Object { + let mut obj = Vec::new(); + obj.push((ID.clone(), V::String(child))); + obj.push((Word::from("__typename"), V::String(entity_type.to_string()))); + obj.push((PARENT_ID.clone(), V::String(parent.to_string()))); + Object::from_iter(obj) + } + + fn basic_objects( + entity_type: &EntityType, + parent: &str, + children: Vec, + ) -> Vec { + children + .into_iter() + .map(|child| basic_object(entity_type, parent, child)) + .collect() + } + + fn obj_key<'a>(obj: &'a Object) -> Option<(&'a str, &'a str)> { + match (obj.get(&*PARENT_ID), obj.get(ID.as_str())) { + (Some(V::String(p)), Some(V::String(id))) => Some((p, id)), + _ => None, + } + } + + fn obj_cmp(a: &Object, b: &Object) -> std::cmp::Ordering { + obj_key(a).cmp(&obj_key(b)) + } + + match self { + EntityLink::Direct(_, _) => return None, + EntityLink::Parent(entity_type, link) => { + let mut objects = Vec::new(); + match link { + ParentLink::List(ids) => { + for (parent, children) in parents.iter().zip(ids) { + objects.extend(basic_objects(&entity_type, parent, children)); + } + } + ParentLink::Scalar(ids) => { + for (parent, child) in parents.iter().zip(ids) { + if let Some(child) = child { + objects.push(basic_object(&entity_type, parent, child)); + } + } + } + } + // Sort the objects by parent id and child id just as + // running a query would + objects.sort_by(obj_cmp); + objects.truncate(first); + Some(objects) + } + } + } + + pub fn has_child_ids(&self) -> bool { + match self { + EntityLink::Direct(_, _) => false, + EntityLink::Parent(_, _) => true, + } + } +} 
+ /// Window results of an `EntityQuery` query along the parent's id: /// the `order_by`, `order_direction`, and `range` of the query apply to /// entities that belong to the same parent. Only entities that belong to diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index c50220f6012..5449a330c30 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -74,6 +74,8 @@ pub enum QueryExecutionError { InvalidSubgraphManifest, ResultTooBig(usize, usize), DeploymentNotFound(String), + IdMissing, + IdNotString, } impl QueryExecutionError { @@ -130,7 +132,9 @@ impl QueryExecutionError { | InvalidSubgraphManifest | ValidationError(_, _) | ResultTooBig(_, _) - | DeploymentNotFound(_) => false, + | DeploymentNotFound(_) + | IdMissing + | IdNotString => false, } } } @@ -274,7 +278,9 @@ impl fmt::Display for QueryExecutionError { SubgraphManifestResolveError(e) => write!(f, "failed to resolve subgraph manifest: {}", e), InvalidSubgraphManifest => write!(f, "invalid subgraph manifest file"), ResultTooBig(actual, limit) => write!(f, "the result size of {} is larger than the allowed limit of {}", actual, limit), - DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name) + DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name), + IdMissing => write!(f, "Entity is missing an `id` attribute"), + IdNotString => write!(f, "Entity is missing an `id` attribute") } } } diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 454a8817ff9..c2531977e1e 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -1,12 +1,11 @@ //! Run a GraphQL query and fetch all the entitied needed to build the //! 
final result -use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; -use graph::data::store::PARENT_ID; +use graph::data::store::{ID, PARENT_ID}; use graph::data::value::{Object, Word}; -use graph::prelude::{r, CacheWeight, CheapClone}; +use graph::prelude::{r, CacheWeight, CheapClone, EntityQuery, EntityRange}; use graph::slog::warn; use graph::util::cache_weight; use std::collections::BTreeMap; @@ -171,11 +170,11 @@ impl ValueExt for r::Value { } impl Node { - fn id(&self) -> Result { + fn id(&self) -> Result { match self.get("id") { - None => Err(anyhow!("Entity is missing an `id` attribute")), + None => Err(QueryExecutionError::IdMissing), Some(r::Value::String(s)) => Ok(s.clone()), - _ => Err(anyhow!("Entity has non-string `id` attribute")), + _ => Err(QueryExecutionError::IdNotString), } } @@ -658,6 +657,30 @@ fn execute_field( .map_err(|e| vec![e]) } +/// Check whether `field` only selects the `id` of its children and whether +/// it is safe to skip running `query` if we have all child ids in memory +/// already. +fn selects_id_only(field: &a::Field, query: &EntityQuery) -> bool { + if query.filter.is_some() || query.range.skip != 0 { + return false; + } + match &query.order { + EntityOrder::Ascending(attr, _) => { + if attr != ID.as_str() { + return false; + } + } + _ => { + return false; + } + } + field + .selection_set + .single_field() + .map(|field| field.name.as_str() == ID.as_str()) + .unwrap_or(false) +} + /// Query child entities for `parents` from the store. The `join` indicates /// in which child field to look for the parent's id/join field. When /// `is_single` is `true`, there is at most one child per parent. @@ -704,6 +727,23 @@ fn fetch( if windows.is_empty() { return Ok((vec![], Trace::None)); } + // See if we can short-circuit query execution and just reuse what + // we already have in memory. We could do this probably even with + // multiple windows, but this covers the most common case. 
+ if windows.len() == 1 && windows[0].link.has_child_ids() && selects_id_only(field, &query) { + let mut windows = windows; + // unwrap: we checked that len is 1 + let window = windows.pop().unwrap(); + let parent_ids = parents + .iter() + .map(|parent| parent.id()) + .collect::>() + .map_err(QueryExecutionError::from)?; + // unwrap: we checked in the if condition that the window has child ids + let first = query.range.first.unwrap_or(EntityRange::FIRST) as usize; + let objs = window.link.to_basic_objects(&parent_ids, first).unwrap(); + return Ok((objs.into_iter().map(Node::from).collect(), Trace::None)); + } query.collection = EntityCollection::Window(windows); } resolver From 9c65a8d8fa4daaff5150fb29c09f8289fe0e9757 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 31 Aug 2023 16:56:38 -0700 Subject: [PATCH 0399/2104] graph, graphql: Add feature flag to disable child optimization Setting GRAPH_STORE_DISABLE_CHILD_OPTIMIZATION=1 will disable the child optimization. This is meant as a safety switch in case the optimization causes problems. --- graph/src/env/store.rs | 7 +++++++ graphql/src/store/prefetch.rs | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 48150df9f4c..161c1a81e86 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -109,6 +109,10 @@ pub struct EnvVarsStore { /// is 10_000 which corresponds to 10MB. Setting this to 0 disables /// write batching. pub write_batch_size: usize, + /// Disable the optimization that skips certain child queries for + /// entities. Only as a safety valve. 
Remove after 2023-09-30 if the + /// optimization has not caused any issues + pub disable_child_optimization: bool, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -150,6 +154,7 @@ impl From for EnvVarsStore { history_slack_factor: x.history_slack_factor.0, write_batch_duration: Duration::from_secs(x.write_batch_duration_in_secs), write_batch_size: x.write_batch_size * 1_000, + disable_child_optimization: x.disable_child_optimization.0, } } } @@ -203,6 +208,8 @@ pub struct InnerStore { write_batch_duration_in_secs: u64, #[envconfig(from = "GRAPH_STORE_WRITE_BATCH_SIZE", default = "10000")] write_batch_size: usize, + #[envconfig(from = "GRAPH_STORE_DISABLE_CHILD_OPTIMIZATION", default = "false")] + disable_child_optimization: EnvVarBoolean, } #[derive(Clone, Copy, Debug)] diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index c2531977e1e..ff6e1e5781e 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -730,7 +730,11 @@ fn fetch( // See if we can short-circuit query execution and just reuse what // we already have in memory. We could do this probably even with // multiple windows, but this covers the most common case. 
- if windows.len() == 1 && windows[0].link.has_child_ids() && selects_id_only(field, &query) { + if !ENV_VARS.store.disable_child_optimization + && windows.len() == 1 + && windows[0].link.has_child_ids() + && selects_id_only(field, &query) + { let mut windows = windows; // unwrap: we checked that len is 1 let window = windows.pop().unwrap(); From e5261f8445956db29343060dcd07fe063c4f3cbd Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Mon, 11 Sep 2023 13:50:21 +0100 Subject: [PATCH 0400/2104] Add substreams block ingestor (#4839) * Add substreams block ingestor - Substreams ingestor not currently allowed for supported chains - Chains using substreams protocol can only have substreams endpoints - Add substream-head-tracker to the project - Embed substreams-head-tracker in substreams block ingestor --- Cargo.lock | 4 + Cargo.toml | 1 + chain/substreams/src/block_ingestor.rs | 160 +++++ chain/substreams/src/chain.rs | 36 +- chain/substreams/src/lib.rs | 1 + node/src/config.rs | 66 ++ node/src/main.rs | 46 +- substreams-head-tracker/Cargo.lock | 583 ++++++++++++++++++ substreams-head-tracker/Cargo.toml | 9 + substreams-head-tracker/Makefile | 15 + substreams-head-tracker/rust-toolchain.toml | 2 + substreams-head-tracker/src/lib.rs | 19 + .../substreams-head-tracker-v1.0.0.spkg | Bin 0 -> 89935 bytes substreams-head-tracker/substreams.yaml | 17 + 14 files changed, 948 insertions(+), 11 deletions(-) create mode 100644 chain/substreams/src/block_ingestor.rs create mode 100755 substreams-head-tracker/Cargo.lock create mode 100755 substreams-head-tracker/Cargo.toml create mode 100755 substreams-head-tracker/Makefile create mode 100755 substreams-head-tracker/rust-toolchain.toml create mode 100644 substreams-head-tracker/src/lib.rs create mode 100644 substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg create mode 100755 substreams-head-tracker/substreams.yaml diff --git a/Cargo.lock b/Cargo.lock index 87766ad65f6..4efd770d1e3 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -4310,6 +4310,10 @@ dependencies = [ "syn 2.0.12", ] +[[package]] +name = "substreams-head-tracker" +version = "1.0.0" + [[package]] name = "subtle" version = "2.4.1" diff --git a/Cargo.toml b/Cargo.toml index e3e6a020646..24c9d54df16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "runtime/*", "server/*", "store/*", + "substreams-head-tracker", "graph", "tests", ] diff --git a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs new file mode 100644 index 00000000000..98a0729fa11 --- /dev/null +++ b/chain/substreams/src/block_ingestor.rs @@ -0,0 +1,160 @@ +use std::{sync::Arc, time::Duration}; + +use crate::mapper::Mapper; +use anyhow::{Context, Error}; +use graph::blockchain::{ + client::ChainClient, substreams_block_stream::SubstreamsBlockStream, BlockIngestor, +}; +use graph::prelude::MetricsRegistry; +use graph::slog::trace; +use graph::substreams::Package; +use graph::tokio_stream::StreamExt; +use graph::{ + blockchain::block_stream::BlockStreamEvent, + cheap_clone::CheapClone, + components::store::ChainStore, + prelude::{async_trait, error, info, DeploymentHash, Logger}, + util::backoff::ExponentialBackoff, +}; +use prost::Message; + +const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = + include_bytes!("../../../substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg"); + +pub struct SubstreamsBlockIngestor { + chain_store: Arc, + client: Arc>, + logger: Logger, + chain_name: String, + metrics: Arc, +} + +impl SubstreamsBlockIngestor { + pub fn new( + chain_store: Arc, + client: Arc>, + logger: Logger, + chain_name: String, + metrics: Arc, + ) -> SubstreamsBlockIngestor { + SubstreamsBlockIngestor { + chain_store, + client, + logger, + chain_name, + metrics, + } + } + + async fn fetch_head_cursor(&self) -> String { + let mut backoff = + ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); + loop { + match self.chain_store.clone().chain_head_cursor() { + Ok(cursor) => 
return cursor.unwrap_or_default(), + Err(e) => { + error!(self.logger, "Fetching chain head cursor failed: {:#}", e); + + backoff.sleep_async().await; + } + } + } + } + + /// Consumes the incoming stream of blocks infinitely until it hits an error. In which case + /// the error is logged right away and the latest available cursor is returned + /// upstream for future consumption. + async fn process_blocks( + &self, + cursor: String, + mut stream: SubstreamsBlockStream, + ) -> String { + let mut latest_cursor = cursor; + + while let Some(message) = stream.next().await { + let (block_ptr, cursor) = match message { + Ok(BlockStreamEvent::ProcessBlock(triggers, cursor)) => { + (Arc::new(triggers.block), cursor) + } + Ok(BlockStreamEvent::Revert(_ptr, _cursor)) => { + trace!(self.logger, "Received undo block to ingest, skipping"); + continue; + } + Err(e) => { + info!( + self.logger, + "An error occurred while streaming blocks: {}", e + ); + break; + } + }; + + let res = self.process_new_block(block_ptr, cursor.to_string()).await; + if let Err(e) = res { + error!(self.logger, "Process block failed: {:#}", e); + break; + } + + latest_cursor = cursor.to_string() + } + + error!( + self.logger, + "Stream blocks complete unexpectedly, expecting stream to always stream blocks" + ); + latest_cursor + } + + async fn process_new_block( + &self, + block_ptr: Arc, + cursor: String, + ) -> Result<(), Error> { + trace!(self.logger, "Received new block to ingest {:?}", block_ptr); + + self.chain_store + .clone() + .set_chain_head(block_ptr, cursor) + .await + .context("Updating chain head")?; + + Ok(()) + } +} + +#[async_trait] +impl BlockIngestor for SubstreamsBlockIngestor { + async fn run(self: Box) { + let mapper = Arc::new(Mapper {}); + let mut latest_cursor = self.fetch_head_cursor().await; + let mut backoff = + ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); + let package = Package::decode(SUBSTREAMS_HEAD_TRACKER_BYTES.to_vec().as_ref()).unwrap(); 
+ + loop { + let stream = SubstreamsBlockStream::::new( + DeploymentHash::default(), + self.client.cheap_clone(), + None, + Some(latest_cursor.clone()), + mapper.cheap_clone(), + package.modules.clone(), + "map_blocks".to_string(), + vec![], + vec![], + self.logger.cheap_clone(), + self.metrics.cheap_clone(), + ); + + // Consume the stream of blocks until an error is hit + latest_cursor = self.process_blocks(latest_cursor, stream).await; + + // If we reach this point, we must wait a bit before retrying + backoff.sleep_async().await; + } + } + + fn network_name(&self) -> String { + self.chain_name.clone() + } +} diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index aa0d1b287a3..c1c3b15fbc8 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -1,10 +1,14 @@ +use crate::block_ingestor::SubstreamsBlockIngestor; use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersAdapter}; use anyhow::Error; use graph::blockchain::client::ChainClient; -use graph::blockchain::{BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter}; +use graph::blockchain::{ + BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, EmptyNodeCapabilities, + NoopRuntimeAdapter, +}; use graph::components::store::DeploymentCursorTracker; use graph::firehose::FirehoseEndpoints; -use graph::prelude::{BlockHash, LoggerFactory, MetricsRegistry}; +use graph::prelude::{BlockHash, CheapClone, LoggerFactory, MetricsRegistry}; use graph::{ blockchain::{ self, @@ -163,6 +167,32 @@ impl Blockchain for Chain { } fn block_ingestor(&self) -> anyhow::Result> { - unreachable!("Substreams rely on the block ingestor from the network they are processing") + Ok(Box::new(SubstreamsBlockIngestor::new( + self.chain_store.cheap_clone(), + self.client.cheap_clone(), + self.logger_factory.component_logger("", None), + "substreams".to_string(), + self.metrics_registry.cheap_clone(), + ))) + } +} + +impl BlockchainBuilder for BasicBlockchainBuilder { 
+ fn build(self) -> super::Chain { + let BasicBlockchainBuilder { + logger_factory, + name: _, + chain_store, + firehose_endpoints, + metrics_registry, + } = self; + + Chain { + chain_store, + block_stream_builder: Arc::new(crate::BlockStreamBuilder::new()), + logger_factory, + client: Arc::new(ChainClient::new_firehose(firehose_endpoints)), + metrics_registry, + } } } diff --git a/chain/substreams/src/lib.rs b/chain/substreams/src/lib.rs index 60215c453cc..5daccfda9b3 100644 --- a/chain/substreams/src/lib.rs +++ b/chain/substreams/src/lib.rs @@ -4,6 +4,7 @@ mod codec; mod data_source; mod trigger; +pub mod block_ingestor; pub mod mapper; pub use block_stream::BlockStreamBuilder; diff --git a/node/src/config.rs b/node/src/config.rs index 86d637321b3..815fcdd5986 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -530,6 +530,31 @@ impl Chain { for provider in self.providers.iter_mut() { provider.validate()? } + + if !matches!(self.protocol, BlockchainKind::Substreams) { + let has_only_substreams_providers = self + .providers + .iter() + .all(|provider| matches!(provider.details, ProviderDetails::Substreams(_))); + if has_only_substreams_providers { + bail!( + "{} protocol requires an rpc or firehose endpoint defined", + self.protocol + ); + } + } + + // When using substreams protocol, only substreams endpoints are allowed + if matches!(self.protocol, BlockchainKind::Substreams) { + let has_non_substreams_providers = self + .providers + .iter() + .any(|provider| !matches!(provider.details, ProviderDetails::Substreams(_))); + if has_non_substreams_providers { + bail!("Substreams protocol only supports substreams providers"); + } + } + Ok(()) } } @@ -1313,6 +1338,47 @@ mod tests { ); } + #[test] + fn fails_if_non_substreams_provider_for_substreams_protocol() { + let mut actual = toml::from_str::( + r#" + ingestor = "block_ingestor_node" + [mainnet] + shard = "primary" + protocol = "substreams" + provider = [ + { label = "firehose", details = { type = 
"firehose", url = "http://127.0.0.1:8888", token = "TOKEN", features = ["filters"] }}, + ] + "#, + ) + .unwrap(); + let err = actual.validate().unwrap_err().to_string(); + + assert!(err.contains("only supports substreams providers"), "{err}"); + } + + #[test] + fn fails_if_only_substreams_provider_for_non_substreams_protocol() { + let mut actual = toml::from_str::( + r#" + ingestor = "block_ingestor_node" + [mainnet] + shard = "primary" + protocol = "ethereum" + provider = [ + { label = "firehose", details = { type = "substreams", url = "http://127.0.0.1:8888", token = "TOKEN", features = ["filters"] }}, + ] + "#, + ) + .unwrap(); + let err = actual.validate().unwrap_err().to_string(); + + assert!( + err.contains("ethereum protocol requires an rpc or firehose endpoint defined"), + "{err}" + ); + } + #[test] fn it_works_on_new_web3_provider_from_toml() { let actual = toml::from_str( diff --git a/node/src/main.rs b/node/src/main.rs index fb7a6623aa6..c01da62117c 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -7,6 +7,7 @@ use graph::blockchain::client::ChainClient; use graph::blockchain::{ BasicBlockchainBuilder, Blockchain, BlockchainBuilder, BlockchainKind, BlockchainMap, + ChainIdentifier, }; use graph::components::link_resolver::{ArweaveClient, FileSizeLimit}; use graph::components::store::BlockStore; @@ -289,7 +290,7 @@ async fn main() { create_firehose_networks(logger.clone(), &config, endpoint_metrics.cheap_clone()) }; - let substreams_networks_by_kind = if query_only { + let mut substreams_networks_by_kind = if query_only { BTreeMap::new() } else { create_substreams_networks(logger.clone(), &config, endpoint_metrics.clone()) @@ -368,6 +369,24 @@ async fn main() { .await .unwrap(); + let substreams_networks = substreams_networks_by_kind + .remove(&BlockchainKind::Substreams) + .unwrap_or_else(FirehoseNetworks::new); + + let substream_idents = substreams_networks + .networks + .keys() + .map(|name| { + ( + name.clone(), + ChainIdentifier { + 
net_version: name.to_string(), + genesis_block_hash: BlockHash::default(), + }, + ) + }) + .collect::>(); + // Note that both `eth_firehose_only_idents` and `ethereum_idents` contain Ethereum // networks. If the same network is configured in both RPC and Firehose, the RPC ident takes // precedence. This is necessary because Firehose endpoints currently have no `net_version`. @@ -377,6 +396,7 @@ async fn main() { network_identifiers.extend(arweave_idents); network_identifiers.extend(near_idents); network_identifiers.extend(cosmos_idents); + network_identifiers.extend(substream_idents); let network_store = store_builder.network_store(network_identifiers); @@ -442,6 +462,16 @@ async fn main() { metrics_registry.clone(), ); + let substreams_chains = networks_as_chains::( + &mut blockchain_map, + &logger, + &substreams_networks, + None, + network_store.as_ref(), + &logger_factory, + metrics_registry.clone(), + ); + let blockchain_map = Arc::new(blockchain_map); let shards: Vec<_> = config.stores.keys().cloned().collect(); @@ -480,16 +510,13 @@ async fn main() { ethereum_chains, arweave_chains, near_chains, - cosmos_chains + cosmos_chains, + substreams_chains ); ingestors.into_iter().for_each(|ingestor| { let logger = logger.clone(); - info!( - logger, - "Starting firehose block ingestor for network"; - "network_name" => &ingestor.network_name() - ); + info!(logger,"Starting block ingestor for network";"network_name" => &ingestor.network_name()); graph::spawn(ingestor.run()); }); @@ -738,7 +765,10 @@ where for (network_name, firehose_endpoints) in substreams_networks.networks.iter() { let chain_store = blockchain_map .get::(network_name.clone()) - .expect("any substreams endpoint needs an rpc or firehose chain defined") + .expect(&format!( + "{} requires an rpc or firehose endpoint defined", + network_name + )) .chain_store(); blockchain_map.insert::( diff --git a/substreams-head-tracker/Cargo.lock b/substreams-head-tracker/Cargo.lock new file mode 100755 index 
00000000000..92ad0a04eef --- /dev/null +++ b/substreams-head-tracker/Cargo.lock @@ -0,0 +1,583 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" + +[[package]] +name = "bytes" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" + +[[package]] +name = "cc" +version = "1.0.83" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.147" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" + +[[package]] +name = "linux-raw-sys" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "multimap" +version = "0.8.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "pad" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "proc-macro2" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +dependencies = [ + "bytes", + "heck", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 1.0.109", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "regex" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +dependencies = [ + 
"aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + +[[package]] +name = "rustix" +version = "0.38.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +dependencies = [ + "bitflags 2.4.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "substreams" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af972e374502cdfc9998132f5343848d1c58f27a295dc061a89804371f408a46" +dependencies = [ + "anyhow", + "bigdecimal", + "hex", + "hex-literal", + "num-bigint", + "num-traits", + "pad", + "prost", + "prost-build", + "prost-types", + "substreams-macro", + "thiserror", +] + +[[package]] +name = "substreams-entity-change" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e" +dependencies = [ + "base64", + "prost", + "prost-types", + "substreams", +] + +[[package]] +name = "substreams-head-tracker" +version = "1.0.0" +dependencies = [ + "prost", + "substreams", + "substreams-entity-change", +] + +[[package]] +name = "substreams-macro" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6521ccd011a4c3f52cd3c31fc7400733e4feba2094e0e0e6354adca25b2b3f37" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "thiserror", +] + +[[package]] +name = "syn" 
+version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.29", +] + +[[package]] +name = "unicode-ident" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "which" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +dependencies = [ + "either", + "libc", + "once_cell", +] + +[[package]] +name = "windows-sys" 
+version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" diff --git a/substreams-head-tracker/Cargo.toml b/substreams-head-tracker/Cargo.toml new file mode 100755 index 00000000000..ec2a9cfa6df --- /dev/null +++ b/substreams-head-tracker/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "substreams-head-tracker" +version = "1.0.0" +edition = "2021" + +[lib] +name = "substreams" +crate-type = ["cdylib"] + diff --git a/substreams-head-tracker/Makefile b/substreams-head-tracker/Makefile new file mode 100755 index 00000000000..9ef9e5c3f70 --- /dev/null +++ b/substreams-head-tracker/Makefile @@ -0,0 +1,15 @@ +ENDPOINT ?= mainnet.eth.streamingfast.io:443 +START_BLOCK ?= 16000000 +STOP_BLOCK ?= +100 + +.PHONY: build +build: + cargo build --target wasm32-unknown-unknown --release + +.PHONY: run +run: build + substreams run -e $(ENDPOINT) substreams.yaml map_blocks -s $(START_BLOCK) -t $(STOP_BLOCK) + +.PHONY: pack +pack: build + substreams pack substreams.yaml diff --git a/substreams-head-tracker/rust-toolchain.toml b/substreams-head-tracker/rust-toolchain.toml new file mode 100755 index 00000000000..a09cf93404f --- /dev/null +++ b/substreams-head-tracker/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +targets = [ "wasm32-unknown-unknown" ] \ No newline at end of file diff --git a/substreams-head-tracker/src/lib.rs b/substreams-head-tracker/src/lib.rs new file mode 100644 index 00000000000..ee880963011 --- /dev/null +++ b/substreams-head-tracker/src/lib.rs @@ -0,0 +1,19 @@ +#![cfg(target_arch = "wasm32")] + +#[no_mangle] +pub extern "C" fn map_blocks(_params_ptr: *mut u8, _params_len: usize) {} + +#[no_mangle] +pub fn alloc(size: usize) -> *mut u8 { + let mut buf = Vec::with_capacity(size); + let ptr = buf.as_mut_ptr(); + + // Runtime is responsible of calling dealloc when no longer needed + std::mem::forget(buf); + ptr +} + +#[no_mangle] +pub unsafe fn dealloc(ptr: *mut u8, size: usize) { + 
std::mem::drop(Vec::from_raw_parts(ptr, size, size)) +} diff --git a/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg b/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg new file mode 100644 index 0000000000000000000000000000000000000000..2e44fdf53c64cb88bcc76caa927de5c3860b144b GIT binary patch literal 89935 zcmd?Sdvs-2ednor?nBa%s!DaMY?tNNvHYlqRVr86U|f!It0YxfMpaU-R5m7q#K3n1Fs+(^=vF%`RBw)ZjUbn2dDM9NKnXCr zYCZ1RxE@vSi1+#`46${2t+~WV%SM1c_hKx4UWHUkt7{wU?E@7f?~PYPXk685zIOGV zxYFEM=gEOi8^}#pY&U7uYQB2)mU!3F>eBjBeR+0nxjFyjfiBvYYQ6s9DLv~y5_ft6 zR&R=XTt~CHiHzN&8K$=XUq^AbDm(hj#Je`u7V7Jb*|p~K()?wc`Q0auT@k&s>Cky= zH)YLgy$*^iRd0ynr|Qca4Tpocrn}Z{PlUq2Ckt_JzIFAQc;ES{(czh~*{R8i@sUSo z&!3wfo9XMQUKQ`%{6dcl5ap(Ak7vhE&z_r{nH_uW`1H(lU$njL@aSk?ar@)5JLp@W%$%L zgQ<$y8o4ryyVh=%s6q96;&Og*-1=hu`sLvo3GKCd3%O*A;>FRPvdTi+HS51VuDGG> zBsThQkBeHx@S~GrvpM+?yGQ4c68+B<;H5<4+ zE~!-Qj?00+I^*IQ<0vHA=-9-}FpSfGV_b1#t?rM@#xV)srEdsdJx)T9T zRpOH5(5cg}8@;f!esN=NXui2}%uz!1+FZ6fJoN4x;!3sD z(NQnNU2&neTI}ek>Q_`PMjhAcS68)I>iE96JC3?~+|M|UN*&cwrK7tL$1!gU)ncV< zPuvriN*z&0wb&Wm8283y6-p|_p;V#Nvp1Az)>*wal!}zDzrh3HRabPAe^mfTR|=#6 zkggO+K{c!Wp(a4OZn!Z%4TgnkskdWKq5ouZ%C@GY)mUpa+KA(J(r&Ial6j_$|E#Pn zEjJdD^=7if8z-BS#b#?|h`}*B_0Ze3+Yl>gbi1Oebw{DBQdcNZaaZ@QPy&@*eS3W~ zUhR(d`&R{;?T$J_N$;SZ2v&#+G}~R>N6%3ys+RU@VuVFRt-T(LNLW-o78C$i^;l4% zV%1|o32;@g=w)i6E+%bXbam0XqD0MoUA=~3B>4MFZZar9}Ydu>;QHf^Prp=h|;`S}RMd zjdrrm>&1=L`SqpdYCEYvRbN`J&n-8!l2$g>+5N4C?Im$?XnE;LptT$8hYmkJXg_9` z7H3zR>$8ogm)h%X9w*HfFE&=JMq_<`Xy|aVv>Nkxq0ti9#%g<`C9w7NMBByka$`AZ zt~FY9j{`5)n(g+I(5SC2q}bBI(tI2z)Ah?qJu#G$`Hhv0Ww2}~Pc|;Mlf_nZB{5|v zxl?@|KRak3sNO(RU2v`9@6hb{S+zTHb|`t|Vq-PA)JQJXSJ(Ayqb;PD+OhAt4!OW> zt%0hzI)7QxMh}{oR-GVE+)g%VFqW2=la)s6f{`;$)-N?36kyEHLmH&M`Vf2uFLevS z!eRS`QSsBhadh>v1EW}T>I8^FSUrSOArHsXEz3;gydr$KrR_AWV zJK7cQEWNrM-rLS_Cu@06+y$c+-0pC^a%0>VcZUanJYKmr{{)$GoSop2v=LSAs$AcH zI+ozHZVHoTL~jYRQ1o5)iHse2&LQz=qN(|bXx z@uEo4(Mv;Wxlw=0n3}nx(bCGsN@B=u!cFr2-c7i%Ul#A(gd5y@-%G=-{`B7ixBAmp 
zz^x2!_ie(h3~u*<+y9)>jfHuC<);3X7o}T$O>*L;%qC)L9)Y%sG?Yw$d~=s2-qa_hw~!AKQOb+MMF)9P&k)pUxq#RdG6pyi}CaAn$3Tt$V!m znv^!Sd5`y2;}bUbe*1o?a`nK?N#p60$pHSB29wolY^)5)yvAcQWES{9Sc}#MW_}1P z-0HL^;PKF|z4<3#@(=?Fn6T4Nl+Ioi;5SzhlM6fug$s_-6x^D+YO?kvB^~Bh7<%380A#1 z8=lZ;D%TB9=w^y;GB8y6-pc+Q7%|NERxYO8^>%{{GZ#wX1c0zHSWnC0eq%w*XCz1;S4;PZ%VcEU45IX39s{AJ)57=^!)D4 zz4GYcu6>)TiJT8(667(_rt$DztsE(e5v*BywDf)16o_{pg%<W)s!wgfp)LEN}8*5RXTYrdOU~%`{etr; zbk{<$dSe`CS|85GA|9;V6FCjnHs(;_W^>hIs+DVf?!{=TG*%DBdoDHVPv)!kSk>Kn zoU1qz_pUVBZSZlPvy%B3lWn8VXyr!U<6FtQRvnJJ8><^D4no-=Y^UECHMTvyqvpxgUhw{ntmVU194oNBe*u3;!w zug1A|d3C-1^nqS;WBSs-$1CxkEtv1fV)w=6MUB$C<6j~TT${~AbTjc10py4{9QR-Y zt~VAOM~Ii@iQh_xErY4WuF-aua!mPmWvSQ3ds2RwZPiy)6AjpicDYknC%*_qWQRb}fKyRG3U?W*@?tQI`BE(7dN_h<9%$k9?Nx^?GaFe3s>! zD^&a9BC4r*PRlj@tp1~Hz(rdcw{JstGvuw~?!PDAwP^q^wd;X5##ikKy87C9|Hdle zmqrW!pE%a7{saGy6Cm4p?tH$sL$J1Q6A1V4NO#v4I{W@Hf6>5iR^t6T37#F7FUudH zIM--d0IFQ`C4_7l7fNH$A?gEry4&{KJP8xv@BsttwpD z#LXl<%Qd&q#w~0{_fNE&tARBIp-T^}`+G9xQoO3?#0@(SZ!1q}t906gpZfapfxQ6e zs_k}j_tweiOJy^;y|H1es1QKcM|{# z4bHmmQ6zO2xifv;Da^`;<%7G3O0yBIrn{%qgwDmIh#IDJ}ZQDe`R zM_}5QLYa0@_HKGWjq1QiTsAg@-FHt6pBkH(ot&B(pFB4_0qxRfwXyF#KVBOfg?_nb zQ)9&F2%-(l$NP5@s5>s994{ymxDziJtgXB-@VO}7w}YOtVTLm;*yS$+LB;qUgxSMG?xmd=L+a+3_Lr855tn}9aWt((Ao@!oBK zUa~NLQz1TZCF1Nz>P#eRIx~@YU$*8Q1#jMw1U3m<$vE(VLVWekD&>wiygII|V2L+_ zTD5)eI%}`D(7Wba<#&6B>rLBm_1J8~^sP~R)sDb-M3hLl#R@k?!tIF(uPxDf;ewr! 
zMeDJHf>P_Fv^Q=;)FHDK&+46V-~93tF|^q<w#U7mw&WDtP-F*MIHSs?k@V=hINY_w+Qm+S5J zsuD-EO?20)dPhvP(HaSFs;`LmSqowS>q~3PjaeCn?E`Thv%RWu)>jZlZPdaWtl5Re zs-g~!*~a&8Fz#7`4YL>P?TZKY3&5%9K%sW6DxUH6#;n@#>cWFms-BFmHYm^#63xzE zY|KA7yRp9bssq>M`!@`s3w1_Rw)_tKF$P2ON=Vxuq*ru8G;c-oJKnZF{>T`F2&RY;Aa( zS>~;V{HF^CuE|%If$m>ahhYEtSv;%+m}cuOezXtVWR*+ntqrV*`S~$>Ic)E$N8-KB zxhLiwN6rFfap~y;x6+`ywns0FAE)eb^)Rs87wfGxQ-0b^M&rP3uBv-39mpPQMYJz1 zEv^TM2d!F}xDwJISV>*V#+ zbF~Mpw#bgiu$NgY@N8|&T}~P6sKZmw0&Bf&%K5;_xHr%DO3*OQMsOo8zH+wx z4Ew6p&Fm|IpEk_1FWNO3KJYIYX_hk9RqNt`WupBLF_WNXsjtj`%Y7C8Tj}fMzg>OZ z{I|O=9{7n;+-oL|CXS%845E2iA3lNXmK~%VUJzeMOy*+>S)L5-A+Pw5&7Lr$c^P2q2 zH^f}m1OG=M?#XH9GF-_vXkMDFFE1hU9V^B5Xjo5P>eRh#;x320TV4b2FT{PR3LX~J zWS1U+Yd4{0C-;qyj-8#FoEbZ3`KzxvH~Gl9T3-&lReSl?#K2VC zw-qTR*6x57D|=tO=iDT_DqHK=>CgX0XAYp0IcEnvB_EmX%e}Q8 z_smtne^tM5p`~sEAZOX^P9Iyv{)ghOK!+-u{VFs;`7*a{gwENe2O^R*$fY)!S0R=X zB@ynho~3r`LLK-3D$nLH2G&m(ncD}+bw|DFd7-&XC}pVGx{w8GrC_yl;o7yizb+J> zkBVoePJKA)KVxCtA%jHFcBs}^Ty9uC;Y0uVccZwoT0YqEp3Xx2;XRg`e6XW>;x~Jh zFtgsAZ!RaNHWn8ft#)!WaexlC6B#GuyI|_iUsPPj5_#+EaWc|eyWCp3aB)33ap#?{ z@(q&l)%l?$879Nd-nGdQHgCkjP@G&`UtepVJO=UzVkH^212rIw8zD#>1#cm7jO@Z% zV}V4&*3#UDv{%a>ROTP)H*Ny#NiM;d#IQ@`a4YRm|KhxemHCIra%+sdF(ouA-*O$! 
zN^n*y`;A;>B|uvGWm8HC3{D>N>H;FnGBH~X;561xf|CA@Y(cD@G#3L#ie!?lNm3f2 zkvfA6Ir8I;@^2|CNsA|SyS z7dGZy>ZsvC#$`a{AB0D&c@UBdO)@``8@4VrCC8wDlQ*pd!wh8|e@;efn;>4sNq%nA zL7i(baY{}Fu!O4!#;6{f9!$p1jZB;$ zC6F#`S zRI9O?u+I-OIW-2B!vulU6E_Z29*68fp{P|F-VT_)Y!;4zn}-I8yG!0=z&fX z{-72wlhNU`1Z<|0LoY@ZCT8S(ZS1VDh7{B1PfgE^&zzqbOU_JAjvBG2$7&CckBm*< zmrP8G{HK%i(_Kzd?-HgpQB`W19{szNrLR^dUN$3`5g?wo*a6OJxU(NEmJ>?@H$o3%0!+w z*&{efvb=o*!h5rH0daPj>eKbrCs@&EE;d%`m+I?-$wQ6B#bmTmUv+7~CTZHX2$31y zH)J`Qww~pNW}V9$giw&^Ab@Je@+8_9n;VE)1er^8N>m$IEC6)JrG@Y4V9RbEr&6T8dfCfFrMbvsr5){1W9Y(AT8HDOmVB;bGfS8| zt@ZX{#^nwV9qu^b4-ehZF&YjJ>8CqUG}tj{56KnNPxtU>$8meei6i}V4~IJLv4^*j zY0gjg@L0#q_VCtfQ9s?oJ3DT&hc{J=`sp6t-En>VLmXY|t`_;(zwT+9rH;9c2SPQy zlN|Ai+(e8*u{j*V+QRYBdd1}EoY)~w>dP14hxLmqNag10!S&=)v-M=MupwuCGS_Ub zqX>|>O9_%Z;SNl_s^esV?ClvQ6_M_A7+6l{k6JceE=03!Ztxu1mcjlk=wb>t|FL5L z(zkUSqAgai!$L~fdsUY^G-HQF?u`!FDHC@f>)!M%mGW}$?MV-7(d^!vZV5F>#Jl(Q zgYhwUmg@eF*A@D2fsq|+Dg|G-FOyM`XFHF9(i-pYx-Pz#1VPI(eRXu*z@VXP`{W=b zg(=z1#EdQIl~YQH9W~sMF`ANJ{%VgeCDKD*?azKuqLWvn3cHh2%1L}Rhg^a5Uj=>w{d& zQ_;=+2NT&`w!7PU^ov34!&1{Gt;cC^5xt#Cd(+vZQ)zEX={!|+CzT2ujG}Z?5`VAt zMqxBM)IW-1LGFa>Ppse)1G7#&4qn+w^97yWR+nm*qEZBv(G(RO!y5HRp$eSVptN_u z6IVITqqiN5-(aZF>`Zh!x$9d{kyrAiA%e`p5$;u`qo%M+vaT#$aA$dtx67A@lF_Dg zKIuj&&L!(SOr6evok?-lnSe8U6OXgbfStMJw)mBXGZi0-2K(7&(Dv7xYe&s5vq`#| zg1B|MbX@GAltwx(_E3*IWmbrk=l{@k2Yr8K`g`ce9d_=kpq%s9L`VIr3P4_y0@2~I z*Z5UOMS#5KCbvE*0pvAz40<3O9-D}UJdjR+Or$_M0W#5p9dE}5b(Cyk;BY7bWMc5B z2ZA^~n*!+q$k`M~7eLPTxO0|;s0$!xlK`X(AZJq`$j`~hiH1Vd4UowcNH;(xIc?@J z=?2K;HBO{Z6CjhEFZzpuO+rj!b9Ai#zZMXJMrgeELA0@e%Hh1j zDcLUY(W4yxkq+IiFE-fQT8$N1FShR3$=UK)e(EMD$b{JJ%u8#7w<{?2;S3u)=sLUJ zqTh8T=d?|cLV1J>`?0KJASaxf8D07 z!0R!I?0q^2CstqIb&WBhW%@ss9)pxnf6Vvna{eFlJ-dAW$GlK?x&DtK)ZJN1ka|3F zv?8M)Adja&l=1(#=VX`d|9Ennq3@1|JkCif2L!2I_sfB!ApohTK(uJi|Ao>G1HH0Fmp9t%x6F=9wRgI} zg;F?`Lm3vhK%Rg49$**tg;PAdr{fD8ya>Q~eX(>Mz{j564qVf7F$Y}R>ct#zZLAmf zg+n?WcC7d!GaZh&GEGmEj-|M0qMpcs(^NgNYan|MxF>Eol9j>r309JubNXIR=Uftf 
zIh`&^^ku&sISve%<$fmuDFI|TopUO#M#ub2N}{i(KqS#uQ^t`*Urhp^N}{i(m>|*D zqQDN4=xZquN%XZ86G`;7ekb3fs1qP-x84?iSaMWMRoaN2F82RaA+d}uiG|?0O4jQa z5|>`u9>VqG99sFOCN8zFqV<~FLE_7VBr>?gQ6TRXPB0xVAWFRBq%Hr3U+6i&z7itX z97PGzx3yef^I`^E8OTO_jprIu2A{eDOFDJ$sdSp1!uM3lHBRMwDrHH^(B-L=CGGs` zr7N%`FCq7(9B{GZr5tc!dMRZ|r|n%zg%8{48j!C!BQGHhTG5 zrxj2W3@@V<{D{q5#6J4Q=(hfiY+~H*B^4(D3jj8z6_Wee}oo<-gzTY@-E2~ISr~E@5Zz;Iz2qe~i zsOyG!5_Z%PpVAK(qLcl@&bE;$f(50ELBggE6JdHa8*Q0NSfR}etK=gkI`uY`OgFsG z5uVuvc>!25h-r~4Dt@?tIP`B}{~s>gbXO>H{^*Abue>*Y9rq-l#FpM%h_3BFn~YeZ zTH7ouGotZn;_1Z3cZ!BFcFOFgznj*OvUnaZ3EN_sfxfxGVKl>|jIdQv=MF)-+`)Ob zHy5rt5Z?v0O4Z6+3mt!2DD>a9X`{?^5vH=9E~gk07crHprMDKkbb_wLUq`j_wnFrw zLeV;)%Gk{5c;`3t|gl#bp7_Hio0Yh53=EN4On_&~r8z-4ODw{9s`IcNa=JZ|~-k?Mx5)vIn$ycVVB7?y z*@8!_yIOj`IQuhL3iS&|7aGeXFiN{k1DaO;;cB&M!*i09` zYbbfJd5MTlYtXsz?jkNAFGd;>z@<(k277@kBd`<^qTx~tbFr8uaKSoO#+Q#TsYe|0 zET;9uz4Y5DH$7~uKDo-x4qk%icoR{pL1xq04H@2F*t;)2>ZuS{OV1TbSL4N)XsljX zzqm6UQo>_PsD3tgF#w+{?7J$y#RKfAmYx?`_p#(IX6&zKyJ^7#ogQ|)y_*(4SJ*EN(zV!AE&Y7H#U5JlAZxLQ7C)~R2YrjaYUu-vW8fMI znD!>Q0o2}V>4Pr`)LuYQGXu33P#<)(Lb|fXE5A^PKAH-B z3D;jJboS!c(lAkZs-=IyF#G$jDD;E)Gj&Oo!(!zDo&AdfCwCpu&~d7zk7Pun>>gw- zxCx3HxfYavG2cQUc#yS_i2X&i(4JWmzQ0t!^wTCu3F%4E?d%khSie+2`|xjp^-Be; zKVL*R|55=B%t7Mi#|qrz<6hFGiUAVJ97S9~{;gH!Dj#12(#J4bJtQ#vp#bNnKzd{?@hu})vbGkTr5&dVrRLi=RTGg}v*^!J&hLQVeOlxUsx^`KYqwEm;x@P134$X{k-z zdE|~ej@tb}ZDPT!%s6T3zDpyN88G;e36i)Vw7N|WEe)Yqy6aY~cRQ=8On8b@B!p3mTJd@BvG!hm(~#S!Fg6mD+N2-45C{Zfo=qY3?MIHWd7DbVd6U<%`^4%JJXqcN}&O(?sTegkrhHU z+RbIfjQv2I5l3PQ8akU(%C9NzSjR#j;9fR-(giAo;zp74mOZ;kN;$ z+7F1~2kguV9xbDl%b23VrhfeS!^FDL8#-wxhol}5%BZh7(kR|_seYL$G*EVL$`(K) zX?=%QVxDAo!^*4gPmZ5>mGPqon$inl;nkp1Q~jcfbe_cX-R~-L<9f&8TO;MND)hcq!@c`2bp7w0;uHw2WwKhO~VTOoO9 znwWAXA++k{nH@5#(TM7$rUED1B4kAH%{3d#XVFSHZ6k_+`#^eP6s9m2vX-eyC4;Y( zqcPbl$YDJx)alea36P<@v{_nLB2hzWC@$5)_|-u1!te(_$c*x&n|9D3?Ut{31i|tM z;lY$}cnLK4K)5al_XpYBOAY>mOb*~Epm?LA7m@>Lz<bn+kO*Q0=&T zy~*xDlpULur%e=e2QP64&a@_&3*9^D6nRaehvJJ?|ez8Ha 
z6P%jbdteOUk74t^0TB4P$oI?$A1_MfgP>#Qo3k^lc`G1)KX&0`C!?-^}ryi*` zVq7^^em%Qn07W8bv3%K~wBQ0`7yR)u?2pUI@YMKDzye9_7Ckt+zDSZ2@$2Pe0DEQG zk5nFjO}o6!2f}pgtOX~|yo?>)vPa~FR0m=y^NGv3BlV=cB#u~Zt{z3OtKLGo&pNpD zkGK^BFh}XErN)_7l-Xa(+FDRLoELg&S4z@c;_|;mTxNEAk}Xn`43~(l6Z;60U^@lk z9?@=6&*w!@6`jMF18Z(|;V2nd7Oyuzx%`SDBMZyD&7|0MeV3Sa$>D8d+B&(6vfU)s zEZ>~+`z~D6z+firJ}r(8VnNDoM4i||2g8(#XcnY%F zUB3{-Hbt7g5Ns5Rbp1lGQOYtFz8b_fB_L4@kO)#y^kSR0R@sYfia`3R#J0k0m1^ZL z3myMUROr7pKL|H}PPKMELun9ySs>LyBdKU4e-%bTiT)|N{wXq&zX~IvNYlRxBcVwD ze-%bj(MbNLK+r^*2_+y=43IREzbW+G65d+nTMvgKkp8AX;KbpCwe`&c!D9zWW(CCn zN!Hdk3xtpTTOfV2K>XMjf%MG+0pwqlf?45iwaS~K=q*vP|NYSpp^u<&+7_RWez*ra zIM!np1S=o$l2?$mNOlZy7{6x`4!JWGYi8%M1;Bb!nkdq5-CZcUCBcdeXBUofQ5)-$ zw%SfjYuw<_bEtUq%oN zdcG+l_`Lk7dE43`1vk?hrBJ9zoM+cMJ+K%hMk#v3 zuVU9}KNERFg(8?f6CtaPBda>Am46m>ya!ozOI}tvMLSs0e(QlTC4Xf!{IjUbDZ8Cw z%6CTIM??usC^}3ig2_7rCKPG<&dB?SD1ym5#T=3~oswGbioB1A5|AhcNMf~jMc#7Z ztyT6uB8otISLA&}o$_Km8<96B>QDj_#Q;fC>)EKs+3>st(zDU^PQ+0J(z6k{bGJgB zu4?6{qKODbZ4E$8o5OC`vO5I z()s%WLAr$5Pe;TpH4I8Xq8K1ajQ@1hlZ3Zc`R0S62&A8mh+T@bT_VVH5wU9rNd%!7 zAc-K)MMSUtTOd6b5x(|CAUzimzrGWKbXO}s6LtJ=5M*EzK{&F&-5buL-Ao+WH$M{* zZWJQjBF@hS;!pw+iVhKq5a(wDaVXOCX9IC4g2>N`IM2#D?G|-D7-60M7~VVGzAs09 z;?pqAk!0Q-GnkRwX5jDWQ9+r-oq`4}gLbRXPn|62Ms#6X7a65!;bz|i50p2SL@^L! 
zRA3fHCC>d|HY#!M2O|;%ePu@V!3dk%(T9opP!#D>XF<|LQ4EmeVERx7Nfh`{#03z( zGLSwLB}zLGc_{rt6y4@uN|sR!kR;1~AtLPUno5@aLUgU;C7N2>TW*cV3=&O05=Hm) z-;;2hkPQp{lnswlBo?Ga*+)$zmo{9-5uSV6aA~vaL_!KUX_3KvB=Y{uZWB@;iTVNx z5>g+DK*6`g74ebiwpWHC&XtcucfP_S2&9khgrM!$ie_&?&_CabpxkG`2?)x4_R$PM zshuCq5S07uqZxu_5d3I{U>O8IDg=LVH>)PDR{lfO@$ZU-6X%lq|KWecg-s7{$0z>% zu4GKct9QEP^UWzvTLfx zLa%8Kf$_@bd=JeTb2WdliOWjv??pR5*FG6@NKX04&NdBvYaYUoN+YEz)n(-1;6hrd z<=oclebz0VbmR0RT=@ojM7W@3EZcaRLwxW9?rnXqz>et+8X=vTj%tecwVk};PW}XC z{USaIyao5_Ph4SM0~dMFW%m$0|1AVP^0v5}9i}1iGi#s?i&VCH3~-2Canw-B^2*yp zLyY`3e3xpj)S}C&q*hOIMZ1p~;8aJruOr-XdmP0QS#}O2PDviUu-u%hFCR^35Z}-R zjHTA)-0>S5JO=`tnUu|Tm{lBH3fG%^SZ5)1^}ijIdxDcK&Npj^R0L;oP7m?Z934s4 zmNz)NbJ&KmsmZ0r97n$uFv4pRK4Ed#r~J0$JzvG~m{_*2k^Sj4&NZ$k7n_&tvja+9 zSKujp9u;eHUHbohQqLzciS8_yXSc$NH(Q-M>A>l`3pWB?2; zH@)(gZ>)!F-@UltD?dlQG;}#uTJ9rmMFa`Qw=L{sPm_civ7H@{baJ-wbX|!NS{%vW z^)M`FEZgpg>@A+z1jPLJ*tzQ)ErLLn)@@sXlOJSc)DaeQxD!n=P95}eFKw(De#ih0 zP3km~gq>6_J9y>#NO{DW7;+K)hlo3~E%!W@*7zq;bW8t#DUfi+UZTN1#FQgs?Aj|y zDeylqFByoE-85^*oJuZ%wOClTJ37vyb;r5k=WitgzDY{dC#+$1Em+wig&4(CpF%n4Q2 zNNL2&lMVqWdgR@ya>FA0T~#Ax@Iboo;MN8#K^*9rPJQSwo03@yQn)Y4<05O+>E@QCLO)^!eS%D3*S zV>)h2P^q=wH3G?40T9lBrSm1dADV~xlHUK>Y`&!Te>SQ*8;tp3zCIg~PyX{on=cgq zFGbOP{m&M)*4#B^VFc>m&5@3Ld+ug>Yz6|evjW=Egyl}()yF}w1Q z4$=fK4S7>r*HcLB!in$0Cnmpxrm0psL-n2bF3)!I--pvgSLQoGOea|;850#ISti++ zf{_rLGvP~7pK~TqWRkxW-R>L_6fqLM6dgY4mu!TQ@TKVPd*j2#0o47|C>rB*#*8Ao zUN9Rr2t`)@{Nr7Ozw#nRQauO$|dII_oZU zL;ncpJsr!ST!}5DD^n|FFhdA2ZYgw5m2=Eh=VG=s_L#Ku&3!8^H$Oj$Wv`;rVg8`S z5WF_UIn_9=Zw@EnxpQZh(m9kl457mu%4hfWYz~!Z^7W`MNDG<6UylwuXQMf$C=TB5 zrxY(9#e2Rdo)9A`_xNw3=)dD=|DNPbx=e!L^D#8Bm+qC!ADKm@*vz$g9`Llx<-dtK z_r;@lp#iSV_#07a_rTpLa9a7(q}E*8yJB0+4px-i$`G__m)>Sj7$9J}%~Na-kbvKa zdUwTVd_Pq97OSnl;tqTc9J5s7?4FUyHFh*&VIc+7%O$tLgsf;mmgEJJOIWlAboH%> zEZ5K#<-d(cCcGyH_QA(E;ZIp$H3nJ31I-i5w2!&SZ%k4&Rn6`K(*TfO<{+V{B<}!a zD=s;Wz+LkqeK*}7!pex09t4rY^eH{)Ek*Bjk99VbqWAMrWcuDxyy@onKfow(0`v8* zBK!TnE+x}0Y1w9hNW9;Q+uBgGnL!6?XcyN#Jsb54jR%rsz!vus2@j!(%RiKBBfUyQ 
zq7waz^@!q)!=;tfrbdVpHj@vR z*(ElZ$xfgM-oj?^DbJRr;_rPh zbz2t@;YjfG2+-Jd=!+B)Mq!E^5qoq>;%`byFtu{A{uIIr1l&sZh-=qE%?%{+LNQ#< z0!5t}dS#Nb2jTxAnSz0$Dxp-#zL2!CtT*UBiBrXB>l`W024`HWuGc)pTu!^3Gjdog z$=l|l_V|{7s63pMC6(?4TLqR>y4Smk-p2~h!IJMPrrL?pz1~$!wUbUH%DalGcG77a zibQdei%Mq$TMb`-_XkFulQo%N%D$h(Vu&iBj(4)pWx;-Mp6>U9~YJH96r>F3?W zSKjCQA$RIo)`fqm+$oBoAI>{bOvj_x!n4_UlsoloHlA+!c{UqQH~ls16iU(c9JdR@ec~9|3K$)wrD8Axk{56Fs;)uNc z_ZOoR{hudHArlPMP6}m?5g*{b7xTervw;J&I>@!fa*~z04lALgTSM*L4WzH?2QJR} zY|+!x=umme6mKJ7NO_+GkWTj!uyMTE)x|BLC^OWZ*U1A6rtz)i^=f-3VOc3 zc*mVw1tM~Ce&P8d!HDlq+-=Rys+H@ERm^(lT4%#eM0BROw|0D%fG5BAs+tq9VJT%0 zg4Gn(JgajVJrvzrBqfPC9a=9#1A{QMUWWF3HZ&clc)sYv7+}lLo-cBdfg?*VLwmk> zy9;CRmZ3dgJpPLKCmgGA(&7Wf=y3m=a&8W;ns%PBgd8oQ#He%?wfDd>9LnavGfPOu zZmvn*PD%%)ucaD0xqVKgkWdB`imaDa>2P;3T#yRG-NkS}kZp203-W=YJ|JQqi(L%& z16qOOHdmYj`EZeYFYZgup?zDTwUKihk?#xLnnq_+=NvmJ8oNOQSOJaQpz+}h4IMD~ zaIxybW^uF|G(KG9$_q&$G6jQqV5)>~tX##A9^Rf2jl{tJex_iK!96oc? z=EeKnG$TR%F=b01L1lg&TwYoPf>mo_2idY$>?Z9c5O5FlgeGYbdVVycr;guzw3zyq z_CU{%7H_`A(%j>Sqc^{-XVHW%E!2`c7RA{A8+cki}*#B4@-5xRvzbRM=`066xUgGI$0M2EUiVe>@w! 
zPAGl6=u?^w}J_tyZylpe}qEs*+t8IgM zLRfmZGe}>Y5LF1lU;-hk5aJUVA*vAK6UAzYI!5IQA|6+3wS@6(|y2vKgQ~B7yS|OINA^9zg_f4#3_RLZx=Zt{!a=dA4xO&og!Hv zZ%8A=F8(4R`gc1c{`Vh+X~a5E{yRl~pggt%=C%j)p^#^PJa{lvD}VA;^Oc&lsbed*>)38R7s3=rr85%0nd4qwZriaf zPt)!FBLTW;p~QKxavmj$=8xox{nL$09m^_pE_t#uILCfcp;8>TI)#9-v&cP|`dH1N zofcVaU^gaBvg5@GZ zK+oKe?fY^Bl|`tG3ags+qj3(S31m;C{hC!O?ZTxgq2$YbWj4Pb2e9oQ&Ha~ZpPcd`F5BK9Kh}t&liS-|#)-aF&K%*KwWS}7a7CAycAd+S zJukj7^;~%$nnBD5?whqW0cRs1VC}TXqW^r*v3bin_KzzPPs+)uhMRe@X z7rk$kB0Bcxi}*(Wn+=xEo%E@GW)ecACy=TQ!jP$f*CFoz3-6YFvy&O8Dux-TKUJk2XYMw;Vz zJ=$D>ejjla`7m%u`o)CTf*T_pyBOewJdzx$^R!SziqWP;==jBqj`E9svDkMkRECaU zES_*zb3iEQ_{HMgW8p1y{9^He(fBKlj)I2tQWh_Of?Wq{u;ax2=cybt`{!G(Xem`6vxbozxIB zbZiF!g4~Jt>y2e4N=_qVvL`N40YO|}`<~Y{F3%vt29t+%8Jd2+TF(jZ>Lxf2Ya&}` zBt4|G$e$PTl9J_SlS6GfY;1n!Os=1;ubo!wF+FJ4klO^6 z+8f31moG3|AtbDtMvd=g6-YYt#N7B`x_2-Ice3`ai_#dh#u%f7d$^Ad4POMug7p`v z>bP24fb6V88i4WGx-gpM%>dNyR#mhgU%p_wX%q`(qHonsGARAj#fqE&Y~E zw>kej2StY6akQZJFh>g`!U4#2?n?HMq|7V|}bSy2~4 z^+?V=Q~K^*2n9d~G0=+RQFP$7ecpo1GbOL>Q)JxFl>CKI+-q3+i4w~G%Wxr7kM#GS zD0%&z5_nK_`a4C4@)ITWcd6mM)ylg{9pCh7xLkbiDs}CTe@3CUUa8^lEn!r@GqPo3 zAH52^CjN6VHt{}gM7NFm<%!{ap}rU#F2Te`T6Nm zX;1&2)K9^O8Wb{7(AFx@J2i3-nEanE?b>ZA5~@Lk=Sn_+Y-RVLD+QsOR-V%e)7>np zLHU0z`P-bVObZ^kz_2~A8U%*_#BmYtKUj*c>;Jcoi)8FGA%41K;-b#`tRUM{)4ha~ zWkdud-Hk)cgX$_04RjV#YD{foEf;K%O~|>ZI&-g`p3VYv*&g}~LGVZEenM>wW@VW9 zsF)-iqS&jm0{EdLZS_`*T?;wyuL>ipL-_bKs^zVBBF&Lt$}IumswdPtU4s`wUJ7^_ z4se+clUo$cIYXT)?8A4CXx1Ybd)=+q9mvR-&kke+Y0X;WpED|Mg5%2H88mK2@Y?-VW3+8*UeRIh}f3|#o(y`b>bT9hpb{& zfz;fYTqc)vo^S>jPrZE_Cdoz3ZAL{8RyvQBzbIp!5DuT(8N#7*acEq8klM-cCQ=pV z3{q7GxqhYOGZ1(SWB*Etn}D4dRWjPIlsN3^i%8X9Dc#`G!l;Z?{gu+8<6f$AmDR77 zqTfiRsuszwmO6LYrCGi1Dyv^(XYaTBw~YA%ku;7}y1IL;sOhqAtR*Yz*GgpH95z{L z_gMY9KwWDd`P|evo(riwYAZCTn*o#H{q;OB%AeQ~m`~KtVJ7-eHv^`%{s{s5kG8dl zV4o|IL;I=SAxFc^N4JQWC2xZ8amc0R%OqszLy+!V0rJ4sFth2E+oK|jU->AlIu;-o zfSA^fQ~2aMmsc7TVsY0h($_MFU2i62rP%5gAYTu!nKB^ZIUmH5m5|}5gKJ(9c0OK9 zeeiM_l!)tkzTX7Z%h&mJml2=AvkW&$CE0T=9UC%_2-(zu<9TDluiM!ljdqJgqr{9w 
zJ{2H#_m&M%-Tq@9Ol5q)>2gKs+nYveiGa;Fl}P5gAeb#wb!*a;9z&uZCwBbaVRPrD ztr<$~jSF0nYw6@nfncQ`DU4gbF(TDziEz4`BuNjUun8Xv*Bc>Zo$>>F$F2smH5?LR zxB%(NCmRiS|CNpzU(`7vIKt<``}@ctMk-8Ds8Vuzq(dqw*ML~Ep<&Di>PQwr>Ni@8 zWVB0S$)y+!M3-fP8LGgr&%L&*Sj>?MIW5aakSswmNltHoKe9>rE6_wYKpfTe?CJID z;=Rxr;}xqpd=R;2Jf*X6pSTkC@ELibGeNUmT;~8>+%M#C`woqr=QmpJJeln|BupqF zb2C6O{nF|N%L>Z*J2}u9!byC`mQXS8hfN9Wt<~+ewSCe%fJ?n>+B27M^l2-DA2%J) zK+MBHOL$H-B=N|2d)%#CWmuB+?z1{Jt!}wQbmzT}R5<6oT{hzFs`%{=e6Uv22~!*yIQ;V^Q7qAJ^z z`T8Xc%b!^{*rD1j7-JhOYVvx4U=(o2&82s?utXpxWqb z?kvt7s%@4{L7su1Ic->CL98>Ia=Rn)9mTcKhfR?>ap#HSgO0=eYG?ecgZXt*>sc1+ zT(Z?j?{cE;aI61%O`iIudWggH=|hvVExuomEQ7(MMw%2yAz50L7eZ@S8P*P(E?FX= zmfLnX#Y3;@isExzqmzf`j-5Du{N6iXdHncYhY8=fgd7RvCBEfnp7d2W7H63-Os9It zd&0(=7BTZbm+9m3b$qVmZ#L{T@5$#%+-&IEqPBdlbnTH)L~Z$8iQ7wlQEus8DMVi= zkskEV3uI$3V|F8UnzO#JVl83m6CVPQ-p|LDdC)d!1xJ``h^nTo;hwF zYxjA;m-x*aH%Cem20}nhNy0B=s3}SKg^bsgB>Y0@8s|9djVhq_LaEa_>y@MKuY;qG z5_nK_jyj6y?0+ps-SxOadvUIQL$1(0tTTN}W+8dP^lo#i_A0vajo=5R1f+ZsNLG}~ zZ&$VQx229Zm0jdrX>NZjm!DjuyW~;&_IHu;0EF^Dbd;wE<-Z*$Pm$ig9Vkx`%6~gh zUio-GQhs?{q`O4XlpJ{A!ie_{hKPzzic*SYU90^mYlgm$%N&T7z9vJ!n2b+u~Yl~jzSL*kS-hn zDMFREmc2ioBIvxe+~;n5p$GxrTISZ){~|fLTV~)hW#r_4%;coq)$_86N$E}Q=pDNQ zhE&J{dy$;nd`j+1l9CqAPidrJFyjm~k{9fmj7GAopDBAG2~D8UGi5I%DI$rUDSIJF z5!v%h86m0o!)_McJIdc7{;*r??j2FZYQDKUU_DLoOR69O-#k8PIp~a&w>W2G%>lDm%x;ftWj|Eg`|X6_evA zukPXxJ_{oEmcIwXrL#K45kH;zPIM72r?8Y1u-Ia2Q;Zy&mRnltYt7~61$|yecJ0L` zw*z=XTd{wC)d>3_H)pgP+zpPRZBhP&pd9~G+{Ito>3mh}G%I-N*-+=~NRp~?SB3i$ zxqK!rwl}4GTr#nQk^eHgUu8@!tjpXw)};tPek`mqiZH^*!YZQ(>wc{4k8)6Cm3^%2 zk8)6Cm3^$tQ4X!LKCQA}{VuLD0D>wI1FBkOzZzB>MS$|!>BuZC6D$EZI3GOe;c zt+J1YRYnOsD7sZfkyZBbu*&vUEB~t8@yC9ZDVzIWmAkHr|D#sfUahiEX_b+?HZGqe zhLU&by{wDO_3?7AG4Cb(|KAd0Ug29N*6}SxnDJ9#iBW_LJ{6W2MfmnpVTn;>iG3<8 zF^Vj)Picw$xs-#wijMzIIr{hIVqP?RrqSINN8Q2oqvni1gv>GTEFQoIXJ)44o+3er z&m*|WU*L=bvX4lw-SM^RTxbuty7qBpJ6yWO>6i1co*A1_X26_DH}!;@&+Y?3j>#uA z#W%y#_h9F{3zd+b?uM-`VL84SKI7#TBZgeRQ|`RRLh%%}YJRs|y53^-R;B_EuE`#- 
z0)My6<&n1%kWvjQ{9d_qU|_%bFYUIw6c?8zV6|K|_bn}Z0G!_|Uwut{kB39~-<3=I z1`a0KSo}SL_~k68*X|c_bHGJ;%196-VL(gw86SIKpzPCE5(Gr&K2wfw-)GAHhD+W8?K9=;UH$|`80a%)e;))z80h!Q6?a69A`JBV<-NC>jNU74{13m2 zG8%xO3dDe_wDCU-GMcxb`iDUqrwFQl7-Y1Ly8O}0B%}9AM*mTe(Uic0qLa}SA;TX@ zM!&s?Z=hPOe74;2m*qnLxA+=;>eWqS=i0Hk`rgI$%}4IeSETV9{JHKUdfv+@?;?=} z;^kC?$$eY%B(68x$vjR&9a#3cX(3(u@1=KWq_!Rt_j4*Gny%pKbQ zR(Uj4-7@`!a`fL2c>mI6(%2CjFEBRUub0IN?FvN~Y=&q)`ASa6cH@G-R6v2Bh35Lv zaK#Rv1VV8`Xem(t(Bm(qodJzBAEsJkl359S6LyAP`$@@)t1i}Z`-T}~+413-MSy#I!*32XX` zWv(ezs+4L_;Y;PxEolV_Gah6x<*E3R7Nf+YwXh}}PfQ;G^Gjtezf$gGRakzx9JzZ@ zDFKh7lQ@y>hBCx((-tWQ-6oK^RGD#BliKwcO`}1;iP@jlmb>xH11OxKy z>A8u^CY1BSa%CTLNkB{~uB#^-`f4U;yVwfc9jb(j32C)`=*B`pEX*OULv)8!bYYz1 zTNbZRS|tQQI6KUb$$iiZLldF?yzKKoz}=Adr(!8Gv_CK3rVrSPNQkr-%8@(7P6;AW z43L!G{z3*xWO$)mb#VtO1L=h_SCKeKl)hT_w=GvA1$QV0NJ@?UYT0M{@fJv5E$_X> zQ(vjEUoCUnvV%m^ua*66^Hob6r5GS7H1f3!Cxu47R`zF9sSKpAWh_A5zbHrUKyWou ze1Kwrr1-#JWH>24@E7I1fnRjd%3p{<9Zs;z*USFuM=LS=lusO|4R&3~NSg*e4B=|`6QC)4e;q@9v= zS4MEZ*J%#TWRld3IKG6GyZ;Vl`tk4PjkDOTxevI*>WfV$g>~BSZ{-j8k-ae2KCQqH zRk)k^Ykh2x6U`7C3#>2My0%2LS`5A!B5!hn&P9xMyoMlXn-JteTW(-o{FmNmVyCAp z)57Ay$lyFdt%%M_K9!{jxFDWB9|D~5(J=+>b+ShHU_Q<1ZQ|R_dA@xWvTxH=U9Y0H z$+je!O|aZ|;w>=bFI*(u4#f@|+a2v9hDzQ-dLSW$f43bo6IuObR-8a{M;wxbLjw~H zI}u0r2=`>UD|*EmA&N(c*2Zne9o%laGpNUO`hdU3+zowrcHgR0hh;yX?$icnq*rOE3wu1E@K3hyshjVs4=x?%pM<2uI|q{! 
z{5N^MRh?l-mpFW|ePHZz)^Ki1!$A(((N+zY>m=zUhr;N!G16pyaA8}MSA-^t2qlLc z#eMz8;NrIW$3uP7APy}yo4)de!He4}-<4K&w|X8rPGy&OA~GFi83eNVw2$--ameJ< zJ2$PQ7BrR@{a5fTbHHx!(Xet%Z|sV~q@)z~5oLE;z#n!GF>0qsNr~ZJGOiSKNxCe( z7G_rhIv0HQ5t(2la-xO+D$US!$IYDn1Mt(BhAER<8l$p&1;^AGbW?`+WO9gHkoM9X zsk_R};Q}G?OPbx2X+GJSZ9BD3kldm{`?b3jQ*yx?Y=FjJXs5bs*9U?IQh4owBv4U+ z93e#8{OY^Wm)LwndZ-*yn}^lu*Utbu1f3OCMlk5=ssc@*vF1oL`wadKoYI zD>>647@%y!)C%+qCI#z&sso6!q+9>+w5+n{`TWuJODL?E5$%qSY23$l0G66%S*Jlm z#mbxG9Fy@U79)2HF)d2H##FLEQ)?WZB&WUVenlP4O*%6y zPFL1T7^AigGf2I_7Iz0Wa1j6{#il~dq{(GV;-#8NNhyhntU)2Za=If1o*_&kNwkUB z)X&6n(wrv&Q^|#!B)|Eb)rqMbwwK>!d-*+sL_l7z^d>K%t%CHQA+Pq#YBmqu#m;%` zm~?=iHhGHyI{4{!V*6;KcENh@drCW$ipqr&kx-S9%P;|}WZrq0=5{0USaN=fcE zEW_53^GY~fI*&PO+1*N`WzB3+xgGx7fuGn6{InMGmZpAnyMv+vy;G$j(?7Q1e%!zZ zy(cJ;R@XF0(I#eew99ch1-xLO3&`(ybza#F8yBtt`*#4818;-=k zANeH@kWXvL|0Z5Pcji=+!p3cPN+?%aRvSx^Ywd%!b&`pZG$1ROJ=pzb02?c-IK_1P zyk2DLD}Hv?p?x|TDcHsi;p+*^i9})s2zR`iIyfxR8T0S40kZz>*ckFzMv^~l<)pm= zis2yG{ADItVN7NS0VEt8Yder>q){MoS~QP3=f3ohG-q8m!P%gvO zp)enC=knxuTC>h^9^lpE1k-oA3j;xKO6p|GxRl{Dj>{K#dji+R>;)Kd$Yk9=ljMMt)PUw|N0+w46rc0T~cdFe<3B?SS zuM)ct$;_q*89(TtgLJ|=C)U<3+WwtZFm`j~QUdeG(n1s2F-)>{VN@Ht zidN?uZ06C!TTjX$vF}!}ukn!z8AUQJnPIa6yxQl?qd*5zyd4dtSn+*SmaTXSieqCC z;%Y6mpUl2;oPygfF(oi_u0- zz}`Bes6GD^!v`kdEgzBPRwX1zU=a@r5h3WIJ!{5UEE=j4MRP=GpL9$gR)gSRfH*zS zrbKpy=GY2XMlV}h$q83I9jc!b8wG=Xg`1Tl$tl!`Cj-{0C{s^-p$6>zuyJLX&`8iK@M>qWH99t|7ISh zoZ{cC?7uB*VlWOJPGJJ#Tb0s@w1Q3z@F0UJ=lHksFy$WqR%QRutO+o`Rk`!HF7Sy; z`>T~Vb#}a`v!J{Hr)QBf?7RK4-rmy5Es#&>kkYVj<8lrhzXhRe$>2BNhg4Gh3EDGK^k5x~ zV!B4sdQH!CSCPb5(i+U+0UAnLq-SQ1uG1n`>03H`t_p8K^DUiMyRYC;nK^q)Cttyn zRk~kR=|Aa=p6zs2DP?N^lg@a5{7eDfqpWegtrHdXhw?_LwzJfjbX&ugdE11k6H%r# zT9z@wi^IpZEdwZMqnov-p$YF39x~sVTTq4&!v!P9sP+IxeOss3&Fuk$cw48}&Fuk$ zep{#4&FumD-P=07ZtfnS&b_S@{ch5a8yG*{S-Q3Vfvw{fKf24#k!?Dd>2k<-9={g^ z>Oy)c5Hq}`m;QLC7X((r`rLn$Eiiik-ap>ScU3QWq(Jf=ouwQ4IS!b42s9a{$%{dn zi;ME3!CE?fmAdUV^O;O5lyqB`j^PC(7e;i^_#E<*V)UFLFDdqpJkce^-qGp51T5qw 
zwcgQ5-19#NR^q|CJ4@H~{~MCcHlvU2@sg1@-0cz(mV4^C%_DClu-pSPYCK7@c&FsT z8cLDKAa_4zsL~dnvahM@%-SJ7FK4gpPKjVb+M2!JKpsVk=?h{x3P~n2x{N}S$?win zNHY1|o&HP4qL4zB@9yMYh(8{TuXUHtyU%2C`TQ~L0k_zzd)zBm;?=+_Zy1iy1>vDaHqNeX5NXZ!s`b8 z%L`3i<1~bVkzpj@?PKcoKy-2bBYcMGHQbWFI-@2J zef0=8n3r$t_~}9+{+-^q&|59u*ik+4o4xK!@>qx=MHJ_tgN4(S1Ale+7}eEZq3fgu z-*F_nqot2TCMWK^^Hsh97k$j57!PA^*t<4UsvKJ#A^*F6uZ-=z*3g%`TT2)cI&~;T zS{FUAto$ybCv%)<D)>E;mKnuFo! zxQLioQ{Qz@W*!`$PNpYM&pa|*8%y{-RhxX6DD-G@>QP>fB_oqlkJiS|JUEj)I5{yo zR+~ize>{xB&LBI^38lM=S zdDJ>QJw9_zot>Vn$u%)GT$>pmIX^L6BYCq%BIa14k&TW|k4y}YpB)?JQlE45nT$O= zc5Wt_esFkVV)H<;xgR+z;gK1#F)CG>qh}v8ylI?fU;jB5Dx?=a0N_H zjg5@+3wn$}hT+&P+~@8nLIxY7dW(j7{H{OiYUWr<3#3V<0j*JTq)v0|RpLo&lUXKRs@Q8$UNQ zR;!(#ni-!wcbHjz1j;b~Q=3?`2}IL31(`kbTIun3|H zd`4#S)#wmjVWP8fCg;Y^OpKoyJ2x^Wd?p3vk@4xV!%WyXxSbJ9k@=BfI)zc_SR4sn zOp$))SQjEQpUL>?WO(#pfQ*LPaKQAqXBQexpC5V9(;W8iiVIP-m~Uruw)^2ShP~oIRw)0q&MI3_0I)IUnvE5oxdT@E+S()jt?6d=f{uWTu*N)^G|+9 z4tKu@)@MixbMS=m$@`Pr*XD*m`v*Y8--N1;2x7Eadd+Vt;HAv>39fp5^Zm&xA0kfY zKAU{yN7-LGJR?x9yHLS{ay`LTcx_1+ScbE}?=V+lzgR!WV$=-rgw(r>O_I?3(zJ{N z6ASru>wH@2cV(rF#>qijRZe5CuPwEQ(C&|shqgRqFQlj(l*e5fjmZkh5v@2qaLkmR zmTwtE*V(9S|RuJvCBhm z7;qlC%a(?qM(!h?$$;LmpmZ79vs;E+k(ndtny7v#ndvg5dGt4PCySlWQ*4_4q#$rjPry1!;XI*9A0;+wyF6ElkkX zHN9LpWw)(i9xh`|vWm5!i8W3TePl z9ypl{2t#l7rF`PpCLlHB&=^41aDM}az&Th50N>-feRcL$2yA)jT1pm;yO8Tv@;XAH z=;|xIN6n5jZdAy#9zL{SQ@>^Q6ecrw#il6@Zr-#T%^4mLpV<&6j~j1tC)xuktywKaolh#hv8yNkPcc3{^Vt1lA-cN%x1~+9vhSBqt{1pMuZk>x_{gJfQ^aZy0?@R%tV}qXa_tY2ja#W#1L)@h~h@ETU@z# zO;=$XsjWva400?>^pQ6nq;&ZPiK;QCH3A{+F(39!k4_smf#H7vi!6*+^zuOa&C$Gx(yvp)|nygAnejjIW_2SO*eMC7=?Li?CKB; z8$%rENG?_LNoc}SB0|Is$lPB{FxF;99YPI*9`YYoV23d63XDRidO$|M?Q08YDQfFL z8%z4LQlX~!2BiGgQFNsr#JAf#ZCS{>B>7WywzE$ zud*?z?Hqx5KAB0BkQgs_7or6c-o;gM5~gETg0_z)z)o@#p?z zq1&D4<1OCYpDgUZDt^SIFfZSSZQ4Ihg0Q=Gt}eFZTC_~AZWF6smeuD-mkb|V^^*r0 z3il9401F^@hN5-U<-YyHTd;Uvp>OY8#YFa9@gMZX|8IL|8zbj+-S_8tW_EUVW|uo0 z(Gs;J#n}-lE-i6i$z3jE$s9^cY9*0$mSk*8QI`A4wcn`2?FNI>SBL0z5n`9PheS^vR^-V 
zE+gggGCg;fSNGgr9g@&%ddtz8RDUy9YS(!J3an0)$`;963}irzo$D(P@7gG$?mtTE z6WOy2AJAqrRkDIgS;Zl3dm#0#ZAiTHQ)#d|Qfd#iZ!M5g9=^YSV{iM9d-JB!oYh9D z?1iRb&S$o^TdQh!D|nnd5>`)@ypY1xCrX_G$qu7vFnVu?5ipSfDTiVPg??Q@U5Yb@ ztsO4m55ekzQW;yUOBl*rS-fy=YsvbyyR!TXkAf#&On9^spTFPs0-I}u~`&KTl!EDWYEcC(a=y>LQ3X^3K*x9NJPrBQ4%_ZYnxfok&Jkc3{3P>P)#K&s{Y z(=>oodv>QGl|yLaKqz)k@<>S1HR5|RAiP(RyQiGC1hb0VJ=@Y&a_KO44=+zWVbnoS z?@iv*GgfomIIlL@Vqlm)m96OF32>(i?k)0@dqhaJOv$K^!8qzL0~V(#7yVO&kj z-J2ndhHcFdR%_-qqZWFxmtFw+i9ysz^d}r9Di*F3)~1j^C{^``-?@GW%EJ zA1kwYB}7I%tnp^Wnw?_9>CcW8KUQY5^}T~qJrke8$+TP&rpS+#**{ihKZ(Ni$I9%f zA1kxbYy0`*!Dy$QKXyu)y)#7aiYC)Ti9S8&?xu!YpC0*8G+;T=aO=|}_cS%!`t-;? z)Y?*|@2ckX`CoR}L8kDDIAZJT`HLM0m$R+6DX?d;3s3GyY{_nK+)H@BMzTwUULOCm zI^}RW(WNoOMf?wYnVA(`M3N~}xGTc#Os+~$E-(wWmg3?tRYA8TAZj+g!MF-Z>bMjo zGb$JMIv@^IZj6k4MRGWVfXi25oRPksIsG^SqWLQ(Bf5$Z`Kg&J*QBn;eg4q+7!C&1 zBiJ#vyQKwO2hG)vp>D$N6DVoxgACTMP9tiQwb31NWe*^$5ijARNn%)uwq(D?W9?j5 z2a%WpjSglVmqkFojNX^CDCMZHaz&Vn%Djv%*)knL)QaSNS%O>fSGYSi`P?d}hXvtP zM?$l!Lx|``$~^XD??50_9p1l>EAD>hL3%WU{(PUrYpaI_2Ig@>v&7|Z?r{b#OKLrh z^MK_vVfbl~v?6JuhBXrKK(JveyC{ywX&kMn2W(D1&Iq0_@7hYpPMw_kDg z=h^<+5d@?RBo~6DB^_#%m3DZt5t`F-Jy5 z#+!Y)apOk+g@v_6vvOQnoEzYua`dmgxVBfvD4Er+P_0COwFgs05U1DlvxXVN|I>zpTF@W3&#@> zjcVFkd*AzNBg1=ZFH-?&&s@EcCZ%}e`cb1sd*{)T7)VVPy4tda;n*R~;H%Wo=TTD>|z!1Yq`6 z_}w9vp}c#6bhre>=BrH*|9Z4JpZj@F`^dEmOPF=`?bQ@M6=ra#!Vwb;YJ_R!IiXW- zpWfS3&qGe{`rbhI!sOn5bFtSa6f|*g;D_K~633P(F#0jWZs~T2WskT)x7X!)$DTTQ ztRs=Tb{W|HEE*HqSRD;`;%fp}-}{hRs36xJdpdg8o`wOoA<^!<-neNk8?u&xN{yPb z#C|6J^Vb$j9 zJ*(0<7rW%WjL;W|+O}`#R+T~1?NaB-Qa(=nAMKMc$e!kcA=5^F4*mDCu?cC>z6<@_ z1@p+EmX)%F+mmZZ@kOaV+iR*n3x9-(C5s?Su2j9jU`TXeoM>Mi2C$Cpmc}-XwcASR z<4wARX5#D-Cx%Rqhl^X#&SPx>mF!!_O}aIL*Cw+r?5v-y`Qf|Zo&-(T@nYxTY z#qA_-C@#$yceik5-`hr;X`p%Cys{?M&dyUpgwvmXoJ~6v)Hk8G!EKzY*KI-ZDMzQyvT7^} z#@Uu0(dzzXH`~;GoZ*3B%xi3S|M|7cOAlJLX<50yT08F+GfGJDDAjgt&+ZTJ+kJW8 z?)lTZpFFht_@UjW`gboryN81XkKF57H#_DCLCsNuo*mj#SLbJJ-NQ)dxwGJ3NIFdw zkPz#lbzDIXTyq6yak&a64@4r!cDub`L+n 
z-%^b`{oG`pwd*e7fhQ!A+>D-CwnGo+#+J*Is@8VyVBDOM7!>MvI_>&nT-;pxVyd0X zzY!-}dVVRcO{E5fs~PPBT~khAZVe1VLn{W%*m@-Ir$_d$+aygXzovPtNt_)pydD=bHNR@F{(H8@DSCyJox?b;dpW zYlbl6M_hg+%N{eD5+ZFylLlGLM2ZGYS~q?yGQ6&J1$0M}Dubm5>j(-h)Z8evIy%Zh z6Dd7d$ENO3jHFVMDk-H0L+%waN)J}*=uSfvD>0?KHdKe@Oi?WTayi)+GD^Ew-kz$4 zr7bMOTAp&fXLnSMdycuI0|xG{y#@hp(}!eZ{;GVM$!a#^P>dKH3uaL$s>=zHu@bOAh;~&17EFP82vKZvz=A7KH&^e;w1MA0U8=92Ls%RP zTpu2g=^!SNVJ2-^Z+SzW{_Dek4@;t+cbvHOTi6**Enw!lx>kLt)D5;))~3yC$~2QO zXdSyO+NRKnYnSn}+v)i@TC84O2}Uhti{q3)6fTQ5t*%{NZh<5-7pznin*n-nX|p|d z>DgAN<&eOPUy4)RmS8zo|@-yiZWqQ zWhPA>Q=J}37UEhhp4w^HyR&#YQ^Irj?6q^#;c@6`I9Qfyet!@D)GVV%id=y^ICVIV?#HyfOEbQD=VSGsQl@HxboOE|?hld$sezIRoDgfJ>Z``@#! zk~HB+2j+JS7$yq!pNmV~8~TlvT*a<3m|>((l|UOk8yawZsNV{+TuyCLnV*s^rA1Yl zYb{XL&D`!RtIyv0|42eBTE_3uQo(0XsQWIIfH=TO>!dk2nhJ%g`$}!bF9fQ*@Aj74 z-ZtgA!~=vTYTf9V3Zd#FrE1HHF^v~ZuDBZ+<)}hYk&&<2h z+{Wg2+lUeC-C3=EFrzq|`zg?}SuXC`(5rN=D(gC|1Zq=!09@}Ym0RYt`7MCuT$s*S zqjv5^&Mi#N*M~~?ZJ^U5)hXKN3iP{yf{zq+l|v5Et)=}QYQt%d#$O9nkK>e3zBO_bcJ} zHU#%HN}a7}st%RjlU=2lzP3EQb~WsViSd@zEb+|p>8l2~=YPhfO<5bg^L?d?uRVs? zX}Jxzg)cv~$>J^76tUI*(mky?gKG(!0r{!T&CDNkpA%IeH(}zYk>${60>~CGpf-sG z*>eV%A+zkhfow?usOM+flQ1Avd4}w6#a~g%DHjY|?w;pL1s^NTOZvyv$*{DCP&U=I zsNa36YhB(r)sGZPdEGdvKUFF<$IeXVn@5L$qLQ5moc&-sz;YpsmoNB5A}q1~J&Zk@ zOI-EIQirtX2wpZK?3Y1gh1EN^ba6sepWzD<{>-Qo*pn?s(_UhGSaK19wZ- zLmjINgYl8$&p!Qkvh`;-=Rms7-k+$a{&-B*oKf^gSaV|bvccwmM=*uSMYE}SP55*f zh8y~GG%jH1FY|u{0`1X59LbXcHvb(_*-U*4CVD7tlGLDJCjX^mP;vqVC|xR;WwJs5 zD0P`2LLnpyrJW{2P)MSOha{@B3k6-#klPUpLKi~6OaQVe*&c)tI{I%y66QAB%D4$5 z>ktCmCS0O8FG%(*s!3_u1Ac#d>EU-|+9suFBv_OzVZLK#8UeG4&lsS&+QytgEU zfIXRbZ)x9}hSVQCo&C3>S?y!&zZYBX|6HuK*yx`}2_q%gWe)PL0^)HPIoCx8L~prT zY}`a2uU7;|L_w8d&xKhj^fc^tK~7-)rI3$4;TAcTOUj>S4y$Ul#sxR5rA%lPYD7na z=(^c9ME*GIUy-_)DkG_#wObzX>muiczAk$0*W6K0v_DD9*=vwP#_e9mrG zo+b*AlG|zMxSNkzkTWfa=JEe+NH8y54%*RxRoFSVo4sm>EeYLeo$RJmohq7Jx=!-&MI55 z>O~A>HZ-aSIds4f0uYm7%d7Cx5^u@0AW~XlsU}Rq-2&AH*g#)c&C)kgz>kFfmab>* zl%!GDq9k19zNfNmXoK*13KEd#GWlLL-&W{KA%~5vS=VQcvyX^RyL8^_iZf+3VChf? 
zN=JvJK{hIx)T2bI4j2Xvv1vPpncgEq1!I6qSx6U zHGB`0czQqLZ@sS;7uk9bB!_dhI1?i60Q+N2X}V`Kn%`c(C-t{)%Zja8-Wqby?a{mo z{9Rr?TJden<>jMnW6v2Z0FUK+>nC*E&g7CdOOUTIsTMS#;_`4c>lRqlWd12YgD4km zEv%7BNO3%KwgwU5`8|v-GIp9VmcPe4VeY4qaqw5AC zKJx11yA&^<)wIJnDno>B%e{}cb-fQF^Y?9iI1SOX_w^5kra?H5<;VXYHm!}OJ(hQs zPn>8fJ+^%$G)+_KvC)I2e-1|BfJr(!-QS3^erks~Wlr;c%5#o4ki_~JmTQ^*k z*P07y$Zfm5;bS~d`p}5q5#Q4y__X9b_G-wi94s4ygj_=&IQGq*$6t-O1<7l@BEhx| z5RtmQC}BG3R){XA+{1JW_tlPtJ9>6s?a5@w`bq#iJQ)@kzi{^CcE9@Q7tWp>7&Kb7 z3ulwb&QMHz=44ojNCJJ69eyj)FPu&8c)(^ldhBPSpYoY5(tIYgf?K)O+;Lvz6loBk z701JEHbP`P&M7IRF&f9=&@^LmniVH_6Vj73(M}|3Mz!L^{STyR6g$Bqpk9oLb~5P4 za&}YjWN17gik%GlF+vnOiI(uEtTqBuNexE;(z@d++#Vmn1LcJ|rigjTu-C8rcK$wH z))tGyILQFY_f8&E5|?V^5GF=05l1?ewLplO>Tp^lO_e@?4Afe{oIRZkhd${p>gllB z<`T`W)8Q^EA%L9T=0isaAg2e00uX9DlZ*x+Jby{Zm&{zEd2pues}muBoY`Rq9B{39 zaAxFy4J0mdKNLOd1F5P$RCG;hUIY2z#5XSjRQ2J|F+ybgFkTikT7&@iVVo@J6ojJx zr=y>7aH{mDi{9ZjfqOoT6am0JAHWeJy%R`MCgzn|sf_p>6{6fv5{cDf3w|QObOW?<}M#CohZ( z13c7yE?RO7XyZK>CR)2Na6Vj;B|t0A2XKVQcs^W`B?P$h;gW2-8h9b`?S}w;z7X#9 z5>gs&$%O$z6uW>|@?+3zyZU@7srMXWxk5he!d!`_O;MgP6>OxkDcQi0*|E_H6F^jX zDJ%(usPawuE#x}NNOf{Ic|qBui4I;5IfNu_Iz5ZgMfqd zCpuJ^yyrCV_(8mUjWc&2Hm!u0ctp0*|>6L^MZ2GMj z#EUgAf%yEZad`Te5E}wPH)kWr3jAt}cLmw65v1s=agRTJOb|BU)tIM`RY|#8_;?)s z^H52doF9+5ORq}Gs^m2c-3$yNwh4mnwTB>iUyGqtRYDM`uVD{n?;(^qI&i@)`NbG1 zoRW2T$bTX(N$3UVN$S? 
z1}XY_y!nANNYU5h@c4KcagoB7{kAy1YY5~i=f1Fh*jD9*U-5aXZ=`H^& zao%4wDdz|?wnX^j5Eem+Ux~}LP=lS=hzRdplo8MtJ`*SZB&$Iq`kA=2HPj%!Hvg+} zJ}pm}8i+`1Fz%L!vcK~NK%<~vjp4N%FItcCzowDa3oy#Sd{aadgu3->aoHc6w;Z71 z*EHTK2#Dpgt*v10DWVBNm3+3f6(W+)wzh)s=UQ6c$1uPl6{ug1;efSXcB;~^ z#Bi^ksF5I;7S%NPN(}!RQYreC7zS1i>Z%rgD>ki+HjyrJ{+1dfvg*Q}QsKAbm6i1N=L2XO|J8a**MxF+v|<$HE@rglG|z_-fp> zDJwzvcUwyc1QE>=BBI}IEkXZ&ueF3`0};&Cs*PBJP z&=5?EYMJ?Zv#9o|uTwNNKkbW$|LNh<&$_{k>xtfL#xwpsoYbR3`$`YPVXs|1wB8^C zi5qSW@p#uiVPB8C6GTk9ex&SgzQ5FYV`la8zzVMZ_FW&Ue(L=Z!8nSd_`bHYQFJ!B zc{4g2+rRjue8e|%)y)yIyId6E2`J%DoR9Nu@p7EYwdLYybK(Bn&A4&v);FT^&8Ttf zwU*!ZCklnjyrjDFQWCXvG}_cAal%~G-9EqIUzO6dnK(`(d(6eaKc?H3!-XcF=Dty|x?#pl*qbR-^2I*ED9 zV~TthFE-v*sPd{nMCE7JflC6oq!AyAw$`v|7)`eD1Gq{pX?)Y+bn+Q0De_soSoxR( zn~amRZIw&)*nuU7Yf)cPtS4Pj| zG&!99NACX>Pj*EOd|&bXAAazWlU)K^zF3bMFTT*5H0Z;h2u?jW(Ul`xo~|a1TZszo z?9KE2gCG2${pqfVuL32D*U#0XOV=Cm3k2e|OO3}a_U6kCk}vhfl+8DiwZ=Dp4u}`~ zlBga{SmkY$jvDuq8ehNE*!cnn%`YdyeJ`mw8%LXE3O`#?K|bxf}xST-~2cTI^NYmo8smWP$@BYthC+HV<_gkQl~SucE><(gl9$J zzfE|gcR7AMzhkj&vbUflm5V7CFxLy~D&!S}BcSO6Bko>E7)*hj$|pt0wYElOva5}B zda@!y=j&oUAwISsUl(g_(4=}|J*yWcAhQBEXnaFhx!7yH{7dz^*RnaXijA0Dx$A57 z!V42!vOY?Ry@h~Xnt5|obQV)tBezy>Ykbpy8C@}B>mVYk$F##Mska@abA(4(Ft9CQ zYIepV%|hd&wdBSNqdr7Xj(V~tpw@nZOb17q_o%(w#uu81A=Y}Vy1~Px6^h1dPzNCq zMm-js_Koc-s$C!~ZCAV6MOf`>SMyrh6>n@;vq`uR6?hFTZS|H>|3zD7lEY1GJ4N3G z+ak~=dA5V`Hp6%u7zdg#-sWhcX;f|_0~NH1s94uTA^fle3#0l`2Mn#FiH#`8VBxcY zuNAwMQmhqPAQdu5%)xb#3IP(uQ%C?eNHj^fvZ2YyE?PfMoX-NVbz&%M~Ix~b?#)#P=a49+2 z>>zvVh=IAt^4h`*pt$n7=2m9{ZdwK|SwC9LFwp?>Beu~>G+MEb76hK*IMGzL9WHD} z8YYk$22JEf10f_W`{^9M3o_M$JN?%rH$by2EBxo0&~?I3cjZ||si5)uxAGU6EtTts zNod?WT5s>76^#m0e4?w9O@LzGQZdM+4UXgG7i#U>>P%)RBg8AqUm4_Bn5sojZyWUp zrTiyPVUQYst?NXxV7ER>&4cm9&LXIUJgXTO_Lneb;*8J58K^K7y2v6_V?~Uch)7t}1z!Z- z%%3B_vpYDhqxzVZ-)C zc_NJHLP_tHXuL9HE9qH-3l zHc=2A3lyZnPCRsh<;uUAn~wjGT~ig*SfW>G#TiISv0Ye5Sj-mObS2hFfm+)G-F{a@ zVVonwY=~S3uF);p1Vt}`;LH;}A6Njny;+ z|5Cp!`0F!UEM>ptseiLRv&EA7*PiuEUAC%soxE$A+yDj`W;XGen<_OEtb^p 
zdFsbM=3NMxEtXW;`)$WSnv0cQpCJ`rzk6 z(0)ggFRc&08G`mZntXMA@Ha!yen*q9uMfTzg7!O_{K@*@+aYMbqsgDG4}K#A?U$hr z!Efr9p?bAizRqV(Ts0M4Vf*L)K7ZqXF_VoY_X*NvZsC_q=w8E zOX}x5^-Cc&WVTpR-}Kb4hSZSRVoCi?PyKpG4Vf*L)VDnKPeN+QY_X)i?WunjQbT5o z^=&pRhH1ucc*Y-f|7WT2W1y{^TE35q`_g*y_gUb(S*FT=is1dGcF3*H;IS|g=;V+Z z9b@0(z-!0yEo?J=Y-YJ_zNhK%-J6IBCwH-qv_sn)k^tY{wD7@+)NX(2brVOK~$q!4z#tl zv>D(`xKvR4pvsiL({7|Eo;C)+%SKt+fa{1o0w&aAm;L1Eeyq`^d9ca7Q3Aeg0ti#a zAc$bWPhPBA4%BFhq^FXcr=;9!%BWKDMQfwv7F6aOUEIpi#SQ1^U~&UTXVQ{3(Ltw> z?j#R>X~)~P2O>0C$I)p-t$t>JGY{Z3FekJiuwPkQjFWUKU=)CA%ZNzcLZPqf9^Bn+ ztdY)KX|L}9+{6r&OP40V|G73`5TSDZ-BK>YE(apbEE^ysp*8|^D<5!{YsFbE!&xri zEN9J6aVGM(jSF6nvsz0V(m@gxXJehT3@;Qe{AUMCeHvnj?(;(wYgB{?LSgH=#I}UA z>M73p&B1mrN1XJ8mpe@VNE5~7OpvQko@FgN-RYQIhI9nN&RnV7HqL1|h~c7D#*Wi= z3S^1cNx~0y>S%7fACn}1Tuo`*4Cg&24!b3ylUPV_=2R_p5_5*}rj3>ZpbdYo7Ic@b z#-_~CCZD=wy4HD<;hcf0^kz=hL%9x88=bO`^EVtoM#!%~Q#()&d2b*&IriQ?# z74Yq}q(zI&dy{A^$5-VSZ{50uczy!af{#QTB)2TL3=Yddep3=f;@p`Y8l8PnrSgxw z)W+~4P#lL}*EHq85_4d&Z;$@IaiWr%jwbf4DjLy~+7%jzY9is}Et}#upRq_g_DgbH z`FXFC#-hC#jzl@+k6j*0_pCrv`Gm5GNa$sExk5<&<8M_(wCOS8zo6K1x%|g*3?WFj z^j4gD6Els`!0HNqaA&vkhvMN7Xb9(rT_o!ADdC)D2qU}*7+RSgn?{@x=)nnAg zZ>%W(=$EJnm&m&2a`q&ylCNGlbm-cZ8!I!*xCX;QuQpRVi#03{*4UlIvqHa@?9c=o`^M6y}h zy=i-+Y?{vCM=AB0oV>Mn#z#jF9$Xw9 zn;o8LdnD~q6E6o0L*za zh*HZMs(;k80B~k}ba8HBadvk9{`t}QnFABW54>9-LkF*2S<(wwcBO23?)_I??KJ*(@NEsK1<{} zo9}r=% Date: Mon, 11 Sep 2023 13:50:45 +0100 Subject: [PATCH 0401/2104] build(deps): bump async-recursion from 1.0.4 to 1.0.5 (#4850) Bumps [async-recursion](https://github.com/dcchut/async-recursion) from 1.0.4 to 1.0.5. - [Release notes](https://github.com/dcchut/async-recursion/releases) - [Commits](https://github.com/dcchut/async-recursion/compare/v1.0.4...v1.0.5) --- updated-dependencies: - dependency-name: async-recursion dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- graphql/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4efd770d1e3..5f8621dfba8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,9 +108,9 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 00f30484772..6f88882fd3e 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -16,4 +16,4 @@ stable-hash = { version = "0.4.2" } defer = "0.1" parking_lot = "0.12" anyhow = "1.0" -async-recursion = "1.0.4" +async-recursion = "1.0.5" From 3e654a5cb3fd9fe0165bf54ec17de9ffe01c38ac Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Thu, 31 Aug 2023 15:06:28 +0530 Subject: [PATCH 0402/2104] store/test-store : add tests for parsing context --- .../tests/chain/ethereum/manifest.rs | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index 236b11b180d..08cb83852e7 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use std::time::Duration; use graph::blockchain::DataSource; +use graph::data::store::Value; use graph::data::subgraph::schema::SubgraphError; use graph::data::subgraph::{SPEC_VERSION_0_0_4, SPEC_VERSION_0_0_7, SPEC_VERSION_0_0_8}; use graph::data_source::offchain::OffchainDataSourceKind; @@ -372,6 +373,59 @@ specVersion: 0.0.2 }) } +#[tokio::test] +async fn parse_data_source_context() { + const 
YAML: &str = " +dataSources: + - kind: ethereum/contract + name: Factory + network: mainnet + context: + foo: + type: String + data: bar + qux: + type: Int + data: 42 + source: + address: \"0x0000000000000000000000000000000000000000\" + abi: Factory + startBlock: 9562480 + mapping: + kind: ethereum/events + apiVersion: 0.0.4 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + abis: + - name: Factory + file: + /: /ipfs/Qmabi + blockHandlers: + - handler: handleBlock +schema: + file: + /: /ipfs/Qmschema +specVersion: 0.0.8 +"; + + let manifest = resolve_manifest(YAML, SPEC_VERSION_0_0_8).await; + let data_source = manifest + .data_sources + .iter() + .find_map(|ds| ds.as_onchain().cloned()) + .unwrap(); + + let context = data_source.context.as_ref().clone().unwrap(); + let sorted = context.sorted(); + + assert_eq!(sorted.len(), 2); + assert_eq!(sorted[0], ("foo".into(), Value::String("bar".into()))); + assert_eq!(sorted[1], ("qux".into(), Value::Int(42))); +} + #[tokio::test] async fn parse_block_handlers_with_polling_filter() { const YAML: &str = " From 11222dacaa928f809693957c868633ff7edc9939 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Thu, 31 Aug 2023 17:52:07 +0530 Subject: [PATCH 0403/2104] tests: add runner test for data source context in manifest --- .../data-sources/abis/Contract.abi | 15 + tests/runner-tests/data-sources/package.json | 13 + .../runner-tests/data-sources/schema.graphql | 6 + .../runner-tests/data-sources/src/mapping.ts | 15 + tests/runner-tests/data-sources/subgraph.yaml | 32 + tests/runner-tests/package.json | 1 + tests/runner-tests/yarn.lock | 6585 ++++++++++++----- tests/tests/runner_tests.rs | 31 + 8 files changed, 4997 insertions(+), 1701 deletions(-) create mode 100644 tests/runner-tests/data-sources/abis/Contract.abi create mode 100644 tests/runner-tests/data-sources/package.json create mode 100644 tests/runner-tests/data-sources/schema.graphql create mode 100644 
tests/runner-tests/data-sources/src/mapping.ts create mode 100644 tests/runner-tests/data-sources/subgraph.yaml diff --git a/tests/runner-tests/data-sources/abis/Contract.abi b/tests/runner-tests/data-sources/abis/Contract.abi new file mode 100644 index 00000000000..9d9f56b9263 --- /dev/null +++ b/tests/runner-tests/data-sources/abis/Contract.abi @@ -0,0 +1,15 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "testCommand", + "type": "string" + } + ], + "name": "TestEvent", + "type": "event" + } +] diff --git a/tests/runner-tests/data-sources/package.json b/tests/runner-tests/data-sources/package.json new file mode 100644 index 00000000000..b7126ba86b2 --- /dev/null +++ b/tests/runner-tests/data-sources/package.json @@ -0,0 +1,13 @@ +{ + "name": "data-sources", + "version": "0.1.0", + "scripts": { + "codegen": "graph codegen --skip-migrations", + "create:test": "graph create test/data-sources --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/data-sources --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.57.0-alpha-20230831103613-4c8bdf8", + "@graphprotocol/graph-ts": "0.30.0" + } +} diff --git a/tests/runner-tests/data-sources/schema.graphql b/tests/runner-tests/data-sources/schema.graphql new file mode 100644 index 00000000000..6f97fa65c43 --- /dev/null +++ b/tests/runner-tests/data-sources/schema.graphql @@ -0,0 +1,6 @@ +type Data @entity { + id: ID! 
+ foo: String + bar: Int + isTest: Boolean +} diff --git a/tests/runner-tests/data-sources/src/mapping.ts b/tests/runner-tests/data-sources/src/mapping.ts new file mode 100644 index 00000000000..3446d1f83c4 --- /dev/null +++ b/tests/runner-tests/data-sources/src/mapping.ts @@ -0,0 +1,15 @@ +import { BigInt, dataSource, ethereum, log } from "@graphprotocol/graph-ts"; +import { Data } from "../generated/schema"; + +export function handleBlock(block: ethereum.Block): void { + let foo = dataSource.context().getString("foo"); + let bar = dataSource.context().getI32("bar"); + let isTest = dataSource.context().getBoolean("isTest"); + if (block.number == BigInt.fromI32(0)) { + let data = new Data("0"); + data.foo = foo; + data.bar = bar; + data.isTest = isTest; + data.save(); + } +} diff --git a/tests/runner-tests/data-sources/subgraph.yaml b/tests/runner-tests/data-sources/subgraph.yaml new file mode 100644 index 00000000000..ed8ebed4a7f --- /dev/null +++ b/tests/runner-tests/data-sources/subgraph.yaml @@ -0,0 +1,32 @@ +specVersion: 0.0.8 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: Contract + network: test + context: + foo: + type: String + data: test + bar: + type: Int + data: 1 + isTest: + type: Bool + data: true + source: + address: "0x0000000000000000000000000000000000000000" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Contract + file: ./abis/Contract.abi + blockHandlers: + - handler: handleBlock + file: ./src/mapping.ts diff --git a/tests/runner-tests/package.json b/tests/runner-tests/package.json index 9b2cf1f4977..cf9d2ac2ee0 100644 --- a/tests/runner-tests/package.json +++ b/tests/runner-tests/package.json @@ -3,6 +3,7 @@ "workspaces": [ "data-source-revert", "data-source-revert2", + "data-sources", "dynamic-data-source", "fatal-error", "file-data-sources", diff --git a/tests/runner-tests/yarn.lock 
b/tests/runner-tests/yarn.lock index 40d155b0bd1..48acd30e698 100644 --- a/tests/runner-tests/yarn.lock +++ b/tests/runner-tests/yarn.lock @@ -2,29 +2,37 @@ # yarn lockfile v1 -"@apollo/protobufjs@1.2.6": - version "1.2.6" - resolved "https://registry.yarnpkg.com/@apollo/protobufjs/-/protobufjs-1.2.6.tgz#d601e65211e06ae1432bf5993a1a0105f2862f27" - integrity sha512-Wqo1oSHNUj/jxmsVp4iR3I480p6qdqHikn38lKrFhfzcDJ7lwd7Ck7cHRl4JE81tWNArl77xhnG/OkZhxKBYOw== +"@ampproject/remapping@^2.2.0": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" + integrity sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg== dependencies: - "@protobufjs/aspromise" "^1.1.2" - "@protobufjs/base64" "^1.1.2" - "@protobufjs/codegen" "^2.0.4" - "@protobufjs/eventemitter" "^1.1.0" - "@protobufjs/fetch" "^1.1.0" - "@protobufjs/float" "^1.0.2" - "@protobufjs/inquire" "^1.1.0" - "@protobufjs/path" "^1.1.2" - "@protobufjs/pool" "^1.1.0" - "@protobufjs/utf8" "^1.1.0" - "@types/long" "^4.0.0" - "@types/node" "^10.1.0" - long "^4.0.0" + "@jridgewell/gen-mapping" "^0.3.0" + "@jridgewell/trace-mapping" "^0.3.9" -"@apollo/protobufjs@1.2.7": - version "1.2.7" - resolved "https://registry.yarnpkg.com/@apollo/protobufjs/-/protobufjs-1.2.7.tgz#3a8675512817e4a046a897e5f4f16415f16a7d8a" - integrity sha512-Lahx5zntHPZia35myYDBRuF58tlwPskwHc5CWBZC/4bMKB6siTBWwtMrkqXcsNwQiFSzSx5hKdRPUmemrEp3Gg== +"@apollo/client@^3.1.5": + version "3.8.1" + resolved "https://registry.yarnpkg.com/@apollo/client/-/client-3.8.1.tgz#a1e3045a5fb276c08e38f7b5f930551d79741257" + integrity sha512-JGGj/9bdoLEqzatRikDeN8etseY5qeFAY0vSAx/Pd0ePNsaflKzHx6V2NZ0NsGkInq+9IXXX3RLVDf0EotizMA== + dependencies: + "@graphql-typed-document-node/core" "^3.1.1" + "@wry/context" "^0.7.3" + "@wry/equality" "^0.5.6" + "@wry/trie" "^0.4.3" + graphql-tag "^2.12.6" + hoist-non-react-statics "^3.3.2" + optimism "^0.17.5" + 
prop-types "^15.7.2" + response-iterator "^0.2.6" + symbol-observable "^4.0.0" + ts-invariant "^0.10.3" + tslib "^2.3.0" + zen-observable-ts "^1.2.5" + +"@apollo/protobufjs@1.2.2": + version "1.2.2" + resolved "https://registry.yarnpkg.com/@apollo/protobufjs/-/protobufjs-1.2.2.tgz#4bd92cd7701ccaef6d517cdb75af2755f049f87c" + integrity sha512-vF+zxhPiLtkwxONs6YanSt1EpwpGilThpneExUN5K3tCymuxNnVq2yojTvnpRjv2QfsEIt/n7ozPIIzBLwGIDQ== dependencies: "@protobufjs/aspromise" "^1.1.2" "@protobufjs/base64" "^1.1.2" @@ -37,78 +45,63 @@ "@protobufjs/pool" "^1.1.0" "@protobufjs/utf8" "^1.1.0" "@types/long" "^4.0.0" + "@types/node" "^10.1.0" long "^4.0.0" -"@apollo/usage-reporting-protobuf@^4.0.0": - version "4.1.1" - resolved "https://registry.yarnpkg.com/@apollo/usage-reporting-protobuf/-/usage-reporting-protobuf-4.1.1.tgz#407c3d18c7fbed7a264f3b9a3812620b93499de1" - integrity sha512-u40dIUePHaSKVshcedO7Wp+mPiZsaU6xjv9J+VyxpoU/zL6Jle+9zWeG98tr/+SZ0nZ4OXhrbb8SNr0rAPpIDA== - dependencies: - "@apollo/protobufjs" "1.2.7" - -"@apollo/utils.dropunuseddefinitions@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@apollo/utils.dropunuseddefinitions/-/utils.dropunuseddefinitions-1.1.0.tgz#02b04006442eaf037f4c4624146b12775d70d929" - integrity sha512-jU1XjMr6ec9pPoL+BFWzEPW7VHHulVdGKMkPAMiCigpVIT11VmCbnij0bWob8uS3ODJ65tZLYKAh/55vLw2rbg== +"@apollographql/apollo-tools@^0.5.0": + version "0.5.4" + resolved "https://registry.yarnpkg.com/@apollographql/apollo-tools/-/apollo-tools-0.5.4.tgz#cb3998c6cf12e494b90c733f44dd9935e2d8196c" + integrity sha512-shM3q7rUbNyXVVRkQJQseXv6bnYM3BUma/eZhwXR4xsuM+bqWnJKvW7SAfRjP7LuSCocrexa5AXhjjawNHrIlw== -"@apollo/utils.keyvaluecache@^1.0.1": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@apollo/utils.keyvaluecache/-/utils.keyvaluecache-1.0.2.tgz#2bfe358c4d82f3a0950518451996758c52613f57" - integrity sha512-p7PVdLPMnPzmXSQVEsy27cYEjVON+SH/Wb7COyW3rQN8+wJgT1nv9jZouYtztWW8ZgTkii5T6tC9qfoDREd4mg== 
+"@apollographql/graphql-playground-html@1.6.27": + version "1.6.27" + resolved "https://registry.yarnpkg.com/@apollographql/graphql-playground-html/-/graphql-playground-html-1.6.27.tgz#bc9ab60e9445aa2a8813b4e94f152fa72b756335" + integrity sha512-tea2LweZvn6y6xFV11K0KC8ETjmm52mQrW+ezgB2O/aTQf8JGyFmMcRPFgUaQZeHbWdm8iisDC6EjOKsXu0nfw== dependencies: - "@apollo/utils.logger" "^1.0.0" - lru-cache "7.10.1 - 7.13.1" - -"@apollo/utils.logger@^1.0.0": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@apollo/utils.logger/-/utils.logger-1.0.1.tgz#aea0d1bb7ceb237f506c6bbf38f10a555b99a695" - integrity sha512-XdlzoY7fYNK4OIcvMD2G94RoFZbzTQaNP0jozmqqMudmaGo2I/2Jx71xlDJ801mWA/mbYRihyaw6KJii7k5RVA== - -"@apollo/utils.printwithreducedwhitespace@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@apollo/utils.printwithreducedwhitespace/-/utils.printwithreducedwhitespace-1.1.0.tgz#c466299a4766eef8577a2a64c8f27712e8bd7e30" - integrity sha512-GfFSkAv3n1toDZ4V6u2d7L4xMwLA+lv+6hqXicMN9KELSJ9yy9RzuEXaX73c/Ry+GzRsBy/fdSUGayGqdHfT2Q== - -"@apollo/utils.removealiases@1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@apollo/utils.removealiases/-/utils.removealiases-1.0.0.tgz#75f6d83098af1fcae2d3beb4f515ad4a8452a8c1" - integrity sha512-6cM8sEOJW2LaGjL/0vHV0GtRaSekrPQR4DiywaApQlL9EdROASZU5PsQibe2MWeZCOhNrPRuHh4wDMwPsWTn8A== + xss "^1.0.8" -"@apollo/utils.sortast@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@apollo/utils.sortast/-/utils.sortast-1.1.0.tgz#93218c7008daf3e2a0725196085a33f5aab5ad07" - integrity sha512-VPlTsmUnOwzPK5yGZENN069y6uUHgeiSlpEhRnLFYwYNoJHsuJq2vXVwIaSmts015WTPa2fpz1inkLYByeuRQA== +"@apollographql/graphql-upload-8-fork@^8.1.4": + version "8.1.4" + resolved "https://registry.yarnpkg.com/@apollographql/graphql-upload-8-fork/-/graphql-upload-8-fork-8.1.4.tgz#898a6826cf53b13e42161884b0090d3bb8c8b2f3" + integrity sha512-lHAj/PUegYu02zza9Pg0bQQYH5I0ah1nyIzu2YIqOv41P0vu3GCBISAmQCfFHThK7N3dy7dLFPhoKcXlXRLPoQ== 
dependencies: - lodash.sortby "^4.7.0" + "@types/express" "*" + "@types/fs-capacitor" "^2.0.0" + "@types/koa" "*" + busboy "^0.3.1" + fs-capacitor "^2.0.4" + http-errors "^1.7.3" + object-path "^0.11.4" -"@apollo/utils.stripsensitiveliterals@^1.2.0": - version "1.2.0" - resolved "https://registry.yarnpkg.com/@apollo/utils.stripsensitiveliterals/-/utils.stripsensitiveliterals-1.2.0.tgz#4920651f36beee8e260e12031a0c5863ad0c7b28" - integrity sha512-E41rDUzkz/cdikM5147d8nfCFVKovXxKBcjvLEQ7bjZm/cg9zEcXvS6vFY8ugTubI3fn6zoqo0CyU8zT+BGP9w== - -"@apollo/utils.usagereporting@^1.0.0": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@apollo/utils.usagereporting/-/utils.usagereporting-1.0.1.tgz#3c70b49e554771659576fe35381c7a4b321d27fd" - integrity sha512-6dk+0hZlnDbahDBB2mP/PZ5ybrtCJdLMbeNJD+TJpKyZmSY6bA3SjI8Cr2EM9QA+AdziywuWg+SgbWUF3/zQqQ== +"@ardatan/aggregate-error@0.0.6": + version "0.0.6" + resolved "https://registry.yarnpkg.com/@ardatan/aggregate-error/-/aggregate-error-0.0.6.tgz#fe6924771ea40fc98dc7a7045c2e872dc8527609" + integrity sha512-vyrkEHG1jrukmzTPtyWB4NLPauUw5bQeg4uhn8f+1SSynmrOcyvlb1GKQjjgoBzElLdfXCRYX8UnBlhklOHYRQ== dependencies: - "@apollo/usage-reporting-protobuf" "^4.0.0" - "@apollo/utils.dropunuseddefinitions" "^1.1.0" - "@apollo/utils.printwithreducedwhitespace" "^1.1.0" - "@apollo/utils.removealiases" "1.0.0" - "@apollo/utils.sortast" "^1.1.0" - "@apollo/utils.stripsensitiveliterals" "^1.2.0" - -"@apollographql/apollo-tools@^0.5.3": - version "0.5.4" - resolved "https://registry.yarnpkg.com/@apollographql/apollo-tools/-/apollo-tools-0.5.4.tgz#cb3998c6cf12e494b90c733f44dd9935e2d8196c" - integrity sha512-shM3q7rUbNyXVVRkQJQseXv6bnYM3BUma/eZhwXR4xsuM+bqWnJKvW7SAfRjP7LuSCocrexa5AXhjjawNHrIlw== + tslib "~2.0.1" -"@apollographql/graphql-playground-html@1.6.29": - version "1.6.29" - resolved "https://registry.yarnpkg.com/@apollographql/graphql-playground-html/-/graphql-playground-html-1.6.29.tgz#a7a646614a255f62e10dcf64a7f68ead41dec453" - integrity 
sha512-xCcXpoz52rI4ksJSdOCxeOCn2DLocxwHf9dVT/Q90Pte1LX+LY+91SFtJF3KXVHH8kEin+g1KKCQPKBjZJfWNA== - dependencies: - xss "^1.0.8" +"@ardatan/relay-compiler@12.0.0": + version "12.0.0" + resolved "https://registry.yarnpkg.com/@ardatan/relay-compiler/-/relay-compiler-12.0.0.tgz#2e4cca43088e807adc63450e8cab037020e91106" + integrity sha512-9anThAaj1dQr6IGmzBMcfzOQKTa5artjuPmw8NYK/fiGEMjADbSguBY2FMDykt+QhilR3wc9VA/3yVju7JHg7Q== + dependencies: + "@babel/core" "^7.14.0" + "@babel/generator" "^7.14.0" + "@babel/parser" "^7.14.0" + "@babel/runtime" "^7.0.0" + "@babel/traverse" "^7.14.0" + "@babel/types" "^7.0.0" + babel-preset-fbjs "^3.4.0" + chalk "^4.0.0" + fb-watchman "^2.0.0" + fbjs "^3.0.0" + glob "^7.1.1" + immutable "~3.7.6" + invariant "^2.2.4" + nullthrows "^1.1.1" + relay-runtime "12.0.0" + signedsource "^1.0.0" + yargs "^15.3.1" "@babel/code-frame@^7.0.0": version "7.18.6" @@ -117,52 +110,190 @@ dependencies: "@babel/highlight" "^7.18.6" -"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.22.5": - version "7.22.5" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.5.tgz#b1f6c86a02d85d2dd3368a2b67c09add8cd0c255" - integrity sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA== +"@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.10", "@babel/code-frame@^7.22.5": + version "7.22.13" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.13.tgz#e3c1c099402598483b7a8c46a721d1038803755e" + integrity sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w== + dependencies: + "@babel/highlight" "^7.22.13" + chalk "^2.4.2" -"@babel/helper-compilation-targets@^7.17.7": +"@babel/compat-data@^7.13.0", "@babel/compat-data@^7.20.5", "@babel/compat-data@^7.22.9": + version "7.22.9" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.9.tgz#71cdb00a1ce3a329ce4cbec3a44f9fef35669730" + integrity 
sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ== + +"@babel/core@^7.14.0": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.11.tgz#8033acaa2aa24c3f814edaaa057f3ce0ba559c24" + integrity sha512-lh7RJrtPdhibbxndr6/xx0w8+CVlY5FJZiaSz908Fpy+G0xkBFTvwLcKJFF4PJxVfGhVWNebikpWGnOoC71juQ== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.22.10" + "@babel/generator" "^7.22.10" + "@babel/helper-compilation-targets" "^7.22.10" + "@babel/helper-module-transforms" "^7.22.9" + "@babel/helpers" "^7.22.11" + "@babel/parser" "^7.22.11" + "@babel/template" "^7.22.5" + "@babel/traverse" "^7.22.11" + "@babel/types" "^7.22.11" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/generator@^7.12.13", "@babel/generator@^7.14.0", "@babel/generator@^7.22.10": + version "7.22.10" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.10.tgz#c92254361f398e160645ac58831069707382b722" + integrity sha512-79KIf7YiWjjdZ81JnLujDRApWtl7BxTqWD88+FFdQEIOG8LJ0etDOM7CXuIgGJa55sGOwZVwuEsaLEm0PJ5/+A== + dependencies: + "@babel/types" "^7.22.10" + "@jridgewell/gen-mapping" "^0.3.2" + "@jridgewell/trace-mapping" "^0.3.17" + jsesc "^2.5.1" + +"@babel/helper-annotate-as-pure@^7.22.5": version "7.22.5" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz#fc7319fc54c5e2fa14b2909cf3c5fd3046813e02" - integrity sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw== + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz#e7f06737b197d580a01edf75d97e2c8be99d3882" + integrity sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg== dependencies: - "@babel/compat-data" "^7.22.5" - "@babel/helper-validator-option" "^7.22.5" - 
browserslist "^4.21.3" - lru-cache "^5.1.1" - semver "^6.3.0" + "@babel/types" "^7.22.5" -"@babel/helper-define-polyfill-provider@^0.4.0": - version "0.4.0" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz#487053f103110f25b9755c5980e031e93ced24d8" - integrity sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg== +"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.20.7", "@babel/helper-compilation-targets@^7.22.10", "@babel/helper-compilation-targets@^7.22.5", "@babel/helper-compilation-targets@^7.22.6": + version "7.22.10" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.10.tgz#01d648bbc25dd88f513d862ee0df27b7d4e67024" + integrity sha512-JMSwHD4J7SLod0idLq5PKgI+6g/hLD/iuWBq08ZX49xE14VpVEojJ5rHWptpirV2j020MvypRLAXAO50igCJ5Q== dependencies: - "@babel/helper-compilation-targets" "^7.17.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/compat-data" "^7.22.9" + "@babel/helper-validator-option" "^7.22.5" + browserslist "^4.21.9" + lru-cache "^5.1.1" + semver "^6.3.1" + +"@babel/helper-create-class-features-plugin@^7.18.6": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.11.tgz#4078686740459eeb4af3494a273ac09148dfb213" + integrity sha512-y1grdYL4WzmUDBRGK0pDbIoFd7UZKoDurDzWEoNMYoj1EL+foGRQNyPWDcC+YyegN5y1DUsFFmzjGijB3nSVAQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-environment-visitor" "^7.22.5" + "@babel/helper-function-name" "^7.22.5" + "@babel/helper-member-expression-to-functions" "^7.22.5" + "@babel/helper-optimise-call-expression" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + semver "^6.3.1" + 
+"@babel/helper-define-polyfill-provider@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.1.5.tgz#3c2f91b7971b9fc11fe779c945c014065dea340e" + integrity sha512-nXuzCSwlJ/WKr8qxzW816gwyT6VZgiJG17zR40fou70yfAcqjoNyTLl/DQ+FExw5Hx5KNqshmN8Ldl/r2N7cTg== + dependencies: + "@babel/helper-compilation-targets" "^7.13.0" + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/traverse" "^7.13.0" debug "^4.1.1" lodash.debounce "^4.0.8" resolve "^1.14.2" semver "^6.1.2" -"@babel/helper-module-imports@^7.22.5": +"@babel/helper-environment-visitor@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz#f06dd41b7c1f44e1f8da6c4055b41ab3a09a7e98" + integrity sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q== + +"@babel/helper-function-name@^7.12.13", "@babel/helper-function-name@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz#ede300828905bb15e582c037162f99d5183af1be" + integrity sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ== + dependencies: + "@babel/template" "^7.22.5" + "@babel/types" "^7.22.5" + +"@babel/helper-hoist-variables@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" + integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-member-expression-to-functions@^7.22.5": + version "7.22.5" + resolved 
"https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz#0a7c56117cad3372fbf8d2fb4bf8f8d64a1e76b2" + integrity sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz#1a8f4c9f4027d23f520bd76b364d44434a72660c" integrity sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg== dependencies: "@babel/types" "^7.22.5" -"@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.22.5": +"@babel/helper-module-transforms@^7.22.9": + version "7.22.9" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.9.tgz#92dfcb1fbbb2bc62529024f72d942a8c97142129" + integrity sha512-t+WA2Xn5K+rTeGtC8jCsdAH52bjggG5TKRuRrAGNM/mjIbO4GxvlLMFOEz9wXY5I2XQ60PMFsAG2WIcG82dQMQ== + dependencies: + "@babel/helper-environment-visitor" "^7.22.5" + "@babel/helper-module-imports" "^7.22.5" + "@babel/helper-simple-access" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + "@babel/helper-validator-identifier" "^7.22.5" + +"@babel/helper-optimise-call-expression@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz#f21531a9ccbff644fdd156b4077c16ff0c3f609e" + integrity sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0": version "7.22.5" 
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295" integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg== +"@babel/helper-replace-supers@^7.22.5", "@babel/helper-replace-supers@^7.22.9": + version "7.22.9" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.22.9.tgz#cbdc27d6d8d18cd22c81ae4293765a5d9afd0779" + integrity sha512-LJIKvvpgPOPUThdYqcX6IXRuIcTkcAub0IaDRGCZH0p5GPUp7PhRU9QVgFcDDd51BaPkk77ZjqFwh6DZTAEmGg== + dependencies: + "@babel/helper-environment-visitor" "^7.22.5" + "@babel/helper-member-expression-to-functions" "^7.22.5" + "@babel/helper-optimise-call-expression" "^7.22.5" + +"@babel/helper-simple-access@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz#4938357dc7d782b80ed6dbb03a0fba3d22b1d5de" + integrity sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-skip-transparent-expression-wrappers@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz#007f15240b5751c537c40e77abb4e89eeaaa8847" + integrity sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-split-export-declaration@^7.12.13", "@babel/helper-split-export-declaration@^7.22.6": + version "7.22.6" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c" + integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g== + dependencies: + "@babel/types" 
"^7.22.5" + "@babel/helper-string-parser@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f" integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw== -"@babel/helper-validator-identifier@^7.22.5": +"@babel/helper-validator-identifier@^7.12.11", "@babel/helper-validator-identifier@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz#9544ef6a33999343c8740fa51350f30eeaaaf193" integrity sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ== @@ -172,15 +303,219 @@ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz#de52000a15a177413c8234fa3a8af4ee8102d0ac" integrity sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw== -"@babel/highlight@^7.22.5": - version "7.22.5" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.5.tgz#aa6c05c5407a67ebce408162b7ede789b4d22031" - integrity sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw== +"@babel/helpers@^7.22.11": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.11.tgz#b02f5d5f2d7abc21ab59eeed80de410ba70b056a" + integrity sha512-vyOXC8PBWaGc5h7GMsNx68OH33cypkEDJCHvYVVgVbbxJDROYVtexSk0gK5iCF1xNjRIN2s8ai7hwkWDq5szWg== + dependencies: + "@babel/template" "^7.22.5" + "@babel/traverse" "^7.22.11" + "@babel/types" "^7.22.11" + +"@babel/highlight@^7.18.6", "@babel/highlight@^7.22.13": + version "7.22.13" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.13.tgz#9cda839e5d3be9ca9e8c26b6dd69e7548f0cbf16" + integrity sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ== 
dependencies: "@babel/helper-validator-identifier" "^7.22.5" - chalk "^2.0.0" + chalk "^2.4.2" js-tokens "^4.0.0" +"@babel/parser@7.12.16": + version "7.12.16" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.12.16.tgz#cc31257419d2c3189d394081635703f549fc1ed4" + integrity sha512-c/+u9cqV6F0+4Hpq01jnJO+GLp2DdT63ppz9Xa+6cHaajM9VFzK/iDXiKK65YtpeVwu+ctfS6iqlMqRgQRzeCw== + +"@babel/parser@^7.12.13", "@babel/parser@^7.14.0", "@babel/parser@^7.22.11", "@babel/parser@^7.22.5": + version "7.22.14" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.14.tgz#c7de58e8de106e88efca42ce17f0033209dfd245" + integrity sha512-1KucTHgOvaw/LzCVrEOAyXkr9rQlp0A1HiHRYnSUE9dmb8PvPW7o5sscg+5169r54n3vGlbx6GevTE/Iw/P3AQ== + +"@babel/plugin-proposal-class-properties@^7.0.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz#b110f59741895f7ec21a6fff696ec46265c446a3" + integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-proposal-object-rest-spread@^7.0.0": + version "7.20.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz#aa662940ef425779c75534a5c41e9d936edc390a" + integrity sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg== + dependencies: + "@babel/compat-data" "^7.20.5" + "@babel/helper-compilation-targets" "^7.20.7" + "@babel/helper-plugin-utils" "^7.20.2" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.20.7" + +"@babel/plugin-syntax-class-properties@^7.0.0": + version "7.12.13" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" + integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-syntax-flow@^7.0.0", "@babel/plugin-syntax-flow@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.22.5.tgz#163b820b9e7696ce134df3ee716d9c0c98035859" + integrity sha512-9RdCl0i+q0QExayk2nOS7853w08yLucnnPML6EN9S8fgMPVtdLDCdx/cOQ/i44Lb9UeQX9A35yaqBBOMMZxPxQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-jsx@^7.0.0", "@babel/plugin-syntax-jsx@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz#a6b68e84fb76e759fc3b93e901876ffabbe1d918" + integrity sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-object-rest-spread@^7.0.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" + integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-transform-arrow-functions@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz#e5ba566d0c58a5b2ba2a8b795450641950b71958" + integrity sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + 
+"@babel/plugin-transform-block-scoped-functions@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz#27978075bfaeb9fa586d3cb63a3d30c1de580024" + integrity sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-block-scoping@^7.0.0": + version "7.22.10" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.10.tgz#88a1dccc3383899eb5e660534a76a22ecee64faa" + integrity sha512-1+kVpGAOOI1Albt6Vse7c8pHzcZQdQKW+wJH+g8mCaszOdDVwRXa/slHPqIw+oJAJANTKDMuM2cBdV0Dg618Vg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-classes@^7.0.0": + version "7.22.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.6.tgz#e04d7d804ed5b8501311293d1a0e6d43e94c3363" + integrity sha512-58EgM6nuPNG6Py4Z3zSuu0xWu2VfodiMi72Jt5Kj2FECmaYk1RrTXA45z6KBFsu9tRgwQDwIiY4FXTt+YsSFAQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-compilation-targets" "^7.22.6" + "@babel/helper-environment-visitor" "^7.22.5" + "@babel/helper-function-name" "^7.22.5" + "@babel/helper-optimise-call-expression" "^7.22.5" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz#cd1e994bf9f316bd1c2dafcd02063ec261bb3869" + integrity sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/template" 
"^7.22.5" + +"@babel/plugin-transform-destructuring@^7.0.0": + version "7.22.10" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.10.tgz#38e2273814a58c810b6c34ea293be4973c4eb5e2" + integrity sha512-dPJrL0VOyxqLM9sritNbMSGx/teueHF/htMKrPT7DNxccXxRDPYqlgPFFdr8u+F+qUZOkZoXue/6rL5O5GduEw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-flow-strip-types@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.22.5.tgz#0bb17110c7bf5b35a60754b2f00c58302381dee2" + integrity sha512-tujNbZdxdG0/54g/oua8ISToaXTFBf8EnSb5PgQSciIXWOWKX3S4+JR7ZE9ol8FZwf9kxitzkGQ+QWeov/mCiA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-flow" "^7.22.5" + +"@babel/plugin-transform-for-of@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz#ab1b8a200a8f990137aff9a084f8de4099ab173f" + integrity sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-function-name@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz#935189af68b01898e0d6d99658db6b164205c143" + integrity sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg== + dependencies: + "@babel/helper-compilation-targets" "^7.22.5" + "@babel/helper-function-name" "^7.22.5" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-literals@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz#e9341f4b5a167952576e23db8d435849b1dd7920" + integrity 
sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-member-expression-literals@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz#4fcc9050eded981a468347dd374539ed3e058def" + integrity sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-modules-commonjs@^7.0.0": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.11.tgz#d7991d3abad199c03b68ee66a64f216c47ffdfae" + integrity sha512-o2+bg7GDS60cJMgz9jWqRUsWkMzLCxp+jFDeDUT5sjRlAxcJWZ2ylNdI7QQ2+CH5hWu7OnN+Cv3htt7AkSf96g== + dependencies: + "@babel/helper-module-transforms" "^7.22.9" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-simple-access" "^7.22.5" + +"@babel/plugin-transform-object-super@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz#794a8d2fcb5d0835af722173c1a9d704f44e218c" + integrity sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.5" + +"@babel/plugin-transform-parameters@^7.0.0", "@babel/plugin-transform-parameters@^7.20.7": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz#c3542dd3c39b42c8069936e48717a8d179d63a18" + integrity sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + 
+"@babel/plugin-transform-property-literals@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz#b5ddabd73a4f7f26cd0e20f5db48290b88732766" + integrity sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-display-name@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz#3c4326f9fce31c7968d6cb9debcaf32d9e279a2b" + integrity sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-jsx@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz#932c291eb6dd1153359e2a90cb5e557dcf068416" + integrity sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-module-imports" "^7.22.5" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-jsx" "^7.22.5" + "@babel/types" "^7.22.5" + "@babel/plugin-transform-runtime@^7.5.5": version "7.13.10" resolved "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.10.tgz" @@ -193,12 +528,34 @@ babel-plugin-polyfill-regenerator "^0.1.2" semver "^6.3.0" -"@babel/runtime@^7.4.4", "@babel/runtime@^7.9.2": +"@babel/plugin-transform-shorthand-properties@^7.0.0": version "7.22.5" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.5.tgz#8564dd588182ce0047d55d7a75e93921107b57ec" - integrity sha512-ecjvYlnAaZ/KVneE/OdKYBYfgXV3Ptu6zQWmgEF7vwKhQnvVS6bjMD2XYgj+SNvQ1GfK/pjgokfPkC/2CO8CuA== + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz#6e277654be82b5559fc4b9f58088507c24f0c624" + integrity sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA== dependencies: - regenerator-runtime "^0.13.11" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-spread@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz#6487fd29f229c95e284ba6c98d65eafb893fea6b" + integrity sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + +"@babel/plugin-transform-template-literals@^7.0.0": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz#8f38cf291e5f7a8e60e9f733193f0bcc10909bff" + integrity sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/runtime@^7.0.0", "@babel/runtime@^7.11.2": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.11.tgz#7a9ba3bbe406ad6f9e8dd4da2ece453eb23a77a4" + integrity sha512-ee7jVNlWN09+KftVOu9n7S8gQzD/Z6hN/I8VBRXW4P1+Xe7kJGXMwu8vds4aGIMHZnNbdpSWCfZZtinytpcAvA== + dependencies: + regenerator-runtime "^0.14.0" "@babel/runtime@^7.5.5", "@babel/runtime@^7.6.3": version "7.13.10" @@ -207,6 +564,64 @@ dependencies: regenerator-runtime "^0.13.4" +"@babel/template@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.5.tgz#0c8c4d944509875849bd0344ff0050756eefc6ec" + integrity sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw== + dependencies: + 
"@babel/code-frame" "^7.22.5" + "@babel/parser" "^7.22.5" + "@babel/types" "^7.22.5" + +"@babel/traverse@7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.12.13.tgz#689f0e4b4c08587ad26622832632735fb8c4e0c0" + integrity sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.12.13" + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/parser" "^7.12.13" + "@babel/types" "^7.12.13" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.19" + +"@babel/traverse@^7.13.0", "@babel/traverse@^7.14.0", "@babel/traverse@^7.22.11": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.11.tgz#71ebb3af7a05ff97280b83f05f8865ac94b2027c" + integrity sha512-mzAenteTfomcB7mfPtyi+4oe5BZ6MXxWcn4CX+h4IRJ+OOGXBrWU6jDQavkQI9Vuc5P+donFabBfFCcmWka9lQ== + dependencies: + "@babel/code-frame" "^7.22.10" + "@babel/generator" "^7.22.10" + "@babel/helper-environment-visitor" "^7.22.5" + "@babel/helper-function-name" "^7.22.5" + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + "@babel/parser" "^7.22.11" + "@babel/types" "^7.22.11" + debug "^4.1.0" + globals "^11.1.0" + +"@babel/types@7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.12.13.tgz#8be1aa8f2c876da11a9cf650c0ecf656913ad611" + integrity sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ== + dependencies: + "@babel/helper-validator-identifier" "^7.12.11" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + +"@babel/types@^7.0.0", "@babel/types@^7.12.13", "@babel/types@^7.22.10", "@babel/types@^7.22.11": + version "7.22.11" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.11.tgz#0e65a6a1d4d9cbaa892b2213f6159485fe632ea2" + integrity 
sha512-siazHiGuZRz9aB9NpHy9GOs9xiQPKnMzgdr493iI1M67vRXpnEq8ZOOKzezC5q7zwuQ6sDhdSp4SD9ixKSqKZg== + dependencies: + "@babel/helper-string-parser" "^7.22.5" + "@babel/helper-validator-identifier" "^7.22.5" + to-fast-properties "^2.0.0" + "@babel/types@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.5.tgz#cd93eeaab025880a3a47ec881f4b096a5b786fbe" @@ -223,49 +638,6 @@ dependencies: "@jridgewell/trace-mapping" "0.3.9" -"@ensdomains/address-encoder@^0.1.7": - version "0.1.9" - resolved "https://registry.yarnpkg.com/@ensdomains/address-encoder/-/address-encoder-0.1.9.tgz#f948c485443d9ef7ed2c0c4790e931c33334d02d" - integrity sha512-E2d2gP4uxJQnDu2Kfg1tHNspefzbLT8Tyjrm5sEuim32UkU2sm5xL4VXtgc2X33fmPEw9+jUMpGs4veMbf+PYg== - dependencies: - bech32 "^1.1.3" - blakejs "^1.1.0" - bn.js "^4.11.8" - bs58 "^4.0.1" - crypto-addr-codec "^0.1.7" - nano-base32 "^1.0.1" - ripemd160 "^2.0.2" - -"@ensdomains/ens@0.4.5": - version "0.4.5" - resolved "https://registry.yarnpkg.com/@ensdomains/ens/-/ens-0.4.5.tgz#e0aebc005afdc066447c6e22feb4eda89a5edbfc" - integrity sha512-JSvpj1iNMFjK6K+uVl4unqMoa9rf5jopb8cya5UGBWz23Nw8hSNT7efgUx4BTlAPAgpNlEioUfeTyQ6J9ZvTVw== - dependencies: - bluebird "^3.5.2" - eth-ens-namehash "^2.0.8" - solc "^0.4.20" - testrpc "0.0.1" - web3-utils "^1.0.0-beta.31" - -"@ensdomains/ensjs@^2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@ensdomains/ensjs/-/ensjs-2.1.0.tgz#0a7296c1f3d735ef019320d863a7846a0760c460" - integrity sha512-GRbGPT8Z/OJMDuxs75U/jUNEC0tbL0aj7/L/QQznGYKm/tiasp+ndLOaoULy9kKJFC0TBByqfFliEHDgoLhyog== - dependencies: - "@babel/runtime" "^7.4.4" - "@ensdomains/address-encoder" "^0.1.7" - "@ensdomains/ens" "0.4.5" - "@ensdomains/resolver" "0.2.4" - content-hash "^2.5.2" - eth-ens-namehash "^2.0.8" - ethers "^5.0.13" - js-sha3 "^0.8.0" - -"@ensdomains/resolver@0.2.4": - version "0.2.4" - resolved 
"https://registry.yarnpkg.com/@ensdomains/resolver/-/resolver-0.2.4.tgz#c10fe28bf5efbf49bff4666d909aed0265efbc89" - integrity sha512-bvaTH34PMCbv6anRa9I/0zjLJgY4EuznbEMgbV77JBCQ9KNC46rzi0avuxpOfu+xDjPEtSFGqVEOr5GlUSGudA== - "@ethereumjs/common@2.5.0": version "2.5.0" resolved "https://registry.yarnpkg.com/@ethereumjs/common/-/common-2.5.0.tgz#ec61551b31bef7a69d1dc634d8932468866a4268" @@ -274,7 +646,7 @@ crc-32 "^1.2.0" ethereumjs-util "^7.1.1" -"@ethereumjs/common@^2.4.0", "@ethereumjs/common@^2.5.0", "@ethereumjs/common@^2.6.4": +"@ethereumjs/common@^2.5.0": version "2.6.5" resolved "https://registry.yarnpkg.com/@ethereumjs/common/-/common-2.6.5.tgz#0a75a22a046272579d91919cb12d84f2756e8d30" integrity sha512-lRyVQOeCDaIVtgfbowla32pzeDv2Obr8oR8Put5RdUBNRGr1VGPGQNGP6elWIpgK3YdpzqTOh4GyUGOureVeeA== @@ -282,6 +654,11 @@ crc-32 "^1.2.0" ethereumjs-util "^7.1.5" +"@ethereumjs/rlp@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@ethereumjs/rlp/-/rlp-4.0.1.tgz#626fabfd9081baab3d0a3074b0c7ecaf674aaa41" + integrity sha512-tqsQiBQDQdmPWE1xkkBq4rlSW5QZpLOUJ5RJh2/9fug+q9tnUhuZoVLk7s0scUIKTOzEtR72DFBXI4WiZcMpvw== + "@ethereumjs/tx@3.3.2": version "3.3.2" resolved "https://registry.yarnpkg.com/@ethereumjs/tx/-/tx-3.3.2.tgz#348d4624bf248aaab6c44fec2ae67265efe3db00" @@ -290,13 +667,29 @@ "@ethereumjs/common" "^2.5.0" ethereumjs-util "^7.1.2" -"@ethereumjs/tx@^3.3.0": - version "3.5.2" - resolved "https://registry.yarnpkg.com/@ethereumjs/tx/-/tx-3.5.2.tgz#197b9b6299582ad84f9527ca961466fce2296c1c" - integrity sha512-gQDNJWKrSDGu2w7w0PzVXVBNMzb7wwdDOmOqczmhNjqFxFuIbhVJDwiGEnxFNC2/b8ifcZzY7MLcluizohRzNw== - dependencies: - "@ethereumjs/common" "^2.6.4" - ethereumjs-util "^7.1.5" +"@ethereumjs/util@^8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@ethereumjs/util/-/util-8.1.0.tgz#299df97fb6b034e0577ce9f94c7d9d1004409ed4" + integrity sha512-zQ0IqbdX8FZ9aw11vP+dZkKDkS+kgIvQPHnSAXzP9pLu+Rfu3D3XEeLbicvoXJTYnhZiPmsZUxgdzXwNKxRPbA== + dependencies: + 
"@ethereumjs/rlp" "^4.0.1" + ethereum-cryptography "^2.0.0" + micro-ftch "^0.3.1" + +"@ethersproject/abi@5.0.0-beta.153": + version "5.0.0-beta.153" + resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.0.0-beta.153.tgz#43a37172b33794e4562999f6e2d555b7599a8eee" + integrity sha512-aXweZ1Z7vMNzJdLpR1CZUAIgnwjrZeUSvN9syCwlBaEBUFJmFY+HHnfuTI5vIhVs/mRkfJVrbEyl51JZQqyjAg== + dependencies: + "@ethersproject/address" ">=5.0.0-beta.128" + "@ethersproject/bignumber" ">=5.0.0-beta.130" + "@ethersproject/bytes" ">=5.0.0-beta.129" + "@ethersproject/constants" ">=5.0.0-beta.128" + "@ethersproject/hash" ">=5.0.0-beta.128" + "@ethersproject/keccak256" ">=5.0.0-beta.127" + "@ethersproject/logger" ">=5.0.0-beta.129" + "@ethersproject/properties" ">=5.0.0-beta.131" + "@ethersproject/strings" ">=5.0.0-beta.130" "@ethersproject/abi@5.0.7": version "5.0.7" @@ -313,7 +706,7 @@ "@ethersproject/properties" "^5.0.3" "@ethersproject/strings" "^5.0.4" -"@ethersproject/abi@5.7.0", "@ethersproject/abi@^5.6.3", "@ethersproject/abi@^5.7.0": +"@ethersproject/abi@^5.6.3": version "5.7.0" resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.7.0.tgz#b3f3e045bbbeed1af3947335c247ad625a44e449" integrity sha512-351ktp42TiRcYB3H1OP8yajPeAQstMW/yCFokj/AthP9bLHzQFPlOrxOcwYEDkUAICmOHljvN4K39OMTMUa9RA== @@ -328,7 +721,7 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" -"@ethersproject/abstract-provider@5.7.0", "@ethersproject/abstract-provider@^5.7.0": +"@ethersproject/abstract-provider@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/abstract-provider/-/abstract-provider-5.7.0.tgz" integrity sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw== @@ -341,7 +734,7 @@ "@ethersproject/transactions" "^5.7.0" "@ethersproject/web" "^5.7.0" -"@ethersproject/abstract-signer@5.7.0", "@ethersproject/abstract-signer@^5.7.0": +"@ethersproject/abstract-signer@^5.7.0": version "5.7.0" resolved 
"https://registry.npmjs.org/@ethersproject/abstract-signer/-/abstract-signer-5.7.0.tgz" integrity sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ== @@ -352,7 +745,7 @@ "@ethersproject/logger" "^5.7.0" "@ethersproject/properties" "^5.7.0" -"@ethersproject/address@5.7.0", "@ethersproject/address@^5.0.4", "@ethersproject/address@^5.7.0": +"@ethersproject/address@>=5.0.0-beta.128", "@ethersproject/address@^5.0.4", "@ethersproject/address@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/address/-/address-5.7.0.tgz" integrity sha512-9wYhYt7aghVGo758POM5nqcOMaE168Q6aRLJZwUmiqSrAungkG74gSSeKEIR7ukixesdRZGPgVqme6vmxs1fkA== @@ -363,22 +756,14 @@ "@ethersproject/logger" "^5.7.0" "@ethersproject/rlp" "^5.7.0" -"@ethersproject/base64@5.7.0", "@ethersproject/base64@^5.7.0": +"@ethersproject/base64@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/base64/-/base64-5.7.0.tgz" integrity sha512-Dr8tcHt2mEbsZr/mwTPIQAf3Ai0Bks/7gTw9dSqk1mQvhW3XvRlmDJr/4n+wg1JmCl16NZue17CDh8xb/vZ0sQ== dependencies: "@ethersproject/bytes" "^5.7.0" -"@ethersproject/basex@5.7.0", "@ethersproject/basex@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/basex/-/basex-5.7.0.tgz#97034dc7e8938a8ca943ab20f8a5e492ece4020b" - integrity sha512-ywlh43GwZLv2Voc2gQVTKBoVQ1mti3d8HK5aMxsfu/nRDnMmNqaSJ3r3n85HBByT8OpoY96SXM1FogC533T4zw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - -"@ethersproject/bignumber@5.7.0", "@ethersproject/bignumber@^5.0.7", "@ethersproject/bignumber@^5.7.0": +"@ethersproject/bignumber@>=5.0.0-beta.130", "@ethersproject/bignumber@^5.0.7", "@ethersproject/bignumber@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/bignumber/-/bignumber-5.7.0.tgz" integrity sha512-n1CAdIHRWjSucQO3MC1zPSVgV/6dy/fjL9pMrPP9peL+QxEg9wOsVqwD4+818B6LUEtaXzVHQiuivzRoxPxUGw== @@ -387,37 +772,21 @@ "@ethersproject/logger" 
"^5.7.0" bn.js "^5.2.1" -"@ethersproject/bytes@5.7.0", "@ethersproject/bytes@^5.0.4", "@ethersproject/bytes@^5.7.0": +"@ethersproject/bytes@>=5.0.0-beta.129", "@ethersproject/bytes@^5.0.4", "@ethersproject/bytes@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/bytes/-/bytes-5.7.0.tgz" integrity sha512-nsbxwgFXWh9NyYWo+U8atvmMsSdKJprTcICAkvbBffT75qDocbuggBU0SJiVK2MuTrp0q+xvLkTnGMPK1+uA9A== dependencies: "@ethersproject/logger" "^5.7.0" -"@ethersproject/constants@5.7.0", "@ethersproject/constants@^5.0.4", "@ethersproject/constants@^5.7.0": +"@ethersproject/constants@>=5.0.0-beta.128", "@ethersproject/constants@^5.0.4", "@ethersproject/constants@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/constants/-/constants-5.7.0.tgz" integrity sha512-DHI+y5dBNvkpYUMiRQyxRBYBefZkJfo70VUkUAsRjcPs47muV9evftfZ0PJVCXYbAiCgght0DtcF9srFQmIgWA== dependencies: "@ethersproject/bignumber" "^5.7.0" -"@ethersproject/contracts@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/contracts/-/contracts-5.7.0.tgz#c305e775abd07e48aa590e1a877ed5c316f8bd1e" - integrity sha512-5GJbzEU3X+d33CdfPhcyS+z8MzsTrBGk/sc+G+59+tPa9yFkl6HQ9D6L0QMgNTA9q8dT0XKxxkyp883XsQvbbg== - dependencies: - "@ethersproject/abi" "^5.7.0" - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - -"@ethersproject/hash@5.7.0", "@ethersproject/hash@^5.0.4", "@ethersproject/hash@^5.7.0": +"@ethersproject/hash@>=5.0.0-beta.128", "@ethersproject/hash@^5.0.4", "@ethersproject/hash@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/hash/-/hash-5.7.0.tgz" integrity 
sha512-qX5WrQfnah1EFnO5zJv1v46a8HW0+E5xuBBDTwMFZLuVTx0tbU2kkx15NqdjxecrLGatQN9FGQKpb1FKdHCt+g== @@ -432,44 +801,7 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" -"@ethersproject/hdnode@5.7.0", "@ethersproject/hdnode@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/hdnode/-/hdnode-5.7.0.tgz#e627ddc6b466bc77aebf1a6b9e47405ca5aef9cf" - integrity sha512-OmyYo9EENBPPf4ERhR7oj6uAtUAhYGqOnIS+jE5pTXvdKBS99ikzq1E7Iv0ZQZ5V36Lqx1qZLeak0Ra16qpeOg== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/json-wallets@5.7.0", "@ethersproject/json-wallets@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/json-wallets/-/json-wallets-5.7.0.tgz#5e3355287b548c32b368d91014919ebebddd5360" - integrity sha512-8oee5Xgu6+RKgJTkvEMl2wDgSPSAQ9MB/3JYjFV9jlKvcYHUXZC+cQp0njgmxdHkYWn8s6/IqIZYm0YWCjO/0g== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - aes-js "3.0.0" - scrypt-js "3.0.1" - -"@ethersproject/keccak256@5.7.0", "@ethersproject/keccak256@^5.0.3", "@ethersproject/keccak256@^5.7.0": +"@ethersproject/keccak256@>=5.0.0-beta.127", "@ethersproject/keccak256@^5.0.3", "@ethersproject/keccak256@^5.7.0": version 
"5.7.0" resolved "https://registry.npmjs.org/@ethersproject/keccak256/-/keccak256-5.7.0.tgz" integrity sha512-2UcPboeL/iW+pSg6vZ6ydF8tCnv3Iu/8tUmLLzWWGzxWKFFqOBQFLo6uLUv6BDrLgCDfN28RJ/wtByx+jZ4KBg== @@ -477,18 +809,11 @@ "@ethersproject/bytes" "^5.7.0" js-sha3 "0.8.0" -"@ethersproject/logger@5.7.0", "@ethersproject/logger@^5.0.5", "@ethersproject/logger@^5.7.0": +"@ethersproject/logger@>=5.0.0-beta.129", "@ethersproject/logger@^5.0.5", "@ethersproject/logger@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/logger/-/logger-5.7.0.tgz" integrity sha512-0odtFdXu/XHtjQXJYA3u9G0G8btm0ND5Cu8M7i5vhEcE8/HmF4Lbdqanwyv4uQTr2tx6b7fQRmgLrsnpQlmnig== -"@ethersproject/networks@5.7.1": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/networks/-/networks-5.7.1.tgz#118e1a981d757d45ccea6bb58d9fd3d9db14ead6" - integrity sha512-n/MufjFYv3yFcUyfhnXotyDlNdFb7onmkSy8aQERi2PjNcnWQ66xXxa3XlS8nCcA8aJKJjIIMNJTC7tu80GwpQ== - dependencies: - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/networks/-/networks-5.7.0.tgz" @@ -496,56 +821,14 @@ dependencies: "@ethersproject/logger" "^5.7.0" -"@ethersproject/pbkdf2@5.7.0", "@ethersproject/pbkdf2@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/pbkdf2/-/pbkdf2-5.7.0.tgz#d2267d0a1f6e123f3771007338c47cccd83d3102" - integrity sha512-oR/dBRZR6GTyaofd86DehG72hY6NpAjhabkhxgr3X2FpJtJuodEl2auADWBZfhDHgVCbu3/H/Ocq2uC6dpNjjw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - -"@ethersproject/properties@5.7.0", "@ethersproject/properties@^5.0.3", "@ethersproject/properties@^5.7.0": +"@ethersproject/properties@>=5.0.0-beta.131", "@ethersproject/properties@^5.0.3", "@ethersproject/properties@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/properties/-/properties-5.7.0.tgz" integrity 
sha512-J87jy8suntrAkIZtecpxEPxY//szqr1mlBaYlQ0r4RCaiD2hjheqF9s1LVE8vVuJCXisjIP+JgtK/Do54ej4Sw== dependencies: "@ethersproject/logger" "^5.7.0" -"@ethersproject/providers@5.7.2": - version "5.7.2" - resolved "https://registry.yarnpkg.com/@ethersproject/providers/-/providers-5.7.2.tgz#f8b1a4f275d7ce58cf0a2eec222269a08beb18cb" - integrity sha512-g34EWZ1WWAVgr4aptGlVBF8mhl3VWjv+8hoAnzStu8Ah22VHBsuGzP17eb6xDVRzw895G4W7vvx60lFFur/1Rg== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/base64" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/web" "^5.7.0" - bech32 "1.1.4" - ws "7.4.6" - -"@ethersproject/random@5.7.0", "@ethersproject/random@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/random/-/random-5.7.0.tgz#af19dcbc2484aae078bb03656ec05df66253280c" - integrity sha512-19WjScqRA8IIeWclFme75VMXSBvi4e6InrUNuaR4s5pTF2qNhcGdCUwdxUVGtDDqC00sDLCO93jPQoDUH4HVmQ== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/rlp@5.7.0", "@ethersproject/rlp@^5.7.0": +"@ethersproject/rlp@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/rlp/-/rlp-5.7.0.tgz" integrity sha512-rBxzX2vK8mVF7b0Tol44t5Tb8gomOHkj5guL+HhzQ1yBh/ydjGnpw6at+X6Iw0Kp3OzzzkcKp8N9r0W4kYSs9w== @@ -553,16 +836,7 @@ "@ethersproject/bytes" "^5.7.0" "@ethersproject/logger" "^5.7.0" -"@ethersproject/sha2@5.7.0", "@ethersproject/sha2@^5.7.0": - version "5.7.0" - resolved 
"https://registry.yarnpkg.com/@ethersproject/sha2/-/sha2-5.7.0.tgz#9a5f7a7824ef784f7f7680984e593a800480c9fb" - integrity sha512-gKlH42riwb3KYp0reLsFTokByAKoJdgFCwI+CCiX/k+Jm2mbNs6oOaCjYQSlI1+XBVejwH2KrmCbMAT/GnRDQw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - hash.js "1.1.7" - -"@ethersproject/signing-key@5.7.0", "@ethersproject/signing-key@^5.7.0": +"@ethersproject/signing-key@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/signing-key/-/signing-key-5.7.0.tgz" integrity sha512-MZdy2nL3wO0u7gkB4nA/pEf8lu1TlFswPNmy8AiYkfKTdO6eXBJyUdmHO/ehm/htHw9K/qF8ujnTyUAD+Ry54Q== @@ -574,19 +848,7 @@ elliptic "6.5.4" hash.js "1.1.7" -"@ethersproject/solidity@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/solidity/-/solidity-5.7.0.tgz#5e9c911d8a2acce2a5ebb48a5e2e0af20b631cb8" - integrity sha512-HmabMd2Dt/raavyaGukF4XxizWKhKQ24DoLtdNbBmNKUOPqwjsKQSdV9GQtj9CBEea9DlzETlVER1gYeXXBGaA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/strings@5.7.0", "@ethersproject/strings@^5.0.4", "@ethersproject/strings@^5.7.0": +"@ethersproject/strings@>=5.0.0-beta.130", "@ethersproject/strings@^5.0.4", "@ethersproject/strings@^5.7.0": version "5.7.0" resolved "https://registry.npmjs.org/@ethersproject/strings/-/strings-5.7.0.tgz" integrity sha512-/9nu+lj0YswRNSH0NXYqrh8775XNyEdUQAuf3f+SmOrnVewcJ5SBNAjF7lpgehKi4abvNNXyf+HX86czCdJ8Mg== @@ -595,7 +857,7 @@ "@ethersproject/constants" "^5.7.0" "@ethersproject/logger" "^5.7.0" -"@ethersproject/transactions@5.7.0", "@ethersproject/transactions@^5.6.2", "@ethersproject/transactions@^5.7.0": +"@ethersproject/transactions@^5.0.0-beta.135", "@ethersproject/transactions@^5.6.2", "@ethersproject/transactions@^5.7.0": version "5.7.0" resolved 
"https://registry.npmjs.org/@ethersproject/transactions/-/transactions-5.7.0.tgz" integrity sha512-kmcNicCp1lp8qanMTC3RIikGgoJ80ztTyvtsFvCYpSCfkjhD0jZ2LOrnbcuxuToLIUYYf+4XwD1rP+B/erDIhQ== @@ -610,39 +872,7 @@ "@ethersproject/rlp" "^5.7.0" "@ethersproject/signing-key" "^5.7.0" -"@ethersproject/units@5.7.0": - version "5.7.0" - resolved "https://registry.npmjs.org/@ethersproject/web/-/web-5.7.0.tgz" - integrity sha512-ApHcbbj+muRASVDSCl/tgxaH2LBkRMEYfLOLVa0COipx0+nlu0QKet7U2lEg0vdkh8XRSLf2nd1f1Uk9SrVSGA== - dependencies: - "@ethersproject/base64" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/wallet@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wallet/-/wallet-5.7.0.tgz#4e5d0790d96fe21d61d38fb40324e6c7ef350b2d" - integrity sha512-MhmXlJXEJFBFVKrDLB4ZdDzxcBxQ3rLyCkhNqVu3CDYvR97E+8r01UgrI+TI99Le+aYm/in/0vp86guJuM7FCA== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/json-wallets" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/web@5.7.1", "@ethersproject/web@^5.7.0": +"@ethersproject/web@^5.7.0": version "5.7.1" resolved "https://registry.yarnpkg.com/@ethersproject/web/-/web-5.7.1.tgz#de1f285b373149bee5928f4eb7bcb87ee5fbb4ae" integrity sha512-Gueu8lSvyjBWL4cYsWsjh6MtMwM0+H4HvqFPZfB6dV8ctbP9zFAO73VG1cMWae0FLPCtz0peKPpZY8/ugJJX2w== @@ -653,17 +883,6 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" 
-"@ethersproject/wordlists@5.7.0", "@ethersproject/wordlists@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wordlists/-/wordlists-5.7.0.tgz#8fb2c07185d68c3e09eb3bfd6e779ba2774627f5" - integrity sha512-S2TFNJNfHWVHNE6cNDjbVlZ6MgE17MIxMbMg2zv3wn+3XSJGosL1m9ZVv3GXCf/2ymSsQ+hRI5IzoMJTG6aoVA== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@float-capital/float-subgraph-uncrashable@^0.0.0-alpha.4": version "0.0.0-internal-testing.5" resolved "https://registry.npmjs.org/@float-capital/float-subgraph-uncrashable/-/float-subgraph-uncrashable-0.0.0-internal-testing.5.tgz" @@ -802,6 +1021,38 @@ which "2.0.2" yaml "1.10.2" +"@graphprotocol/graph-cli@0.57.0-alpha-20230831103613-4c8bdf8": + version "0.57.0-alpha-20230831103613-4c8bdf8" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.57.0-alpha-20230831103613-4c8bdf8.tgz#fccefd6be38c2cc8f69fd19ea417c157762dc8dc" + integrity sha512-y2xXgqaq0sAMPcXpfHn/lWx9j7A/a8GHdpZzqEZixAMEb9bsZQYkfs90eOUKKYOWU9CwCSSpKQ56soEMPgrijA== + dependencies: + "@float-capital/float-subgraph-uncrashable" "^0.0.0-alpha.4" + "@oclif/core" "2.8.6" + "@whatwg-node/fetch" "^0.8.4" + assemblyscript "0.19.23" + binary-install-raw "0.0.13" + chalk "3.0.0" + chokidar "3.5.3" + debug "4.3.4" + docker-compose "0.23.19" + dockerode "2.5.8" + fs-extra "9.1.0" + glob "9.3.5" + gluegun "5.1.2" + graphql "15.5.0" + immutable "4.2.1" + ipfs-http-client "55.0.0" + jayson "4.0.0" + js-yaml "3.14.1" + prettier "1.19.1" + request "2.88.2" + semver "7.4.0" + sync-request "6.1.0" + tmp-promise "3.0.3" + web3-eth-abi "1.7.0" + which "2.0.2" + yaml "1.10.2" + "@graphprotocol/graph-ts@0.30.0": version "0.30.0" resolved "https://registry.npmjs.org/@graphprotocol/graph-ts/-/graph-ts-0.30.0.tgz" @@ -816,27 +1067,151 @@ dependencies: assemblyscript "0.19.10" 
-"@graphql-tools/batch-execute@8.5.1": - version "8.5.1" - resolved "https://registry.yarnpkg.com/@graphql-tools/batch-execute/-/batch-execute-8.5.1.tgz#fa3321d58c64041650be44250b1ebc3aab0ba7a9" - integrity sha512-hRVDduX0UDEneVyEWtc2nu5H2PxpfSfM/riUlgZvo/a/nG475uyehxR5cFGvTEPEQUKY3vGIlqvtRigzqTfCew== +"@graphql-tools/batch-delegate@^6.2.4", "@graphql-tools/batch-delegate@^6.2.6": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/batch-delegate/-/batch-delegate-6.2.6.tgz#fbea98dc825f87ef29ea5f3f371912c2a2aa2f2c" + integrity sha512-QUoE9pQtkdNPFdJHSnBhZtUfr3M7pIRoXoMR+TG7DK2Y62ISKbT/bKtZEUU1/2v5uqd5WVIvw9dF8gHDSJAsSA== dependencies: - "@graphql-tools/utils" "8.9.0" - dataloader "2.1.0" - tslib "^2.4.0" - value-or-promise "1.0.11" + "@graphql-tools/delegate" "^6.2.4" + dataloader "2.0.0" + tslib "~2.0.1" -"@graphql-tools/delegate@^8.4.3": - version "8.8.1" - resolved "https://registry.yarnpkg.com/@graphql-tools/delegate/-/delegate-8.8.1.tgz#0653a72f38947f38ab7917dfac50ebf6a6b883e9" - integrity sha512-NDcg3GEQmdEHlnF7QS8b4lM1PSF+DKeFcIlLEfZFBvVq84791UtJcDj8734sIHLukmyuAxXMfA1qLd2l4lZqzA== +"@graphql-tools/batch-execute@^7.1.2": + version "7.1.2" + resolved "https://registry.yarnpkg.com/@graphql-tools/batch-execute/-/batch-execute-7.1.2.tgz#35ba09a1e0f80f34f1ce111d23c40f039d4403a0" + integrity sha512-IuR2SB2MnC2ztA/XeTMTfWcA0Wy7ZH5u+nDkDNLAdX+AaSyDnsQS35sCmHqG0VOGTl7rzoyBWLCKGwSJplgtwg== + dependencies: + "@graphql-tools/utils" "^7.7.0" + dataloader "2.0.0" + tslib "~2.2.0" + value-or-promise "1.0.6" + +"@graphql-tools/code-file-loader@^6.2.4": + version "6.3.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/code-file-loader/-/code-file-loader-6.3.1.tgz#42dfd4db5b968acdb453382f172ec684fa0c34ed" + integrity sha512-ZJimcm2ig+avgsEOWWVvAaxZrXXhiiSZyYYOJi0hk9wh5BxZcLUNKkTp6EFnZE/jmGUwuos3pIjUD3Hwi3Bwhg== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.5.1" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + 
+"@graphql-tools/delegate@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/delegate/-/delegate-6.2.4.tgz#db553b63eb9512d5eb5bbfdfcd8cb1e2b534699c" + integrity sha512-mXe6DfoWmq49kPcDrpKHgC2DSWcD5q0YCaHHoXYPAOlnLH8VMTY8BxcE8y/Do2eyg+GLcwAcrpffVszWMwqw0w== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + dataloader "2.0.0" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/delegate@^7.0.1", "@graphql-tools/delegate@^7.1.5": + version "7.1.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/delegate/-/delegate-7.1.5.tgz#0b027819b7047eff29bacbd5032e34a3d64bd093" + integrity sha512-bQu+hDd37e+FZ0CQGEEczmRSfQRnnXeUxI/0miDV+NV/zCbEdIJj5tYFNrKT03W6wgdqx8U06d8L23LxvGri/g== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + "@graphql-tools/batch-execute" "^7.1.2" + "@graphql-tools/schema" "^7.1.5" + "@graphql-tools/utils" "^7.7.1" + dataloader "2.0.0" + tslib "~2.2.0" + value-or-promise "1.0.6" + +"@graphql-tools/git-loader@^6.2.4": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/git-loader/-/git-loader-6.2.6.tgz#c2226f4b8f51f1c05c9ab2649ba32d49c68cd077" + integrity sha512-ooQTt2CaG47vEYPP3CPD+nbA0F+FYQXfzrB1Y1ABN9K3d3O2RK3g8qwslzZaI8VJQthvKwt0A95ZeE4XxteYfw== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/github-loader@^6.2.4": + version "6.2.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/github-loader/-/github-loader-6.2.5.tgz#460dff6f5bbaa26957a5ea3be4f452b89cc6a44b" + integrity sha512-DLuQmYeNNdPo8oWus8EePxWCfCAyUXPZ/p1PWqjrX/NGPyH2ZObdqtDAfRHztljt0F/qkBHbGHCEk2TKbRZTRw== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + cross-fetch "3.0.6" + tslib "~2.0.1" + +"@graphql-tools/graphql-file-loader@^6.2.4": + version "6.2.7" + resolved 
"https://registry.yarnpkg.com/@graphql-tools/graphql-file-loader/-/graphql-file-loader-6.2.7.tgz#d3720f2c4f4bb90eb2a03a7869a780c61945e143" + integrity sha512-5k2SNz0W87tDcymhEMZMkd6/vs6QawDyjQXWtqkuLTBF3vxjxPD1I4dwHoxgWPIjjANhXybvulD7E+St/7s9TQ== + dependencies: + "@graphql-tools/import" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/graphql-tag-pluck@^6.2.4", "@graphql-tools/graphql-tag-pluck@^6.2.6", "@graphql-tools/graphql-tag-pluck@^6.5.1": + version "6.5.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/graphql-tag-pluck/-/graphql-tag-pluck-6.5.1.tgz#5fb227dbb1e19f4b037792b50f646f16a2d4c686" + integrity sha512-7qkm82iFmcpb8M6/yRgzjShtW6Qu2OlCSZp8uatA3J0eMl87TxyJoUmL3M3UMMOSundAK8GmoyNVFUrueueV5Q== + dependencies: + "@babel/parser" "7.12.16" + "@babel/traverse" "7.12.13" + "@babel/types" "7.12.13" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/import@^6.2.4", "@graphql-tools/import@^6.2.6": + version "6.7.18" + resolved "https://registry.yarnpkg.com/@graphql-tools/import/-/import-6.7.18.tgz#ad092d8a4546bb6ffc3e871e499eec7ac368680b" + integrity sha512-XQDdyZTp+FYmT7as3xRWH/x8dx0QZA2WZqfMF5EWb36a0PiH7WwlRQYIdyYXj8YCLpiWkeBXgBRHmMnwEYR8iQ== dependencies: - "@graphql-tools/batch-execute" "8.5.1" - "@graphql-tools/schema" "8.5.1" - "@graphql-tools/utils" "8.9.0" - dataloader "2.1.0" - tslib "~2.4.0" - value-or-promise "1.0.11" + "@graphql-tools/utils" "^9.2.1" + resolve-from "5.0.0" + tslib "^2.4.0" + +"@graphql-tools/json-file-loader@^6.2.4": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/json-file-loader/-/json-file-loader-6.2.6.tgz#830482cfd3721a0799cbf2fe5b09959d9332739a" + integrity sha512-CnfwBSY5926zyb6fkDBHnlTblHnHI4hoBALFYXnrg0Ev4yWU8B04DZl/pBRUc459VNgO2x8/mxGIZj2hPJG1EA== + dependencies: + "@graphql-tools/utils" "^7.0.0" + tslib "~2.0.1" + +"@graphql-tools/links@^6.2.4": + version "6.2.5" + resolved 
"https://registry.yarnpkg.com/@graphql-tools/links/-/links-6.2.5.tgz#b172cadc4b7cbe27bfc1dc787651f92517f583bc" + integrity sha512-XeGDioW7F+HK6HHD/zCeF0HRC9s12NfOXAKv1HC0J7D50F4qqMvhdS/OkjzLoBqsgh/Gm8icRc36B5s0rOA9ig== + dependencies: + "@graphql-tools/utils" "^7.0.0" + apollo-link "1.2.14" + apollo-upload-client "14.1.2" + cross-fetch "3.0.6" + form-data "3.0.0" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/load-files@^6.2.4": + version "6.6.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/load-files/-/load-files-6.6.1.tgz#91ce18d910baf8678459486d8cccd474767bec0a" + integrity sha512-nd4GOjdD68bdJkHfRepILb0gGwF63mJI7uD4oJuuf2Kzeq8LorKa6WfyxUhdMuLmZhnx10zdAlWPfwv1NOAL4Q== + dependencies: + globby "11.1.0" + tslib "^2.4.0" + unixify "1.0.0" + +"@graphql-tools/load@^6.2.4": + version "6.2.8" + resolved "https://registry.yarnpkg.com/@graphql-tools/load/-/load-6.2.8.tgz#16900fb6e75e1d075cad8f7ea439b334feb0b96a" + integrity sha512-JpbyXOXd8fJXdBh2ta0Q4w8ia6uK5FHzrTNmcvYBvflFuWly2LDTk2abbSl81zKkzswQMEd2UIYghXELRg8eTA== + dependencies: + "@graphql-tools/merge" "^6.2.12" + "@graphql-tools/utils" "^7.5.0" + globby "11.0.3" + import-from "3.0.0" + is-glob "4.0.1" + p-limit "3.1.0" + tslib "~2.2.0" + unixify "1.0.0" + valid-url "1.0.9" "@graphql-tools/merge@8.3.1": version "8.3.1" @@ -846,25 +1221,69 @@ "@graphql-tools/utils" "8.9.0" tslib "^2.4.0" -"@graphql-tools/merge@^8.4.1": - version "8.4.2" - resolved "https://registry.yarnpkg.com/@graphql-tools/merge/-/merge-8.4.2.tgz#95778bbe26b635e8d2f60ce9856b388f11fe8288" - integrity sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw== +"@graphql-tools/merge@^6.2.12", "@graphql-tools/merge@^6.2.4": + version "6.2.17" + resolved "https://registry.yarnpkg.com/@graphql-tools/merge/-/merge-6.2.17.tgz#4dedf87d8435a5e1091d7cc8d4f371ed1e029f1f" + integrity sha512-G5YrOew39fZf16VIrc49q3c8dBqQDD0ax5LYPiNja00xsXDi0T9zsEWVt06ApjtSdSF6HDddlu5S12QjeN8Tow== + dependencies: + 
"@graphql-tools/schema" "^8.0.2" + "@graphql-tools/utils" "8.0.2" + tslib "~2.3.0" + +"@graphql-tools/mock@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/mock/-/mock-6.2.4.tgz#205323c51f89dd855d345d130c7713d0420909ea" + integrity sha512-O5Zvq/mcDZ7Ptky0IZ4EK9USmxV6FEVYq0Jxv2TI80kvxbCjt0tbEpZ+r1vIt1gZOXlAvadSHYyzWnUPh+1vkQ== + dependencies: + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + tslib "~2.0.1" + +"@graphql-tools/module-loader@^6.2.4": + version "6.2.7" + resolved "https://registry.yarnpkg.com/@graphql-tools/module-loader/-/module-loader-6.2.7.tgz#66ab9468775fac8079ca46ea9896ceea76e4ef69" + integrity sha512-ItAAbHvwfznY9h1H9FwHYDstTcm22Dr5R9GZtrWlpwqj0jaJGcBxsMB9jnK9kFqkbtFYEe4E/NsSnxsS4/vViQ== + dependencies: + "@graphql-tools/utils" "^7.5.0" + tslib "~2.1.0" + +"@graphql-tools/relay-operation-optimizer@^6.2.4": + version "6.5.18" + resolved "https://registry.yarnpkg.com/@graphql-tools/relay-operation-optimizer/-/relay-operation-optimizer-6.5.18.tgz#a1b74a8e0a5d0c795b8a4d19629b654cf66aa5ab" + integrity sha512-mc5VPyTeV+LwiM+DNvoDQfPqwQYhPV/cl5jOBjTgSniyaq8/86aODfMkrE2OduhQ5E00hqrkuL2Fdrgk0w1QJg== dependencies: + "@ardatan/relay-compiler" "12.0.0" "@graphql-tools/utils" "^9.2.1" tslib "^2.4.0" -"@graphql-tools/mock@^8.1.2": - version "8.7.20" - resolved "https://registry.yarnpkg.com/@graphql-tools/mock/-/mock-8.7.20.tgz#c83ae0f1940d194a3982120c9c85f3ac6b4f7f20" - integrity sha512-ljcHSJWjC/ZyzpXd5cfNhPI7YljRVvabKHPzKjEs5ElxWu2cdlLGvyNYepApXDsM/OJG/2xuhGM+9GWu5gEAPQ== +"@graphql-tools/resolvers-composition@^6.2.4": + version "6.5.18" + resolved "https://registry.yarnpkg.com/@graphql-tools/resolvers-composition/-/resolvers-composition-6.5.18.tgz#6e15139d0bdacce73002f190605d1fd63fe9f5bd" + integrity sha512-RhKDkq58wVCmL8roC/XndCKurKG65/8VoXBiJ3rgehuIXbuZusMwk7sUfO2b7OoaDmK3ARmpBO0NAkeo6Aaj0A== dependencies: - "@graphql-tools/schema" "^9.0.18" "@graphql-tools/utils" "^9.2.1" - fast-json-stable-stringify 
"^2.1.0" + lodash "4.17.21" + micromatch "^4.0.4" tslib "^2.4.0" -"@graphql-tools/schema@8.5.1", "@graphql-tools/schema@^8.0.0", "@graphql-tools/schema@^8.3.1": +"@graphql-tools/schema@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-6.2.4.tgz#cc4e9f5cab0f4ec48500e666719d99fc5042481d" + integrity sha512-rh+14lSY1q8IPbEv2J9x8UBFJ5NrDX9W5asXEUlPp+7vraLp/Tiox4GXdgyA92JhwpYco3nTf5Bo2JDMt1KnAQ== + dependencies: + "@graphql-tools/utils" "^6.2.4" + tslib "~2.0.1" + +"@graphql-tools/schema@^7.1.5": + version "7.1.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-7.1.5.tgz#07b24e52b182e736a6b77c829fc48b84d89aa711" + integrity sha512-uyn3HSNSckf4mvQSq0Q07CPaVZMNFCYEVxroApOaw802m9DcZPgf9XVPy/gda5GWj9AhbijfRYVTZQgHnJ4CXA== + dependencies: + "@graphql-tools/utils" "^7.1.2" + tslib "~2.2.0" + value-or-promise "1.0.6" + +"@graphql-tools/schema@^8.0.2": version "8.5.1" resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-8.5.1.tgz#c2f2ff1448380919a330312399c9471db2580b58" integrity sha512-0Esilsh0P/qYcB5DKQpiKeQs/jevzIadNTaT0jeWklPMwNbT7yMX4EqZany7mbeRRlSRwMzNzL5olyFdffHBZg== @@ -874,15 +1293,51 @@ tslib "^2.4.0" value-or-promise "1.0.11" -"@graphql-tools/schema@^9.0.18": - version "9.0.19" - resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-9.0.19.tgz#c4ad373b5e1b8a0cf365163435b7d236ebdd06e7" - integrity sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w== +"@graphql-tools/stitch@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/stitch/-/stitch-6.2.4.tgz#acfa6a577a33c0f02e4940ffff04753b23b87fd6" + integrity sha512-0C7PNkS7v7iAc001m7c1LPm5FUB0/DYw+s3OyCii6YYYHY8NwdI0roeOyeDGFJkFubWBQfjc3hoSyueKtU73mw== + dependencies: + "@graphql-tools/batch-delegate" "^6.2.4" + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/merge" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" 
"^6.2.4" + "@graphql-tools/wrap" "^6.2.4" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/url-loader@^6.2.4": + version "6.10.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/url-loader/-/url-loader-6.10.1.tgz#dc741e4299e0e7ddf435eba50a1f713b3e763b33" + integrity sha512-DSDrbhQIv7fheQ60pfDpGD256ixUQIR6Hhf9Z5bRjVkXOCvO5XrkwoWLiU7iHL81GB1r0Ba31bf+sl+D4nyyfw== + dependencies: + "@graphql-tools/delegate" "^7.0.1" + "@graphql-tools/utils" "^7.9.0" + "@graphql-tools/wrap" "^7.0.4" + "@microsoft/fetch-event-source" "2.0.1" + "@types/websocket" "1.0.2" + abort-controller "3.0.0" + cross-fetch "3.1.4" + extract-files "9.0.0" + form-data "4.0.0" + graphql-ws "^4.4.1" + is-promise "4.0.0" + isomorphic-ws "4.0.1" + lodash "4.17.21" + meros "1.1.4" + subscriptions-transport-ws "^0.9.18" + sync-fetch "0.3.0" + tslib "~2.2.0" + valid-url "1.0.9" + ws "7.4.5" + +"@graphql-tools/utils@8.0.2": + version "8.0.2" + resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-8.0.2.tgz#795a8383cdfdc89855707d62491c576f439f3c51" + integrity sha512-gzkavMOgbhnwkHJYg32Adv6f+LxjbQmmbdD5Hty0+CWxvaiuJq+nU6tzb/7VSU4cwhbNLx/lGu2jbCPEW1McZQ== dependencies: - "@graphql-tools/merge" "^8.4.1" - "@graphql-tools/utils" "^9.2.1" - tslib "^2.4.0" - value-or-promise "^1.0.12" + tslib "~2.3.0" "@graphql-tools/utils@8.9.0": version "8.9.0" @@ -891,6 +1346,24 @@ dependencies: tslib "^2.4.0" +"@graphql-tools/utils@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-6.2.4.tgz#38a2314d2e5e229ad4f78cca44e1199e18d55856" + integrity sha512-ybgZ9EIJE3JMOtTrTd2VcIpTXtDrn2q6eiYkeYMKRVh3K41+LZa6YnR2zKERTXqTWqhobROwLt4BZbw2O3Aeeg== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + camel-case "4.1.1" + tslib "~2.0.1" + +"@graphql-tools/utils@^7.0.0", "@graphql-tools/utils@^7.1.2", "@graphql-tools/utils@^7.5.0", "@graphql-tools/utils@^7.7.0", "@graphql-tools/utils@^7.7.1", "@graphql-tools/utils@^7.8.1", "@graphql-tools/utils@^7.9.0": + version 
"7.10.0" + resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-7.10.0.tgz#07a4cb5d1bec1ff1dc1d47a935919ee6abd38699" + integrity sha512-d334r6bo9mxdSqZW6zWboEnnOOFRrAPVQJ7LkU8/6grglrbcu6WhwCLzHb90E94JI3TD3ricC3YGbUqIi9Xg0w== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + camel-case "4.1.2" + tslib "~2.2.0" + "@graphql-tools/utils@^9.2.1": version "9.2.1" resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-9.2.1.tgz#1b3df0ef166cfa3eae706e3518b17d5922721c57" @@ -899,6 +1372,28 @@ "@graphql-typed-document-node/core" "^3.1.1" tslib "^2.4.0" +"@graphql-tools/wrap@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/wrap/-/wrap-6.2.4.tgz#2709817da6e469753735a9fe038c9e99736b2c57" + integrity sha512-cyQgpybolF9DjL2QNOvTS1WDCT/epgYoiA8/8b3nwv5xmMBQ6/6nYnZwityCZ7njb7MMyk7HBEDNNlP9qNJDcA== + dependencies: + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/wrap@^7.0.4": + version "7.0.8" + resolved "https://registry.yarnpkg.com/@graphql-tools/wrap/-/wrap-7.0.8.tgz#ad41e487135ca3ea1ae0ea04bb3f596177fb4f50" + integrity sha512-1NDUymworsOlb53Qfh7fonDi2STvqCtbeE68ntKY9K/Ju/be2ZNxrFSbrBHwnxWcN9PjISNnLcAyJ1L5tCUyhg== + dependencies: + "@graphql-tools/delegate" "^7.1.5" + "@graphql-tools/schema" "^7.1.5" + "@graphql-tools/utils" "^7.8.1" + tslib "~2.2.0" + value-or-promise "1.0.6" + "@graphql-typed-document-node/core@^3.1.1": version "3.2.0" resolved "https://registry.yarnpkg.com/@graphql-typed-document-node/core/-/core-3.2.0.tgz#5f3d96ec6b2354ad6d8a28bf216a1d97b5426861" @@ -932,12 +1427,26 @@ resolved "https://registry.npmjs.org/@josephg/resolvable/-/resolvable-1.0.1.tgz" integrity sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg== -"@jridgewell/resolve-uri@^3.0.3": +"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2": + version "0.3.3" + 
resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098" + integrity sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + +"@jridgewell/resolve-uri@^3.0.3", "@jridgewell/resolve-uri@^3.1.0": version "3.1.1" resolved "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz" integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== -"@jridgewell/sourcemap-codec@^1.4.10": +"@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": version "1.4.15" resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz" integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== @@ -950,20 +1459,45 @@ "@jridgewell/resolve-uri" "^3.0.3" "@jridgewell/sourcemap-codec" "^1.4.10" -"@nodelib/fs.scandir@2.1.5": - version "2.1.5" - resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" - integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== +"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.9": + version "0.3.19" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz#f8a3249862f91be48d3127c3cfe992f79b4b8811" + integrity sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw== 
dependencies: - "@nodelib/fs.stat" "2.0.5" - run-parallel "^1.1.9" + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" -"@nodelib/fs.stat@2.0.5": - version "2.0.5" - resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" - integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== +"@microsoft/fetch-event-source@2.0.1": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@microsoft/fetch-event-source/-/fetch-event-source-2.0.1.tgz#9ceecc94b49fbaa15666e38ae8587f64acce007d" + integrity sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA== + +"@noble/curves@1.1.0", "@noble/curves@~1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.1.0.tgz#f13fc667c89184bc04cccb9b11e8e7bae27d8c3d" + integrity sha512-091oBExgENk/kGj3AZmtBDMpxQPDtxQABR2B9lb1JbVTs6ytdzZNwvhxQ4MWasRNEzlbEH8jCWFCwhF/Obj5AA== + dependencies: + "@noble/hashes" "1.3.1" + +"@noble/hashes@1.3.1": + version "1.3.1" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.3.1.tgz#8831ef002114670c603c458ab8b11328406953a9" + integrity sha512-EbqwksQwz9xDRGfDST86whPBgM65E0OH/pCgqW0GBVzO22bNE+NuIbeTb714+IfSjU3aRk47EUvXIb5bTsenKA== -"@nodelib/fs.stat@^2.0.2": +"@noble/hashes@~1.3.0", "@noble/hashes@~1.3.1": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.3.2.tgz#6f26dbc8fbc7205873ce3cee2f690eba0d421b39" + integrity sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ== + +"@nodelib/fs.scandir@2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" + integrity sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA== + dependencies: + "@nodelib/fs.stat" "2.0.4" + run-parallel 
"^1.1.9" + +"@nodelib/fs.stat@2.0.4", "@nodelib/fs.stat@^2.0.2": version "2.0.4" resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz" integrity sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q== @@ -1140,19 +1674,19 @@ redux "^4.0.4" typescript-tuple "^2.2.1" -"@redux-saga/deferred@^1.2.1": +"@redux-saga/deferred@^1.1.2": version "1.2.1" resolved "https://registry.yarnpkg.com/@redux-saga/deferred/-/deferred-1.2.1.tgz#aca373a08ccafd6f3481037f2f7ee97f2c87c3ec" integrity sha512-cmin3IuuzMdfQjA0lG4B+jX+9HdTgHZZ+6u3jRAOwGUxy77GSlTi4Qp2d6PM1PUoTmQUR5aijlA39scWWPF31g== -"@redux-saga/delay-p@^1.2.1": +"@redux-saga/delay-p@^1.1.2": version "1.2.1" resolved "https://registry.yarnpkg.com/@redux-saga/delay-p/-/delay-p-1.2.1.tgz#e72ac4731c5080a21f75b61bedc31cb639d9e446" integrity sha512-MdiDxZdvb1m+Y0s4/hgdcAXntpUytr9g0hpcOO1XFVyyzkrDu3SKPgBFOtHn7lhu7n24ZKIAT1qtKyQjHqRd+w== dependencies: "@redux-saga/symbols" "^1.1.3" -"@redux-saga/is@^1.1.3": +"@redux-saga/is@^1.1.2": version "1.1.3" resolved "https://registry.yarnpkg.com/@redux-saga/is/-/is-1.1.3.tgz#b333f31967e87e32b4e6b02c75b78d609dd4ad73" integrity sha512-naXrkETG1jLRfVfhOx/ZdLj0EyAzHYbgJWkXbB3qFliPcHKiWbv/ULQryOAEKyjrhiclmr6AMdgsXFyx7/yE6Q== @@ -1160,12 +1694,12 @@ "@redux-saga/symbols" "^1.1.3" "@redux-saga/types" "^1.2.1" -"@redux-saga/symbols@^1.1.3": +"@redux-saga/symbols@^1.1.2", "@redux-saga/symbols@^1.1.3": version "1.1.3" resolved "https://registry.yarnpkg.com/@redux-saga/symbols/-/symbols-1.1.3.tgz#b731d56201719e96dc887dc3ae9016e761654367" integrity sha512-hCx6ZvU4QAEUojETnX8EVg4ubNLBFl1Lps4j2tX7o45x/2qg37m3c6v+kSp8xjDJY+2tJw4QB3j8o8dsl1FDXg== -"@redux-saga/types@^1.2.1": +"@redux-saga/types@^1.1.0", "@redux-saga/types@^1.2.1": version "1.2.1" resolved "https://registry.yarnpkg.com/@redux-saga/types/-/types-1.2.1.tgz#9403f51c17cae37edf870c6bc0c81c1ece5ccef8" integrity 
sha512-1dgmkh+3so0+LlBWRhGA33ua4MYr7tUOj+a9Si28vUi0IUFNbff1T3sgpeDJI/LaC75bBYnQ0A3wXjn0OrRNBA== @@ -1175,17 +1709,44 @@ resolved "https://registry.npmjs.org/@rescript/std/-/std-9.0.0.tgz" integrity sha512-zGzFsgtZ44mgL4Xef2gOy1hrRVdrs9mcxCOOKZrIPsmbZW14yTkaF591GXxpQvjXiHtgZ/iA9qLyWH6oSReIxQ== -"@sindresorhus/is@^4.0.0", "@sindresorhus/is@^4.6.0": +"@scure/base@~1.1.0": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.3.tgz#8584115565228290a6c6c4961973e0903bb3df2f" + integrity sha512-/+SgoRjLq7Xlf0CWuLHq2LUZeL/w65kfzAPG5NH9pcmBhs+nunQTn4gvdwgMTIXnt9b2C/1SeL2XiysZEyIC9Q== + +"@scure/bip32@1.3.1": + version "1.3.1" + resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.3.1.tgz#7248aea723667f98160f593d621c47e208ccbb10" + integrity sha512-osvveYtyzdEVbt3OfwwXFr4P2iVBL5u1Q3q4ONBfDY/UpOuXmOlbgwc1xECEboY8wIays8Yt6onaWMUdUbfl0A== + dependencies: + "@noble/curves" "~1.1.0" + "@noble/hashes" "~1.3.1" + "@scure/base" "~1.1.0" + +"@scure/bip39@1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.2.1.tgz#5cee8978656b272a917b7871c981e0541ad6ac2a" + integrity sha512-Z3/Fsz1yr904dduJD0NpiyRHhRYHdcnyh73FZWiV+/qhWi83wNJ3NWolYqCEN+ZWsUz2TWwajJggcRE9r1zUYg== + dependencies: + "@noble/hashes" "~1.3.0" + "@scure/base" "~1.1.0" + +"@sindresorhus/is@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" + integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== + +"@sindresorhus/is@^4.6.0": version "4.6.0" resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-4.6.0.tgz#3c7c9c46e678feefe7a2e5bb609d3dbd665ffb3f" integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw== -"@szmarczak/http-timer@^4.0.5": - version "4.0.6" - resolved 
"https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-4.0.6.tgz#b4a914bb62e7c272d4e5989fe4440f812ab1d807" - integrity sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w== +"@szmarczak/http-timer@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" + integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== dependencies: - defer-to-connect "^2.0.0" + defer-to-connect "^1.0.1" "@szmarczak/http-timer@^5.0.1": version "5.0.1" @@ -1194,19 +1755,46 @@ dependencies: defer-to-connect "^2.0.1" -"@truffle/abi-utils@^1.0.1": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@truffle/abi-utils/-/abi-utils-1.0.1.tgz#bf72d595f2eb03905429210b394f416fb774a61e" - integrity sha512-ZQUY3XUxEPdqxNaoXsOqF0spTtb6f5RNlnN4MUrVsJ64sOh0FJsY7rxZiUI3khfePmNh4i2qcJrQlKT36YcWUA== +"@truffle/abi-utils@^0.1.4": + version "0.1.6" + resolved "https://registry.yarnpkg.com/@truffle/abi-utils/-/abi-utils-0.1.6.tgz#d754a54caec2577efaa05f0ca66c58e73676884e" + integrity sha512-A9bW5XHywPNHod8rsu4x4eyM4C6k3eMeyOCd47edhiA/e9kgAVp6J3QDzKoHS8nuJ2qiaq+jk5bLnAgNWAHYyQ== + dependencies: + change-case "3.0.2" + faker "^5.3.1" + fast-check "^2.12.1" + +"@truffle/abi-utils@^0.2.1", "@truffle/abi-utils@^0.2.9": + version "0.2.17" + resolved "https://registry.yarnpkg.com/@truffle/abi-utils/-/abi-utils-0.2.17.tgz#a234ff38a12e80a1342ea3d7193e9c3caaf5cc03" + integrity sha512-Lz8bDjEYInaesdpP3ENVnDkfSkEmrV2z9BNcIEQPQnqdB5x/H+rIrWGny7LV6rX3Kz3kAerF8eGBFh7Z/dEhZw== + dependencies: + change-case "3.0.2" + faker "5.5.3" + fast-check "3.1.1" + +"@truffle/abi-utils@^1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@truffle/abi-utils/-/abi-utils-1.0.2.tgz#41210b234912051433960382af009f339f8a9642" + integrity 
sha512-MefEcxsBlprKIpfW7eh2I5zJqlWM18xk3duL7SW4VhIs6kNEec//hCpEDoE6P0m7GjqY3vk8X4vnf4aLlZkRcA== dependencies: change-case "3.0.2" fast-check "3.1.1" web3-utils "1.10.0" -"@truffle/blockchain-utils@^0.1.8": - version "0.1.8" - resolved "https://registry.yarnpkg.com/@truffle/blockchain-utils/-/blockchain-utils-0.1.8.tgz#0c1a369aa72f51df5af095678803242ea0a0d6ae" - integrity sha512-ZskpYDNHkXD3ota4iU3pZz6kLth87RC+wDn66Rp2Or+DqqJCKdnmS9GDctBi1EcMPDEi0BqpkdrfBuzA9uIkGg== +"@truffle/blockchain-utils@^0.0.26": + version "0.0.26" + resolved "https://registry.yarnpkg.com/@truffle/blockchain-utils/-/blockchain-utils-0.0.26.tgz#f4ea794e0a18c74d73ea10e29a506c9ed0a503ee" + integrity sha512-M91NJkfapK1RqdzVwKSSenPEE2cHzAAFwC3aPhA8Y3DznRfzOcck4mDH6eY71sytVCrGaXGm/Wirn3drGSH+qQ== + dependencies: + source-map-support "^0.5.19" + +"@truffle/code-utils@^1.2.23": + version "1.2.34" + resolved "https://registry.yarnpkg.com/@truffle/code-utils/-/code-utils-1.2.34.tgz#1e843baa7da6cd178d392cf5f1b95df4409ff7b2" + integrity sha512-Ie+PTdJIvK90voInSvn7WEdAsXd1VUw0TsX2225OMGVyYRWiQdX0K6Vfkib7RSZvdUEaURFAaHo5r57l2RacWg== + dependencies: + cbor "^5.1.0" "@truffle/code-utils@^3.0.3": version "3.0.3" @@ -1215,13 +1803,49 @@ dependencies: cbor "^5.2.0" -"@truffle/codec@^0.16.0": - version "0.16.0" - resolved "https://registry.yarnpkg.com/@truffle/codec/-/codec-0.16.0.tgz#86b98d1e20720fad7d00d4b519d9c06d96b4b079" - integrity sha512-cAX2mnkEhvcjG75JvOPvepMZmGSooOYtQpVMJIIGVp1x2B1V9aE+A5tqLEAr0idkFGCthGMvF/FfMRUQ/rWZMw== +"@truffle/codec@^0.10.7": + version "0.10.9" + resolved "https://registry.yarnpkg.com/@truffle/codec/-/codec-0.10.9.tgz#9c6f6a57b12894ad44fc37f41ddce18ebfc7b7e5" + integrity sha512-+xBcn1mTAqBhVaFULkMC+pJnUp3prL9QZtE5I4XhlCar3QLkSGR9Oy+Bm5qZwH72rctBRD/lGp2ezUo/oFc2MQ== + dependencies: + big.js "^5.2.2" + bn.js "^5.1.3" + cbor "^5.1.0" + debug "^4.3.1" + lodash.clonedeep "^4.5.0" + lodash.escaperegexp "^4.1.2" + lodash.partition "^4.6.0" + lodash.sum "^4.0.2" + semver "^7.3.4" + utf8 
"^3.0.0" + web3-utils "1.3.6" + +"@truffle/codec@^0.11.19": + version "0.11.27" + resolved "https://registry.yarnpkg.com/@truffle/codec/-/codec-0.11.27.tgz#6aecbbf486cc834989c0b5b07418d02d77abcf2c" + integrity sha512-zPlbrGSZ975jscoJ4NhQpaJGwJXkasnpSoUAEjzppr6FCLKtutxssy6yfz4EUHaQDTg1SqxlVBfBhqYcrCyjvw== + dependencies: + "@truffle/abi-utils" "^0.2.9" + "@truffle/compile-common" "^0.7.28" + big.js "^5.2.2" + bn.js "^5.1.3" + cbor "^5.1.0" + debug "^4.3.1" + lodash.clonedeep "^4.5.0" + lodash.escaperegexp "^4.1.2" + lodash.partition "^4.6.0" + lodash.sum "^4.0.2" + semver "^7.3.4" + utf8 "^3.0.0" + web3-utils "1.5.3" + +"@truffle/codec@^0.17.2": + version "0.17.2" + resolved "https://registry.yarnpkg.com/@truffle/codec/-/codec-0.17.2.tgz#52a3604f73b89964373eec945f20d5cd0f4244d4" + integrity sha512-n9HX8R5a5+/j6Y0+lqSzIyU1cUxTRYn/xEWp0Qc1b0Vtltad7wvVh+KLGvbm/KQEX3o1RK1xRIUN2E0QlDeQnA== dependencies: - "@truffle/abi-utils" "^1.0.1" - "@truffle/compile-common" "^0.9.6" + "@truffle/abi-utils" "^1.0.2" + "@truffle/compile-common" "^0.9.7" big.js "^6.0.3" bn.js "^5.1.3" cbor "^5.2.0" @@ -1231,32 +1855,40 @@ utf8 "^3.0.0" web3-utils "1.10.0" -"@truffle/compile-common@^0.9.6": - version "0.9.6" - resolved "https://registry.yarnpkg.com/@truffle/compile-common/-/compile-common-0.9.6.tgz#037d74bc00ded33b9212d886531c2cee998662da" - integrity sha512-TCcmr1E0GqMZJ2tOaCRNEllxTBJ/g7TuD6jDJpw5Gt9Bw0YO3Cmp6yPQRynRSO4xMJbHUgiEsSfRgIhswut5UA== +"@truffle/compile-common@^0.7.28": + version "0.7.34" + resolved "https://registry.yarnpkg.com/@truffle/compile-common/-/compile-common-0.7.34.tgz#c5b3e31cc716af91330f6a66f4dde1dbc491a58b" + integrity sha512-NA8HuTCw6pgTpCyMd7M70Ii8AVD921R95UnXB3dwVWwEyV1OksaAsTKfdLxeLnFR4ISkK6o2NqpFb/lM3+V+9w== + dependencies: + "@truffle/error" "^0.1.1" + colors "1.4.0" + +"@truffle/compile-common@^0.9.7": + version "0.9.7" + resolved "https://registry.yarnpkg.com/@truffle/compile-common/-/compile-common-0.9.7.tgz#e8ba6cd49c4d4e7ae4684ba453fb9b2dcd09b347" + 
integrity sha512-TXuVLc5yJ/A0bSWw5OWIdXmcyaPpj3TJQ60ki7w9cIuW65Bazw7P4FRPaVNjR9YGe1FLYJ36GSdd9V3egPbzCg== dependencies: "@truffle/error" "^0.2.1" colors "1.4.0" -"@truffle/config@^1.3.58": - version "1.3.58" - resolved "https://registry.yarnpkg.com/@truffle/config/-/config-1.3.58.tgz#ba5b966a84990a2ea9453ef2a586891ccd29ed7f" - integrity sha512-M6e7dAx6QMMskhwpqpOE4dAj72HapcMPtw/7c6bssCZd/E1quyAs/CpiYGDIxp2EuZHxW/9X16VzIac8sIOW7w== +"@truffle/config@^1.2.35": + version "1.3.60" + resolved "https://registry.yarnpkg.com/@truffle/config/-/config-1.3.60.tgz#0299ba660cb0b4822aab013258d32d4c54c55b6f" + integrity sha512-ccpdafmky4sC9th0KgJiQsDMPqsgKLC0vgaCebBHTbOvkWHjKB5G7NDXYBd6ex3GGU3Jvs3j4sjvI2TTO+fsdw== dependencies: "@truffle/error" "^0.2.1" "@truffle/events" "^0.1.24" - "@truffle/provider" "^0.3.10" + "@truffle/provider" "^0.3.12" conf "^10.1.2" debug "^4.3.1" find-up "^2.1.0" lodash "^4.17.21" original-require "^1.0.1" -"@truffle/contract-schema@^3.4.14": - version "3.4.14" - resolved "https://registry.yarnpkg.com/@truffle/contract-schema/-/contract-schema-3.4.14.tgz#ded13d54daa7621dc9894fa7bf813f557e025b58" - integrity sha512-IwVQZG9RVNwTdn321+jbFIcky3/kZLkCtq8tqil4jZwivvmZQg8rIVC8GJ7Lkrmixl9/yTyQNL6GtIUUvkZxyA== +"@truffle/contract-schema@^3.3.4": + version "3.4.15" + resolved "https://registry.yarnpkg.com/@truffle/contract-schema/-/contract-schema-3.4.15.tgz#199789b3f0a61b0e564ee8d62d7a7e5a8e6b749f" + integrity sha512-m13e1VlXEdxiXiqv/SmPlqbdtcuhjwIGTICm+JCEO8nt0NYBbdMC2paNkpUvGz9lK139JxIupMHctEV4vgkldw== dependencies: ajv "^6.10.0" debug "^4.3.1" @@ -1301,13 +1933,6 @@ resolved "https://registry.yarnpkg.com/@truffle/dashboard-message-bus-common/-/dashboard-message-bus-common-0.1.6.tgz#53bd095d84b5913753cab7c6c78d3870b8ec2207" integrity sha512-93HNXILKeKgmW1YaWPdsQ55MJ0MaLzOA8kRXPnM5jF2H3KRlRxOeEg77R4YWtGH+cVZP4VYGXdpvUap/lOAnvw== -"@truffle/db-loader@^0.2.30": - version "0.2.30" - resolved 
"https://registry.yarnpkg.com/@truffle/db-loader/-/db-loader-0.2.30.tgz#25ac2bf80608edac0a19cc3a3a75923f3312ae8c" - integrity sha512-dQv8Q/JY+toSgOH5kHzVhEt1Ig6Wg0onn219qo2terL8xX5g5Ia2mM0Pg82Ba+B8ZCkCnz+kYC/htscF8d6ugw== - optionalDependencies: - "@truffle/db" "^2.0.30" - "@truffle/db@^0.5.3": version "0.5.3" resolved "https://registry.npmjs.org/@truffle/db/-/db-0.5.3.tgz" @@ -1334,63 +1959,52 @@ source-map-support "^0.5.19" web3-utils "1.2.9" -"@truffle/db@^2.0.30": - version "2.0.30" - resolved "https://registry.yarnpkg.com/@truffle/db/-/db-2.0.30.tgz#8c76babfe423b08d8d6e0cd8a5cde6bca0ecac99" - integrity sha512-vbOt7r3ybmoo3xZjyTyWe9SC98b6JcMeOpmi59Trdxwp+h3Lo9wdqdvv4595QxwDwzHvISSFTJRGKNDMtRD2rg== - dependencies: - "@graphql-tools/delegate" "^8.4.3" - "@graphql-tools/schema" "^8.3.1" - "@truffle/abi-utils" "^1.0.1" - "@truffle/code-utils" "^3.0.3" - "@truffle/config" "^1.3.58" - abstract-leveldown "^7.2.0" - apollo-server "^3.11.0" - debug "^4.3.1" - fs-extra "^9.1.0" - graphql "^15.3.0" - graphql-tag "^2.12.6" - json-stable-stringify "^1.0.1" - pascal-case "^2.0.1" - pluralize "^8.0.0" - pouchdb "7.3.0" - pouchdb-adapter-memory "^7.1.1" - pouchdb-debug "^7.1.1" - pouchdb-find "^7.0.0" - web3-utils "1.10.0" - -"@truffle/debug-utils@^6.0.53": - version "6.0.53" - resolved "https://registry.yarnpkg.com/@truffle/debug-utils/-/debug-utils-6.0.53.tgz#664a53b06096d77eba47e09e10b9127eb5ce1559" - integrity sha512-czfIjw41UA7eEX0Z660JAQkxyqjr4+B/ecDHFia/feIprLAdFz3VDuhqf/zoMnrKczqS2qbMtnknExgbbrH+eg== +"@truffle/debug-utils@^5.0.11": + version "5.1.21" + resolved "https://registry.yarnpkg.com/@truffle/debug-utils/-/debug-utils-5.1.21.tgz#2f424f87e9f19a52b45fd708ba4194e009bb5820" + integrity sha512-BcoeKWCcfmzQ0XEFIrViVSfbnJNOEtDV5ZvTe2IHtk9QlkQhtFalLkw8+n4OyXPHSyyvNKGLDVVmJSKXuA2vvQ== dependencies: - "@truffle/codec" "^0.16.0" - "@trufflesuite/chromafi" "^3.0.0" + "@truffle/codec" "^0.11.19" + "@trufflesuite/chromafi" "^2.2.2" bn.js "^5.1.3" chalk "^2.4.2" debug 
"^4.3.1" - highlightjs-solidity "^2.0.6" + highlightjs-solidity "^2.0.2" -"@truffle/debugger@^12.0.0": - version "12.0.0" - resolved "https://registry.yarnpkg.com/@truffle/debugger/-/debugger-12.0.0.tgz#8699af4b49307b72a113e009f4f3bd7338f6bc23" - integrity sha512-ICJPM4R2VjwWJI85r7R2TzmmL0VqZoCdPyFJ1JXYXhK/gmuRyPHb0wmSALgu4edXyyauN4ekDc2qi/lX/oU/hg== +"@truffle/debugger@^8.0.17": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@truffle/debugger/-/debugger-8.1.0.tgz#0bfd643df6bdae706664d8d1e5a94b09af178732" + integrity sha512-H0WHJGcsRsntefi66Ow6Ap83NTNOxofGVFBM5Dkg/Mm9QXyAiwHRu20Kkn/ihnOPrDpKwq5RB/1dvZiUDXXh+g== dependencies: - "@ensdomains/ensjs" "^2.1.0" - "@truffle/abi-utils" "^1.0.1" - "@truffle/codec" "^0.16.0" - "@truffle/source-map-utils" "^1.3.115" + "@truffle/abi-utils" "^0.2.1" + "@truffle/codec" "^0.10.7" + "@truffle/source-map-utils" "^1.3.41" bn.js "^5.1.3" debug "^4.3.1" - json-pointer "^0.6.1" + json-pointer "^0.6.0" json-stable-stringify "^1.0.1" - lodash "^4.17.21" + lodash.flatten "^4.4.0" + lodash.merge "^4.6.2" + lodash.sum "^4.0.2" + lodash.zipwith "^4.2.0" redux "^3.7.2" + redux-cli-logger "^2.0.1" redux-saga "1.0.0" - reselect-tree "^1.3.7" - semver "7.5.2" - web3 "1.10.0" - web3-eth-abi "1.10.0" + remote-redux-devtools "^0.5.12" + reselect-tree "^1.3.4" + semver "^7.3.4" + web3 "1.3.6" + web3-eth-abi "1.3.6" + +"@truffle/error@^0.0.12": + version "0.0.12" + resolved "https://registry.yarnpkg.com/@truffle/error/-/error-0.0.12.tgz#83e02e6ffe1d154fe274141d90038a91fd1e186d" + integrity sha512-kZqqnPR9YDJG7KCDOcN1qH16Qs0oz1PzF0Y93AWdhXuL9S9HYo/RUUeqGKbPpRBEZldQUS8aa4EzfK08u5pu6g== + +"@truffle/error@^0.1.1": + version "0.1.1" + resolved "https://registry.yarnpkg.com/@truffle/error/-/error-0.1.1.tgz#e52026ac8ca7180d83443dca73c03e07ace2a301" + integrity sha512-sE7c9IHIGdbK4YayH4BC8i8qMjoAOeg6nUXUDZZp8wlU21/EMpaG+CLx+KqcIPyR+GSWIW3Dm0PXkr2nlggFDA== "@truffle/error@^0.2.1": version "0.2.1" @@ -1423,10 +2037,19 @@ ethereumjs-wallet "^1.0.1" 
source-map-support "^0.5.19" -"@truffle/interface-adapter@^0.5.34": - version "0.5.34" - resolved "https://registry.yarnpkg.com/@truffle/interface-adapter/-/interface-adapter-0.5.34.tgz#a45edc23d6ace0e01ebf237b668119f456729643" - integrity sha512-gPxabfMi2TueE4VxnNuyeudOfvGJQ1ofVC02PFw14cnRQhzH327JikjjQbZ1bT6S7kWl9H6P3hQPFeYFMHdm1g== +"@truffle/interface-adapter@^0.4.19": + version "0.4.24" + resolved "https://registry.yarnpkg.com/@truffle/interface-adapter/-/interface-adapter-0.4.24.tgz#5d6d4f10c756e967f19ac2ad1620d11d25c034bb" + integrity sha512-2Zho4dJbm/XGwNleY7FdxcjXiAR3SzdGklgrAW4N/YVmltaJv6bT56ACIbPNN6AdzkTSTO65OlsB/63sfSa/VA== + dependencies: + bn.js "^5.1.3" + ethers "^4.0.32" + web3 "1.3.6" + +"@truffle/interface-adapter@^0.5.36": + version "0.5.36" + resolved "https://registry.yarnpkg.com/@truffle/interface-adapter/-/interface-adapter-0.5.36.tgz#feef9f75f32f1298c2912ec78a23a704917b3ba7" + integrity sha512-e6ECu9YDJuQjjxphdtJatHGgjlGsX/a3rQwqG2kBrK4cynGJdlejoo2iv/7Dj97wgIMIr1qH3L2WDrfTNF+K1g== dependencies: bn.js "^5.1.3" ethers "^4.0.32" @@ -1437,23 +2060,23 @@ resolved "https://registry.yarnpkg.com/@truffle/promise-tracker/-/promise-tracker-0.1.6.tgz#daecee974e8271387031f32765a414f76e727a79" integrity sha512-oUZ4Mc6Yt/qTvFZ/yD4nnUIN8pXhrBN0h4/SZ4e8W1TcHNvQkV6gUkkYkn8fZRvFwTMYjvWot+eAHHNRsSl/eA== -"@truffle/provider@^0.3.10": - version "0.3.10" - resolved "https://registry.yarnpkg.com/@truffle/provider/-/provider-0.3.10.tgz#d146e20f3a6facdfa1d7fbaa920dc1fca91ada39" - integrity sha512-oT7WKlxj1BrZBnCh9Dd4ex623yPG5ASAW5wK9kscS81MSkPYpSjld2B3tEZH9F6Lyz6lluQO1TcssuzZUek5Qg== +"@truffle/provider@^0.3.12": + version "0.3.12" + resolved "https://registry.yarnpkg.com/@truffle/provider/-/provider-0.3.12.tgz#693484fbd27b73514e82cf0bec68d7ad761c4a84" + integrity sha512-R7zS1eyY8H37ExkMxmMqa6bFN4UYLiYSCc+4g/8IjEU2lZXhKWUXA2gWzuMPT+XWfAu+IrC1PqLIHQgYe05EVA== dependencies: "@truffle/error" "^0.2.1" - "@truffle/interface-adapter" "^0.5.34" + 
"@truffle/interface-adapter" "^0.5.36" debug "^4.3.1" web3 "1.10.0" -"@truffle/source-map-utils@^1.3.115": - version "1.3.115" - resolved "https://registry.yarnpkg.com/@truffle/source-map-utils/-/source-map-utils-1.3.115.tgz#e5d200be6268d6ae66b453a359afcb3ec8ef80bf" - integrity sha512-NUOj45yLkMjv0x376N4ibgPh9pIp2rMKd2HxmXRbbIv3VHy18WPtgbW7+fkll/rAeTYhXzcyvwu67+dtgVholw== +"@truffle/source-map-utils@^1.3.41": + version "1.3.118" + resolved "https://registry.yarnpkg.com/@truffle/source-map-utils/-/source-map-utils-1.3.118.tgz#5f477ed44e190d29e476cf8dc64c6cb1e46b3a69" + integrity sha512-SGOW6moch7MqsjSoUf7e/y6xuCZi5M6kEtSG1aaZQJrxrj/ZuooYpShXxISyiOAB89QZ5txCRVsN2EnNYgjjkA== dependencies: "@truffle/code-utils" "^3.0.3" - "@truffle/codec" "^0.16.0" + "@truffle/codec" "^0.17.2" debug "^4.3.1" json-pointer "^0.6.1" node-interval-tree "^1.3.3" @@ -1466,26 +2089,25 @@ dependencies: "@trufflesuite/spinnies" "^0.1.1" -"@trufflesuite/bigint-buffer@1.1.10": - version "1.1.10" - resolved "https://registry.yarnpkg.com/@trufflesuite/bigint-buffer/-/bigint-buffer-1.1.10.tgz#a1d9ca22d3cad1a138b78baaf15543637a3e1692" - integrity sha512-pYIQC5EcMmID74t26GCC67946mgTJFiLXOT/BYozgrd4UEY2JHEGLhWi9cMiQCt5BSqFEvKkCHNnoj82SRjiEw== - dependencies: - node-gyp-build "4.4.0" - -"@trufflesuite/chromafi@^3.0.0": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@trufflesuite/chromafi/-/chromafi-3.0.0.tgz#f6956408c1af6a38a6ed1657783ce59504a1eb8b" - integrity sha512-oqWcOqn8nT1bwlPPfidfzS55vqcIDdpfzo3HbU9EnUmcSTX+I8z0UyUFI3tZQjByVJulbzxHxUGS3ZJPwK/GPQ== +"@trufflesuite/chromafi@^2.2.2": + version "2.2.2" + resolved "https://registry.yarnpkg.com/@trufflesuite/chromafi/-/chromafi-2.2.2.tgz#d3fc507aa8504faffc50fb892cedcfe98ff57f77" + integrity sha512-mItQwVBsb8qP/vaYHQ1kDt2vJLhjoEXJptT6y6fJGvFophMFhOI/NsTVUa0nJL1nyMeFiS6hSYuNVdpQZzB1gA== dependencies: + ansi-mark "^1.0.0" + ansi-regex "^3.0.0" + array-uniq "^1.0.3" camelcase "^4.1.0" chalk "^2.3.2" cheerio "^1.0.0-rc.2" detect-indent "^5.0.0" 
+ he "^1.1.1" highlight.js "^10.4.1" lodash.merge "^4.6.2" + min-indent "^1.0.0" strip-ansi "^4.0.0" strip-indent "^2.0.0" + super-split "^1.1.0" "@trufflesuite/eth-json-rpc-filters@^4.1.2-1": version "4.1.2-1" @@ -1546,22 +2168,11 @@ cli-cursor "^3.1.0" strip-ansi "^6.0.0" -"@trufflesuite/uws-js-unofficial@20.10.0-unofficial.2": - version "20.10.0-unofficial.2" - resolved "https://registry.yarnpkg.com/@trufflesuite/uws-js-unofficial/-/uws-js-unofficial-20.10.0-unofficial.2.tgz#7ed613ce3260cd5d1773a4d5787a2a106acd1a91" - integrity sha512-oQQlnS3oNeGsgS4K3KCSSavJgSb0W9D5ktZs4FacX9VbM7b+NlhjH96d6/G4fMrz+bc5MXRyco419on0X0dvRA== - dependencies: - ws "8.2.3" - optionalDependencies: - bufferutil "4.0.5" - utf-8-validate "5.0.7" - -"@trufflesuite/web3-provider-engine@15.0.14": - version "15.0.14" - resolved "https://registry.yarnpkg.com/@trufflesuite/web3-provider-engine/-/web3-provider-engine-15.0.14.tgz#8f9696f434585cc0ab2e57c312090c1f138bc471" - integrity sha512-6/LoWvNMxYf0oaYzJldK2a9AdnkAdIeJhHW4nuUBAeO29eK9xezEaEYQ0ph1QRTaICxGxvn+1Azp4u8bQ8NEZw== +"@trufflesuite/web3-provider-engine@15.0.13-1": + version "15.0.13-1" + resolved "https://registry.yarnpkg.com/@trufflesuite/web3-provider-engine/-/web3-provider-engine-15.0.13-1.tgz#f6a7f7131a2fdc4ab53976318ed13ce83e8e4bcb" + integrity sha512-6u3x/iIN5fyj8pib5QTUDmIOUiwAGhaqdSTXdqCu6v9zo2BEwdCqgEJd1uXDh3DBmPRDfiZ/ge8oUPy7LerpHg== dependencies: - "@ethereumjs/tx" "^3.3.0" "@trufflesuite/eth-json-rpc-filters" "^4.1.2-1" "@trufflesuite/eth-json-rpc-infura" "^4.0.3-0" "@trufflesuite/eth-json-rpc-middleware" "^4.4.2-1" @@ -1573,6 +2184,7 @@ eth-block-tracker "^4.4.2" eth-json-rpc-errors "^2.0.2" ethereumjs-block "^1.2.2" + ethereumjs-tx "^1.2.0" ethereumjs-util "^5.1.5" ethereumjs-vm "^2.3.4" json-stable-stringify "^1.0.1" @@ -1604,14 +2216,14 @@ resolved "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.3.tgz" integrity sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ== 
-"@types/accepts@^1.3.5": +"@types/accepts@*", "@types/accepts@^1.3.5": version "1.3.5" resolved "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.5.tgz" integrity sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ== dependencies: "@types/node" "*" -"@types/bn.js@^4.11.3": +"@types/bn.js@^4.11.3", "@types/bn.js@^4.11.4", "@types/bn.js@^4.11.5": version "4.11.6" resolved "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz" integrity sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg== @@ -1640,15 +2252,15 @@ "@types/connect" "*" "@types/node" "*" -"@types/body-parser@1.19.2": - version "1.19.2" - resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.2.tgz#aea2059e28b7658639081347ac4fab3de166e6f0" - integrity sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g== +"@types/body-parser@1.19.0": + version "1.19.0" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.0.tgz#0685b3c47eb3006ffed117cdd55164b61f80538f" + integrity sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ== dependencies: "@types/connect" "*" "@types/node" "*" -"@types/cacheable-request@^6.0.1", "@types/cacheable-request@^6.0.2": +"@types/cacheable-request@^6.0.2": version "6.0.3" resolved "https://registry.yarnpkg.com/@types/cacheable-request/-/cacheable-request-6.0.3.tgz#a430b3260466ca7b5ca5bfd735693b36e7a9d183" integrity sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw== @@ -1679,36 +2291,43 @@ dependencies: "@types/node" "*" -"@types/cors@2.8.12": - version "2.8.12" - resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.12.tgz#6b2c510a7ad7039e98e7b8d3d6598f4359e5c080" - integrity sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw== +"@types/content-disposition@*": + 
version "0.5.5" + resolved "https://registry.yarnpkg.com/@types/content-disposition/-/content-disposition-0.5.5.tgz#650820e95de346e1f84e30667d168c8fd25aa6e3" + integrity sha512-v6LCdKfK6BwcqMo+wYW05rLS12S0ZO0Fl4w1h4aaZMD7bqT3gVUns6FvLJKGZHQmYn3SX55JWGpziwJRwVgutA== -"@types/express-serve-static-core@4.17.31": - version "4.17.31" - resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.31.tgz#a1139efeab4e7323834bb0226e62ac019f474b2f" - integrity sha512-DxMhY+NAsTwMMFHBTtJFNp5qiHKJ7TeqOo23zVEM9alT1Ml27Q3xcTH0xwxn7Q0BbMcVEJOs/7aQtUWupUQN3Q== +"@types/cookies@*": + version "0.7.7" + resolved "https://registry.yarnpkg.com/@types/cookies/-/cookies-0.7.7.tgz#7a92453d1d16389c05a5301eef566f34946cfd81" + integrity sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA== dependencies: + "@types/connect" "*" + "@types/express" "*" + "@types/keygrip" "*" "@types/node" "*" - "@types/qs" "*" - "@types/range-parser" "*" -"@types/express-serve-static-core@^4.17.18": - version "4.17.24" - resolved "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.24.tgz" - integrity sha512-3UJuW+Qxhzwjq3xhwXm2onQcFHn76frIYVbTu+kn24LFxI+dEhdfISDFovPB8VpEgW8oQCTpRuCe+0zJxB7NEA== +"@types/cors@2.8.10": + version "2.8.10" + resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.10.tgz#61cc8469849e5bcdd0c7044122265c39cec10cf4" + integrity sha512-C7srjHiVG3Ey1nR6d511dtDkCEjxuN9W1HWAEjGq8kpcwmNM6JJkpC0xvabM7BXTG2wDq8Eu33iH9aQKa7IvLQ== + +"@types/express-serve-static-core@^4.17.21", "@types/express-serve-static-core@^4.17.33": + version "4.17.36" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.36.tgz#baa9022119bdc05a4adfe740ffc97b5f9360e545" + integrity sha512-zbivROJ0ZqLAtMzgzIUC4oNqDG9iF0lSsAqpOD9kbs5xcIM3dTiyuHvBc7R8MtWBp3AAWGaovJa+wzWPjLYW7Q== dependencies: "@types/node" "*" "@types/qs" "*" 
"@types/range-parser" "*" + "@types/send" "*" -"@types/express@4.17.14": - version "4.17.14" - resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.14.tgz#143ea0557249bc1b3b54f15db4c81c3d4eb3569c" - integrity sha512-TEbt+vaPFQ+xpxFLFssxUDXj5cWCxZJjIcB7Yg0k0GMHGtgtQgpvx/MUQUeAkNbA9AAGrwkAsoeItdTgS7FMyg== +"@types/express@*", "@types/express@^4.17.12": + version "4.17.17" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.17.tgz#01d5437f6ef9cfa8668e616e13c2f2ac9a491ae4" + integrity sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q== dependencies: "@types/body-parser" "*" - "@types/express-serve-static-core" "^4.17.18" + "@types/express-serve-static-core" "^4.17.33" "@types/qs" "*" "@types/serve-static" "*" @@ -1719,11 +2338,33 @@ dependencies: "@types/node" "*" +"@types/fs-capacitor@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@types/fs-capacitor/-/fs-capacitor-2.0.0.tgz#17113e25817f584f58100fb7a08eed288b81956e" + integrity sha512-FKVPOCFbhCvZxpVAMhdBdTfVfXUpsh15wFHgqOKxh9N9vzWZVuWCSijZ5T4U34XYNnuj2oduh6xcs1i+LPI+BQ== + dependencies: + "@types/node" "*" + +"@types/http-assert@*": + version "1.5.3" + resolved "https://registry.yarnpkg.com/@types/http-assert/-/http-assert-1.5.3.tgz#ef8e3d1a8d46c387f04ab0f2e8ab8cb0c5078661" + integrity sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA== + "@types/http-cache-semantics@*": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.1.tgz#0ea7b61496902b95890dc4c3a116b60cb8dae812" integrity sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ== +"@types/http-errors@*": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@types/http-errors/-/http-errors-2.0.1.tgz#20172f9578b225f6c7da63446f56d4ce108d5a65" + integrity 
sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ== + +"@types/keygrip@*": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@types/keygrip/-/keygrip-1.0.2.tgz#513abfd256d7ad0bf1ee1873606317b33b1b2a72" + integrity sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw== + "@types/keyv@^3.1.4": version "3.1.4" resolved "https://registry.yarnpkg.com/@types/keyv/-/keyv-3.1.4.tgz#3ccdb1c6751b0c7e52300bcdacd5bcbf8faa75b6" @@ -1731,6 +2372,27 @@ dependencies: "@types/node" "*" +"@types/koa-compose@*": + version "3.2.5" + resolved "https://registry.yarnpkg.com/@types/koa-compose/-/koa-compose-3.2.5.tgz#85eb2e80ac50be95f37ccf8c407c09bbe3468e9d" + integrity sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ== + dependencies: + "@types/koa" "*" + +"@types/koa@*": + version "2.13.8" + resolved "https://registry.yarnpkg.com/@types/koa/-/koa-2.13.8.tgz#4302d2f2712348aadb6c0b03eb614f30afde486b" + integrity sha512-Ugmxmgk/yPRW3ptBTh9VjOLwsKWJuGbymo1uGX0qdaqqL18uJiiG1ZoV0rxCOYSaDGhvEp5Ece02Amx0iwaxQQ== + dependencies: + "@types/accepts" "*" + "@types/content-disposition" "*" + "@types/cookies" "*" + "@types/http-assert" "*" + "@types/http-errors" "*" + "@types/keygrip" "*" + "@types/koa-compose" "*" + "@types/node" "*" + "@types/long@^4.0.0": version "4.0.1" resolved "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz" @@ -1741,11 +2403,6 @@ resolved "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz" integrity sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA== -"@types/lru-cache@5.1.1": - version "5.1.1" - resolved "https://registry.yarnpkg.com/@types/lru-cache/-/lru-cache-5.1.1.tgz#c48c2e27b65d2a153b19bfc1a317e30872e01eef" - integrity sha512-ssE3Vlrys7sdIzs5LOxCzTVMsU7i9oa/IaW92wF32JFb3CVczqOkru2xspuKczHEbG3nvmPY7IFqVmGGHdNbYw== - "@types/mime@^1": version "1.3.2" resolved 
"https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz" @@ -1766,12 +2423,12 @@ resolved "https://registry.npmjs.org/@types/node/-/node-18.16.3.tgz" integrity sha512-OPs5WnnT1xkCBiuQrZA4+YAV4HEJejmHneyraIaxsbev5yCEr6KMwINNFP9wQeFIw8FWcoTqF3vQsa5CDaI+8Q== -"@types/node@^10.0.3", "@types/node@^10.1.0": +"@types/node@^10.0.3", "@types/node@^10.1.0", "@types/node@^10.12.18": version "10.17.60" resolved "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz" integrity sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw== -"@types/node@^12.12.54": +"@types/node@^12.12.54", "@types/node@^12.6.1": version "12.20.55" resolved "https://registry.npmjs.org/@types/node/-/node-12.20.55.tgz" integrity sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ== @@ -1822,10 +2479,13 @@ dependencies: "@types/node" "*" -"@types/seedrandom@3.0.1": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@types/seedrandom/-/seedrandom-3.0.1.tgz#1254750a4fec4aff2ebec088ccd0bb02e91fedb4" - integrity sha512-giB9gzDeiCeloIXDgzFBCgjj1k4WxcDrZtGl6h1IqmUPlxF+Nx8Ve+96QCyDZ/HseB/uvDsKbpib9hU5cU53pw== +"@types/send@*": + version "0.17.1" + resolved "https://registry.yarnpkg.com/@types/send/-/send-0.17.1.tgz#ed4932b8a2a805f1fe362a70f4e62d0ac994e301" + integrity sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q== + dependencies: + "@types/mime" "^1" + "@types/node" "*" "@types/serve-static@*": version "1.13.10" @@ -1835,7 +2495,14 @@ "@types/mime" "^1" "@types/node" "*" -"@types/ws@^7.4.4": +"@types/websocket@1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@types/websocket/-/websocket-1.0.2.tgz#d2855c6a312b7da73ed16ba6781815bf30c6187a" + integrity sha512-B5m9aq7cbbD/5/jThEr33nUY8WEfVi6A2YKCTOvw5Ldy7mtsOkqRvGjnzy6g7iMMDsgu7xREuCzqATLDLQVKcQ== + dependencies: + "@types/node" "*" + +"@types/ws@^7.0.0", "@types/ws@^7.4.4": version "7.4.7" resolved 
"https://registry.npmjs.org/@types/ws/-/ws-7.4.7.tgz" integrity sha512-JQbbmxZTZehdc2iszGKs5oC3NFnjeay7mtAWrdt7qNtAVK0g19muApzAy4bm9byz79xa2ZnO/BOBC2R8RC5Lww== @@ -1869,6 +2536,13 @@ fast-url-parser "^1.1.3" tslib "^2.3.1" +"@wry/context@^0.7.0", "@wry/context@^0.7.3": + version "0.7.3" + resolved "https://registry.yarnpkg.com/@wry/context/-/context-0.7.3.tgz#240f6dfd4db5ef54f81f6597f6714e58d4f476a1" + integrity sha512-Nl8WTesHp89RF803Se9X3IiHjdmLBrIvPMaJkl+rKVJAYyPsz1TEUbu89943HpvujtSJgDUx9W4vZw3K1Mr3sA== + dependencies: + tslib "^2.3.0" + "@wry/equality@^0.1.2": version "0.1.11" resolved "https://registry.npmjs.org/@wry/equality/-/equality-0.1.11.tgz" @@ -1876,6 +2550,20 @@ dependencies: tslib "^1.9.3" +"@wry/equality@^0.5.6": + version "0.5.6" + resolved "https://registry.yarnpkg.com/@wry/equality/-/equality-0.5.6.tgz#cd4a533c72c3752993ab8cbf682d3d20e3cb601e" + integrity sha512-D46sfMTngaYlrH+OspKf8mIJETntFnf6Hsjb0V41jAXJ7Bx2kB8Rv8RCUujuVWYttFtHkUNp7g+FwxNQAr6mXA== + dependencies: + tslib "^2.3.0" + +"@wry/trie@^0.4.3": + version "0.4.3" + resolved "https://registry.yarnpkg.com/@wry/trie/-/trie-0.4.3.tgz#077d52c22365871bf3ffcbab8e95cb8bc5689af4" + integrity sha512-I6bHwH0fSf6RqQcnnXLJKhkSXG45MFral3GxPaY4uAl0LYDZM+YDVDAiU9bYwjTuysy1S0IeecWtmq1SZA3M1w== + dependencies: + tslib "^2.3.0" + JSONStream@1.3.2: version "1.3.2" resolved "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.2.tgz" @@ -1892,6 +2580,11 @@ JSONStream@^1.3.5: jsonparse "^1.2.0" through ">=2.2.7 <3" +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + abort-controller@3.0.0, abort-controller@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz" @@ -1899,36 +2592,11 @@ abort-controller@3.0.0, abort-controller@^3.0.0: dependencies: event-target-shim 
"^5.0.0" -abortcontroller-polyfill@^1.7.3: +abortcontroller-polyfill@^1.7.3, abortcontroller-polyfill@^1.7.5: version "1.7.5" resolved "https://registry.yarnpkg.com/abortcontroller-polyfill/-/abortcontroller-polyfill-1.7.5.tgz#6738495f4e901fbb57b6c0611d0c75f76c485bed" integrity sha512-JMJ5soJWP18htbbxJjG7bG6yuI6pRhgJ0scHHTfkUjf6wjP912xZWvM+A4sJK3gqd9E8fcPbDnOefbA9Th/FIQ== -abstract-level@1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/abstract-level/-/abstract-level-1.0.3.tgz#78a67d3d84da55ee15201486ab44c09560070741" - integrity sha512-t6jv+xHy+VYwc4xqZMn2Pa9DjcdzvzZmQGRjTFc8spIbRGHgBrEKbPq+rYXc7CCo0lxgYvSgKVg9qZAhpVQSjA== - dependencies: - buffer "^6.0.3" - catering "^2.1.0" - is-buffer "^2.0.5" - level-supports "^4.0.0" - level-transcoder "^1.0.1" - module-error "^1.0.1" - queue-microtask "^1.2.3" - -abstract-leveldown@7.2.0, abstract-leveldown@^7.2.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-7.2.0.tgz#08d19d4e26fb5be426f7a57004851b39e1795a2e" - integrity sha512-DnhQwcFEaYsvYDnACLZhMmCWd3rkOeEvglpa4q5i/5Jlm3UIsWaxVzuXvDLFCSCWRO3yy2/+V/G7FusFgejnfQ== - dependencies: - buffer "^6.0.3" - catering "^2.0.0" - is-buffer "^2.0.5" - level-concat-iterator "^3.0.0" - level-supports "^2.0.1" - queue-microtask "^1.2.3" - abstract-leveldown@^6.2.1: version "6.3.0" resolved "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.3.0.tgz" @@ -1954,7 +2622,15 @@ abstract-leveldown@~2.7.1: dependencies: xtend "~4.0.0" -abstract-leveldown@~6.2.1, abstract-leveldown@~6.2.3: +abstract-leveldown@~6.0.0, abstract-leveldown@~6.0.1: + version "6.0.3" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-6.0.3.tgz#b4b6159343c74b0c5197b2817854782d8f748c4a" + integrity sha512-jzewKKpZbaYUa6HTThnrl+GrJhzjEAeuc7hTVpZdzg7kupXZFoqQDFwyOwLNbmJKJlmzw8yiipMPkDiuKkT06Q== + dependencies: + level-concat-iterator "~2.0.0" + xtend "~4.0.0" + +abstract-leveldown@~6.2.1: version "6.2.3" 
resolved "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.2.3.tgz" integrity sha512-BsLm5vFMRUrrLeCcRc+G0t2qOaTzpoJQLOubq2XM72eNpjF5UdU5o/5NvlNhx95XHcAvcl8OMXr4mlg/fRgUXQ== @@ -1988,9 +2664,9 @@ aes-js@3.0.0: resolved "https://registry.npmjs.org/aes-js/-/aes-js-3.0.0.tgz" integrity sha1-4h3xCtbCBTKVvLuNq0Cwnb6ofk0= -aes-js@^3.1.2: +aes-js@^3.1.1: version "3.1.2" - resolved "https://registry.npmjs.org/aes-js/-/aes-js-3.1.2.tgz" + resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.1.2.tgz#db9aabde85d5caabbfc0d4f2a4446960f627146a" integrity sha512-e5pEa2kBnBOgR4Y/p20pskXI74UEz7de8ZGVo58asOtvSVG5YAbJeELPZxOmt+Bnz3rX753YKhfIn4X4l1PPRQ== ajv-formats@^2.1.1: @@ -2047,6 +2723,17 @@ ansi-escapes@^4.3.2: dependencies: type-fest "^0.21.3" +ansi-mark@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/ansi-mark/-/ansi-mark-1.0.4.tgz#1cd4ba8d57f15f109d6aaf6ec9ca9786c8a4ee6c" + integrity sha512-3UG9PiYp/eyLuOojJbcQfiH0s8VmHV4M/K9e24q5EBD8PDoDy0Ii4y/VYnTgF+0hWjm8WXz4ybmX6kftbeelFw== + dependencies: + ansi-regex "^3.0.0" + array-uniq "^1.0.3" + chalk "^2.3.2" + strip-ansi "^4.0.0" + super-split "^1.1.0" + ansi-regex@^2.0.0: version "2.1.1" resolved "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz" @@ -2072,7 +2759,7 @@ ansi-styles@^2.2.1: resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz" integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= -ansi-styles@^3.2.1: +ansi-styles@^3.2.0, ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz" integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== @@ -2091,6 +2778,11 @@ ansicolors@~0.3.2: resolved "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz" integrity sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg== +any-promise@^1.3.0: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" + integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A== + any-signal@^2.1.2: version "2.1.2" resolved "https://registry.npmjs.org/any-signal/-/any-signal-2.1.2.tgz" @@ -2104,7 +2796,7 @@ any-signal@^3.0.0: resolved "https://registry.npmjs.org/any-signal/-/any-signal-3.0.1.tgz" integrity sha512-xgZgJtKEa9YmDqXodIgl7Fl1C8yNXr8w6gXjqK3LW4GcEiYT+6AQfJSE/8SPsEpLLmcvbv8YU+qet94UewHxqg== -anymatch@~3.1.2: +anymatch@~3.1.1, anymatch@~3.1.2: version "3.1.3" resolved "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz" integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== @@ -2112,20 +2804,28 @@ anymatch@~3.1.2: normalize-path "^3.0.0" picomatch "^2.0.4" -apisauce@^2.1.5: +apisauce@^2.0.1, apisauce@^2.1.5: version "2.1.6" resolved "https://registry.npmjs.org/apisauce/-/apisauce-2.1.6.tgz" integrity sha512-MdxR391op/FucS2YQRfB/NMRyCnHEPDd4h17LRIuVYi0BpGmMhpxc0shbOpfs5ahABuBEffNCGal5EcsydbBWg== dependencies: axios "^0.21.4" -apollo-datasource@^3.3.2: - version "3.3.2" - resolved "https://registry.yarnpkg.com/apollo-datasource/-/apollo-datasource-3.3.2.tgz#5711f8b38d4b7b53fb788cb4dbd4a6a526ea74c8" - integrity sha512-L5TiS8E2Hn/Yz7SSnWIVbZw0ZfEIXZCa5VUiVxD9P53JvSrf4aStvsFDlGWPvpIdCR+aly2CfoB79B9/JjKFqg== +apollo-cache-control@^0.15.0: + version "0.15.0" + resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.15.0.tgz#45897ed318b883bf964960e6fcd7516a50b4e4d4" + integrity sha512-U2uYvHZsWmR6s6CD5zlq3PepfbUAM8953CeVM2Y2QYMtJ8i4CYplEPbIWb3zTIXSPbIPeWGddM56pChI6Iz3zA== + dependencies: + apollo-server-env "^3.2.0" + apollo-server-plugin-base "^0.14.0" + +apollo-datasource@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/apollo-datasource/-/apollo-datasource-0.10.0.tgz#5450fc88f23f73b36ba04b4f7a6d00ef2f5364a9" + integrity 
sha512-wrLhuoM2MtA0KA0+3qyioe0H2FjAxjTvuFOlNCk6WberA887m0MQlWULZImCWTkKuN+zEAMerHfxN+F+W8+lBA== dependencies: - "@apollo/utils.keyvaluecache" "^1.0.1" - apollo-server-env "^4.2.1" + apollo-server-caching "^0.7.0" + apollo-server-env "^3.2.0" apollo-fetch@^0.7.0: version "0.7.0" @@ -2134,7 +2834,16 @@ apollo-fetch@^0.7.0: dependencies: cross-fetch "^1.0.0" -apollo-link@^1.2.14: +apollo-graphql@^0.9.0: + version "0.9.7" + resolved "https://registry.yarnpkg.com/apollo-graphql/-/apollo-graphql-0.9.7.tgz#33185093b497a578f2df61ab8ecc6447d700ae64" + integrity sha512-bezL9ItUWUGHTm1bI/XzIgiiZbhXpsC7uxk4UxFPmcVJwJsDc3ayZ99oXxAaK+3Rbg/IoqrHckA6CwmkCsbaSA== + dependencies: + core-js-pure "^3.10.2" + lodash.sortby "^4.7.0" + sha.js "^2.4.11" + +apollo-link@1.2.14, apollo-link@^1.2.14: version "1.2.14" resolved "https://registry.npmjs.org/apollo-link/-/apollo-link-1.2.14.tgz" integrity sha512-p67CMEFP7kOG1JZ0ZkYZwRDa369w5PIjtMjvrQd/HnIV8FRsHRqLqK+oAZQnFa1DDdZtOtHTi+aMIW6EatC2jg== @@ -2144,97 +2853,131 @@ apollo-link@^1.2.14: tslib "^1.9.3" zen-observable-ts "^0.8.21" -apollo-reporting-protobuf@^3.4.0: - version "3.4.0" - resolved "https://registry.yarnpkg.com/apollo-reporting-protobuf/-/apollo-reporting-protobuf-3.4.0.tgz#6edd31f09d4a3704d9e808d1db30eca2229ded26" - integrity sha512-h0u3EbC/9RpihWOmcSsvTW2O6RXVaD/mPEjfrPkxRPTEPWqncsgOoRJw+wih4OqfH3PvTJvoEIf4LwKrUaqWog== +apollo-reporting-protobuf@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/apollo-reporting-protobuf/-/apollo-reporting-protobuf-0.8.0.tgz#ae9d967934d3d8ed816fc85a0d8068ef45c371b9" + integrity sha512-B3XmnkH6Y458iV6OsA7AhfwvTgeZnFq9nPVjbxmLKnvfkEl8hYADtz724uPa0WeBiD7DSFcnLtqg9yGmCkBohg== dependencies: - "@apollo/protobufjs" "1.2.6" + "@apollo/protobufjs" "1.2.2" -apollo-server-core@^3.12.0: - version "3.12.0" - resolved "https://registry.yarnpkg.com/apollo-server-core/-/apollo-server-core-3.12.0.tgz#8aa2a7329ce6fe1823290c45168c749db01548df" - integrity 
sha512-hq7iH6Cgldgmnjs9FVSZeKWRpi0/ZR+iJ1arzeD2VXGxxgk1mAm/cz1Tx0TYgegZI+FvvrRl0UhKEx7sLnIxIg== - dependencies: - "@apollo/utils.keyvaluecache" "^1.0.1" - "@apollo/utils.logger" "^1.0.0" - "@apollo/utils.usagereporting" "^1.0.0" - "@apollographql/apollo-tools" "^0.5.3" - "@apollographql/graphql-playground-html" "1.6.29" - "@graphql-tools/mock" "^8.1.2" - "@graphql-tools/schema" "^8.0.0" +apollo-server-caching@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/apollo-server-caching/-/apollo-server-caching-0.7.0.tgz#e6d1e68e3bb571cba63a61f60b434fb771c6ff39" + integrity sha512-MsVCuf/2FxuTFVhGLK13B+TZH9tBd2qkyoXKKILIiGcZ5CDUEBO14vIV63aNkMkS1xxvK2U4wBcuuNj/VH2Mkw== + dependencies: + lru-cache "^6.0.0" + +apollo-server-core@^2.26.2: + version "2.26.2" + resolved "https://registry.yarnpkg.com/apollo-server-core/-/apollo-server-core-2.26.2.tgz#5391bb93654194a5d6b83cf1855f229c94d5b3b1" + integrity sha512-r8jOhf1jElaxsNsALFMy/MLiJCqSa1ZiwxkerVYbsEkyWrpD1Khy0extDkTBrfa6uK8CatX7xK9U413bYNhJFA== + dependencies: + "@apollographql/apollo-tools" "^0.5.0" + "@apollographql/graphql-playground-html" "1.6.27" + "@apollographql/graphql-upload-8-fork" "^8.1.4" "@josephg/resolvable" "^1.0.0" - apollo-datasource "^3.3.2" - apollo-reporting-protobuf "^3.4.0" - apollo-server-env "^4.2.1" - apollo-server-errors "^3.3.1" - apollo-server-plugin-base "^3.7.2" - apollo-server-types "^3.8.0" + "@types/ws" "^7.0.0" + apollo-cache-control "^0.15.0" + apollo-datasource "^0.10.0" + apollo-graphql "^0.9.0" + apollo-reporting-protobuf "^0.8.0" + apollo-server-caching "^0.7.0" + apollo-server-env "^3.2.0" + apollo-server-errors "^2.5.0" + apollo-server-plugin-base "^0.14.0" + apollo-server-types "^0.10.0" + apollo-tracing "^0.16.0" async-retry "^1.2.1" - fast-json-stable-stringify "^2.1.0" + fast-json-stable-stringify "^2.0.0" + graphql-extensions "^0.16.0" graphql-tag "^2.11.0" - loglevel "^1.6.8" + graphql-tools "^4.0.8" + loglevel "^1.6.7" lru-cache "^6.0.0" - node-abort-controller 
"^3.0.1" sha.js "^2.4.11" - uuid "^9.0.0" - whatwg-mimetype "^3.0.0" + subscriptions-transport-ws "^0.9.19" + uuid "^8.0.0" -apollo-server-env@^4.2.1: - version "4.2.1" - resolved "https://registry.yarnpkg.com/apollo-server-env/-/apollo-server-env-4.2.1.tgz#ea5b1944accdbdba311f179e4dfaeca482c20185" - integrity sha512-vm/7c7ld+zFMxibzqZ7SSa5tBENc4B0uye9LTfjJwGoQFY5xsUPH5FpO5j0bMUDZ8YYNbrF9SNtzc5Cngcr90g== +apollo-server-env@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/apollo-server-env/-/apollo-server-env-3.2.0.tgz#0572e307da4784c5d7633a0ade1f45e231da28e3" + integrity sha512-V+kO5e6vUo2JwqV1/Ng71ZE3J6x1hCOC+nID2/++bCYl0/fPY9iLChbBNSgN/uoFcjhgmBchOv+m4o0Nie/TFQ== dependencies: - node-fetch "^2.6.7" + node-fetch "^2.6.1" + util.promisify "^1.0.0" -apollo-server-errors@^3.3.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/apollo-server-errors/-/apollo-server-errors-3.3.1.tgz#ba5c00cdaa33d4cbd09779f8cb6f47475d1cd655" - integrity sha512-xnZJ5QWs6FixHICXHxUfm+ZWqqxrNuPlQ+kj5m6RtEgIpekOPssH/SD9gf2B4HuWV0QozorrygwZnux8POvyPA== +apollo-server-errors@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/apollo-server-errors/-/apollo-server-errors-2.5.0.tgz#5d1024117c7496a2979e3e34908b5685fe112b68" + integrity sha512-lO5oTjgiC3vlVg2RKr3RiXIIQ5pGXBFxYGGUkKDhTud3jMIhs+gel8L8zsEjKaKxkjHhCQAA/bcEfYiKkGQIvA== -apollo-server-express@^3.12.0: - version "3.12.0" - resolved "https://registry.yarnpkg.com/apollo-server-express/-/apollo-server-express-3.12.0.tgz#a6e392bb0427544b8c7e5d841ef07f7691b0c105" - integrity sha512-m8FaGPUfDOEGSm7QRWRmUUGjG/vqvpQoorkId9/FXkC57fz/A59kEdrzkMt9538Xgsa5AV+X4MEWLJhTvlW3LQ== +apollo-server-express@^2.26.2: + version "2.26.2" + resolved "https://registry.yarnpkg.com/apollo-server-express/-/apollo-server-express-2.26.2.tgz#e1df5cc88e3214b4301e754f1f03addfeb0b33ce" + integrity sha512-8KaDwc6/DMK6e5KmP4AGH/NNY7OhEOFxusz3JZ/Du08a+PN8c/JmaEAwQ0aTNpySb5PWpv6xeXRPPwNfaPK9IQ== dependencies: + 
"@apollographql/graphql-playground-html" "1.6.27" "@types/accepts" "^1.3.5" - "@types/body-parser" "1.19.2" - "@types/cors" "2.8.12" - "@types/express" "4.17.14" - "@types/express-serve-static-core" "4.17.31" + "@types/body-parser" "1.19.0" + "@types/cors" "2.8.10" + "@types/express" "^4.17.12" + "@types/express-serve-static-core" "^4.17.21" accepts "^1.3.5" - apollo-server-core "^3.12.0" - apollo-server-types "^3.8.0" - body-parser "^1.19.0" + apollo-server-core "^2.26.2" + apollo-server-types "^0.10.0" + body-parser "^1.18.3" cors "^2.8.5" - parseurl "^1.3.3" + express "^4.17.1" + graphql-subscriptions "^1.0.0" + graphql-tools "^4.0.8" + parseurl "^1.3.2" + subscriptions-transport-ws "^0.9.19" + type-is "^1.6.16" -apollo-server-plugin-base@^3.7.2: - version "3.7.2" - resolved "https://registry.yarnpkg.com/apollo-server-plugin-base/-/apollo-server-plugin-base-3.7.2.tgz#c19cd137bc4c993ba2490ba2b571b0f3ce60a0cd" - integrity sha512-wE8dwGDvBOGehSsPTRZ8P/33Jan6/PmL0y0aN/1Z5a5GcbFhDaaJCjK5cav6npbbGL2DPKK0r6MPXi3k3N45aw== +apollo-server-plugin-base@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/apollo-server-plugin-base/-/apollo-server-plugin-base-0.14.0.tgz#f59b6ab7780304162d2e4fd9ee29ed0696b174ef" + integrity sha512-nTNSFuBhZURGjtWptdVqwemYUOdsvABj/GSKzeNvepiEubiv4N0rt4Gvy1inHDiMbo98wQTdF/7XohNcB9A77g== dependencies: - apollo-server-types "^3.8.0" + apollo-server-types "^0.10.0" -apollo-server-types@^3.8.0: - version "3.8.0" - resolved "https://registry.yarnpkg.com/apollo-server-types/-/apollo-server-types-3.8.0.tgz#d976b6967878681f715fe2b9e4dad9ba86b1346f" - integrity sha512-ZI/8rTE4ww8BHktsVpb91Sdq7Cb71rdSkXELSwdSR0eXu600/sY+1UXhTWdiJvk+Eq5ljqoHLwLbY2+Clq2b9A== +apollo-server-types@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/apollo-server-types/-/apollo-server-types-0.10.0.tgz#af578bf507151a0e86fbdf188f9673ece3f8f164" + integrity 
sha512-LsB3epw1X3Co/HGiKHCGtzWG35J59gG8Ypx0p22+wgdM9AVDm1ylsNGZy+osNIVJc1lUJf3nF5kZ90vA866K/w== + dependencies: + apollo-reporting-protobuf "^0.8.0" + apollo-server-caching "^0.7.0" + apollo-server-env "^3.2.0" + +apollo-server@^2.18.2: + version "2.26.2" + resolved "https://registry.yarnpkg.com/apollo-server/-/apollo-server-2.26.2.tgz#30a9b1737e9899289bc8e9bc55b5897a48bd83cf" + integrity sha512-VgEvOSqcGgMofUVji97nsHnUCXPaMg9efBbM3xI/HbTn3baKQHoRjqfnR4xZeEd9DkhgxwrmSZHBjWCjGOYExg== + dependencies: + apollo-server-core "^2.26.2" + apollo-server-express "^2.26.2" + express "^4.0.0" + graphql-subscriptions "^1.0.0" + graphql-tools "^4.0.8" + stoppable "^1.1.0" + +apollo-tracing@^0.16.0: + version "0.16.0" + resolved "https://registry.yarnpkg.com/apollo-tracing/-/apollo-tracing-0.16.0.tgz#8542ca40ae4a3f84f899e749631b65833557ceb1" + integrity sha512-Oy8kTggB+fJ/hHXwHyMpuTl5KW7u1XetKFDErZVOobUKc2zjc/NgWiC/s7SGYZCgfLodBjvwfa6rMcvLkz7c0w== dependencies: - "@apollo/utils.keyvaluecache" "^1.0.1" - "@apollo/utils.logger" "^1.0.0" - apollo-reporting-protobuf "^3.4.0" - apollo-server-env "^4.2.1" + apollo-server-env "^3.2.0" + apollo-server-plugin-base "^0.14.0" -apollo-server@^3.11.0: - version "3.12.0" - resolved "https://registry.yarnpkg.com/apollo-server/-/apollo-server-3.12.0.tgz#bd43d6dadb8cb16ed34edfc6ad97a81b1fab748a" - integrity sha512-wZHLgBoIdGxr/YpPTG5RwNnS+B2y70T/nCegCnU6Yl+H3PXB92OIguLMhdJIZVjukIOhiQT12dNIehqLQ+1hMQ== +apollo-upload-client@14.1.2: + version "14.1.2" + resolved "https://registry.yarnpkg.com/apollo-upload-client/-/apollo-upload-client-14.1.2.tgz#7a72b000f1cd67eaf8f12b4bda2796d0898c0dae" + integrity sha512-ozaW+4tnVz1rpfwiQwG3RCdCcZ93RV/37ZQbRnObcQ9mjb+zur58sGDPVg9Ef3fiujLmiE/Fe9kdgvIMA3VOjA== dependencies: - "@types/express" "4.17.14" - apollo-server-core "^3.12.0" - apollo-server-express "^3.12.0" - express "^4.17.1" + "@apollo/client" "^3.1.5" + "@babel/runtime" "^7.11.2" + extract-files "^9.0.0" apollo-utilities@^1.0.1, apollo-utilities@^1.3.0: 
version "1.3.4" @@ -2251,6 +2994,19 @@ app-module-path@^2.2.0: resolved "https://registry.npmjs.org/app-module-path/-/app-module-path-2.2.0.tgz" integrity sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ== +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== + +are-we-there-yet@~1.1.2: + version "1.1.7" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.7.tgz#b15474a932adab4ff8a50d9adfa7e4e926f21146" + integrity sha512-nxwy40TuMiUGqMyRHgCSWZ9FM4VAoRP4xUYSTv5ImRog+h9yISPbVH7H8fASCIzYn9wlEv4zvFL7uKDMCFQm3g== + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + arg@^4.1.0: version "4.1.3" resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz" @@ -2268,11 +3024,19 @@ argparse@^2.0.1: resolved "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== -argsarray@0.0.1: +argsarray@0.0.1, argsarray@^0.0.1: version "0.0.1" resolved "https://registry.npmjs.org/argsarray/-/argsarray-0.0.1.tgz" integrity sha1-bnIHtOzbObCviDA/pa4ivajfYcs= +array-buffer-byte-length@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz#fabe8bc193fea865f317fe7807085ee0dee5aead" + integrity sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A== + dependencies: + call-bind "^1.0.2" + is-array-buffer "^3.0.1" + array-filter@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/array-filter/-/array-filter-1.0.0.tgz" @@ -2288,11 +3052,60 @@ array-union@^2.1.0: resolved "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz" integrity 
sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== -asap@~2.0.6: +array-uniq@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q== + +array.prototype.map@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/array.prototype.map/-/array.prototype.map-1.0.5.tgz#6e43c2fee6c0fb5e4806da2dc92eb00970809e55" + integrity sha512-gfaKntvwqYIuC7mLLyv2wzZIJqrRhn5PZ9EfFejSx6a78sV7iDsGpG9P+3oUPtm1Rerqm6nrKS4FYuTIvWfo3g== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + es-abstract "^1.20.4" + es-array-method-boxes-properly "^1.0.0" + is-string "^1.0.7" + +array.prototype.reduce@^1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/array.prototype.reduce/-/array.prototype.reduce-1.0.6.tgz#63149931808c5fc1e1354814923d92d45f7d96d5" + integrity sha512-UW+Mz8LG/sPSU8jRDCjVr6J/ZKAGpHfwrZ6kWTG5qCxIEiXdVshqGnu5vEZA8S1y6X4aCSbQZ0/EEsfvEvBiSg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-array-method-boxes-properly "^1.0.0" + is-string "^1.0.7" + +arraybuffer.prototype.slice@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.1.tgz#9b5ea3868a6eebc30273da577eb888381c0044bb" + integrity sha512-09x0ZWFEjj4WD8PDbykUwo3t9arLn8NIzmmYEJFpYekOAQjpkGSyrQhNoRTcwwcFRu+ycWF78QZ63oWTqSjBcw== + dependencies: + array-buffer-byte-length "^1.0.0" + call-bind "^1.0.2" + define-properties "^1.2.0" + get-intrinsic "^1.2.1" + is-array-buffer "^3.0.2" + is-shared-array-buffer "^1.0.2" + +asap@~2.0.3, asap@~2.0.6: version "2.0.6" resolved "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz" integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= +asn1.js@^5.2.0: + version "5.4.1" + resolved 
"https://registry.yarnpkg.com/asn1.js/-/asn1.js-5.4.1.tgz#11a980b84ebb91781ce35b0fdc2ee294e3783f07" + integrity sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA== + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + safer-buffer "^2.1.0" + asn1@~0.2.3: version "0.2.6" resolved "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz" @@ -2331,7 +3144,7 @@ assert-plus@1.0.0, assert-plus@^1.0.0: resolved "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz" integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== -async-eventemitter@0.2.4, async-eventemitter@^0.2.2: +async-eventemitter@^0.2.2: version "0.2.4" resolved "https://registry.npmjs.org/async-eventemitter/-/async-eventemitter-0.2.4.tgz" integrity sha512-pd20BwL7Yt1zwDFy+8MX8F1+WCT8aQeKj0kQnTrH9WaeRETlRamVhD0JtRPmrV4GfOJ2F9CvdQkZeZhnh2TuHw== @@ -2488,29 +3301,34 @@ babel-messages@^6.23.0: dependencies: babel-runtime "^6.22.0" -babel-plugin-polyfill-corejs2@^0.4.3: - version "0.4.3" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz#75044d90ba5043a5fb559ac98496f62f3eb668fd" - integrity sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw== +babel-plugin-polyfill-corejs2@^0.1.4: + version "0.1.10" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.1.10.tgz#a2c5c245f56c0cac3dbddbf0726a46b24f0f81d1" + integrity sha512-DO95wD4g0A8KRaHKi0D51NdGXzvpqVLnLu5BTvDlpqUEpTmeEtypgC1xqesORaWmiUOQI14UHKlzNd9iZ2G3ZA== dependencies: - "@babel/compat-data" "^7.17.7" - "@babel/helper-define-polyfill-provider" "^0.4.0" + "@babel/compat-data" "^7.13.0" + "@babel/helper-define-polyfill-provider" "^0.1.5" semver "^6.1.1" -babel-plugin-polyfill-corejs3@^0.8.1: - version "0.8.1" - resolved 
"https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz#39248263c38191f0d226f928d666e6db1b4b3a8a" - integrity sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q== +babel-plugin-polyfill-corejs3@^0.1.3: + version "0.1.7" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.1.7.tgz#80449d9d6f2274912e05d9e182b54816904befd0" + integrity sha512-u+gbS9bbPhZWEeyy1oR/YaaSpod/KDT07arZHb80aTpl8H5ZBq+uN1nN9/xtX7jQyfLdPfoqI4Rue/MQSWJquw== dependencies: - "@babel/helper-define-polyfill-provider" "^0.4.0" - core-js-compat "^3.30.1" + "@babel/helper-define-polyfill-provider" "^0.1.5" + core-js-compat "^3.8.1" -babel-plugin-polyfill-regenerator@^0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz#e7344d88d9ef18a3c47ded99362ae4a757609380" - integrity sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g== +babel-plugin-polyfill-regenerator@^0.1.2: + version "0.1.6" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.1.6.tgz#0fe06a026fe0faa628ccc8ba3302da0a6ce02f3f" + integrity sha512-OUrYG9iKPKz8NxswXbRAdSwF0GhRdIEMTloQATJi4bDuFqrXaXcCUT/VGNrr8pBcjMh1RxZ7Xt9cytVJTJfvMg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.4.0" + "@babel/helper-define-polyfill-provider" "^0.1.5" + +babel-plugin-syntax-trailing-function-commas@^7.0.0-beta.0: + version "7.0.0-beta.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-7.0.0-beta.0.tgz#aa213c1435e2bffeb6fca842287ef534ad05d5cf" + integrity sha512-Xj9XuRuz3nTSbaTXWv3itLOcxyF4oPD8douBBmj7U9BBC6nEBYfyOJYQMf/8PJAFotC62UY5dFfIGEPr7WswzQ== babel-polyfill@^6.26.0: version "6.26.0" @@ -2521,6 +3339,39 @@ 
babel-polyfill@^6.26.0: core-js "^2.5.0" regenerator-runtime "^0.10.5" +babel-preset-fbjs@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/babel-preset-fbjs/-/babel-preset-fbjs-3.4.0.tgz#38a14e5a7a3b285a3f3a86552d650dca5cf6111c" + integrity sha512-9ywCsCvo1ojrw0b+XYk7aFvTH6D9064t0RIL1rtMf3nsa02Xw41MS7sZw216Im35xj/UY0PDBQsa1brUDDF1Ow== + dependencies: + "@babel/plugin-proposal-class-properties" "^7.0.0" + "@babel/plugin-proposal-object-rest-spread" "^7.0.0" + "@babel/plugin-syntax-class-properties" "^7.0.0" + "@babel/plugin-syntax-flow" "^7.0.0" + "@babel/plugin-syntax-jsx" "^7.0.0" + "@babel/plugin-syntax-object-rest-spread" "^7.0.0" + "@babel/plugin-transform-arrow-functions" "^7.0.0" + "@babel/plugin-transform-block-scoped-functions" "^7.0.0" + "@babel/plugin-transform-block-scoping" "^7.0.0" + "@babel/plugin-transform-classes" "^7.0.0" + "@babel/plugin-transform-computed-properties" "^7.0.0" + "@babel/plugin-transform-destructuring" "^7.0.0" + "@babel/plugin-transform-flow-strip-types" "^7.0.0" + "@babel/plugin-transform-for-of" "^7.0.0" + "@babel/plugin-transform-function-name" "^7.0.0" + "@babel/plugin-transform-literals" "^7.0.0" + "@babel/plugin-transform-member-expression-literals" "^7.0.0" + "@babel/plugin-transform-modules-commonjs" "^7.0.0" + "@babel/plugin-transform-object-super" "^7.0.0" + "@babel/plugin-transform-parameters" "^7.0.0" + "@babel/plugin-transform-property-literals" "^7.0.0" + "@babel/plugin-transform-react-display-name" "^7.0.0" + "@babel/plugin-transform-react-jsx" "^7.0.0" + "@babel/plugin-transform-shorthand-properties" "^7.0.0" + "@babel/plugin-transform-spread" "^7.0.0" + "@babel/plugin-transform-template-literals" "^7.0.0" + babel-plugin-syntax-trailing-function-commas "^7.0.0-beta.0" + babel-register@^6.26.0: version "6.26.0" resolved "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz" @@ -2619,15 +3470,10 @@ bcrypt-pbkdf@^1.0.0: dependencies: tweetnacl "^0.14.3" -bech32@1.1.4, bech32@^1.1.3: 
- version "1.1.4" - resolved "https://registry.yarnpkg.com/bech32/-/bech32-1.1.4.tgz#e38c9f37bf179b8eb16ae3a772b40c356d4832e9" - integrity sha512-s0IrSOzLlbvX7yp4WBfPITzpAU8sqQcpsmwXDiKwrG4r491vwCO/XpejasRNl0piBMe/DvP4Tz0mIS/X1DPJBQ== - -big-integer@1.6.36: - version "1.6.36" - resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.36.tgz#78631076265d4ae3555c04f85e7d9d2f3a071a36" - integrity sha512-t70bfa7HYEA1D9idDbmuv7YbsbVkQ+Hp+8KFSul4aE5e/i1bjCNIRYJZlA8Q8p0r9T8cF/RVvwUgRA//FydEyg== +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== big.js@^6.0.3: version "6.2.1" @@ -2673,6 +3519,13 @@ binaryen@102.0.0-nightly.20211028: resolved "https://registry.npmjs.org/binaryen/-/binaryen-102.0.0-nightly.20211028.tgz" integrity sha512-GCJBVB5exbxzzvyt8MGDv/MeUjs6gkXDvf4xOIItRBptYl0Tz5sm1o/uG95YK0L0VeG5ajDu3hRtkBP2kzqC5w== +bindings@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" + integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== + dependencies: + file-uri-to-path "1.0.0" + bl@^1.0.0: version "1.2.3" resolved "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz" @@ -2693,9 +3546,9 @@ blob-to-it@^1.0.1: dependencies: browser-readablestream-to-it "^1.0.3" -bluebird@^3.5.0, bluebird@^3.5.2: +bluebird@^3.4.7, bluebird@^3.5.0: version "3.7.2" - resolved "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== bn.js@4.11.6: @@ -2703,12 +3556,17 @@ bn.js@4.11.6: resolved 
"https://registry.npmjs.org/bn.js/-/bn.js-4.11.6.tgz" integrity sha512-XWwnNNFCuuSQ0m3r3C4LE3EiORltHd9M05pq6FOlVeiophzRbMo50Sbz1ehl8K3Z+jw9+vmgnXefY1hz8X+2wA== -bn.js@^4.11.0, bn.js@^4.11.1, bn.js@^4.11.6, bn.js@^4.11.8, bn.js@^4.11.9: +bn.js@4.11.8: + version "4.11.8" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" + integrity sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA== + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.11.0, bn.js@^4.11.1, bn.js@^4.11.6, bn.js@^4.11.8, bn.js@^4.11.9, bn.js@^4.4.0: version "4.12.0" resolved "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz" integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA== -bn.js@^5.1.2, bn.js@^5.2.0, bn.js@^5.2.1: +bn.js@^5.0.0, bn.js@^5.1.1, bn.js@^5.1.2, bn.js@^5.2.0, bn.js@^5.2.1: version "5.2.1" resolved "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz" integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ== @@ -2736,7 +3594,7 @@ body-parser@1.20.1: type-is "~1.6.18" unpipe "1.0.0" -body-parser@^1.16.0, body-parser@^1.19.0: +body-parser@^1.16.0, body-parser@^1.18.3: version "1.20.2" resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd" integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA== @@ -2781,7 +3639,7 @@ braces@^3.0.2, braces@~3.0.2: dependencies: fill-range "^7.0.1" -brorand@^1.1.0: +brorand@^1.0.1, brorand@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz" integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= @@ -2796,7 +3654,7 @@ browser-stdout@1.3.1: resolved "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== 
-browserify-aes@^1.2.0: +browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz" integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== @@ -2808,17 +3666,59 @@ browserify-aes@^1.2.0: inherits "^2.0.1" safe-buffer "^5.0.1" -browserslist@^4.21.3, browserslist@^4.21.5: - version "4.21.9" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.9.tgz#e11bdd3c313d7e2a9e87e8b4b0c7872b13897635" - integrity sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg== +browserify-cipher@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" + integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w== + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c" + integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A== + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +browserify-rsa@^4.0.0, browserify-rsa@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.1.0.tgz#b2fd06b5b75ae297f7ce2dc651f918f5be158c8d" + integrity sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog== + dependencies: + bn.js "^5.0.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.2.1.tgz#eaf4add46dd54be3bb3b36c0cf15abbeba7956c3" + integrity 
sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg== + dependencies: + bn.js "^5.1.1" + browserify-rsa "^4.0.1" + create-hash "^1.2.0" + create-hmac "^1.1.7" + elliptic "^6.5.3" + inherits "^2.0.4" + parse-asn1 "^5.1.5" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +browserslist@^4.21.10, browserslist@^4.21.9: + version "4.21.10" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.10.tgz#dbbac576628c13d3b2231332cb2ec5a46e015bb0" + integrity sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ== dependencies: - caniuse-lite "^1.0.30001503" - electron-to-chromium "^1.4.431" - node-releases "^2.0.12" + caniuse-lite "^1.0.30001517" + electron-to-chromium "^1.4.477" + node-releases "^2.0.13" update-browserslist-db "^1.0.11" -bs58@^4.0.0, bs58@^4.0.1: +bs58@^4.0.0: version "4.0.1" resolved "https://registry.npmjs.org/bs58/-/bs58-4.0.1.tgz" integrity sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw== @@ -2834,6 +3734,13 @@ bs58check@^2.1.2: create-hash "^1.1.0" safe-buffer "^5.1.2" +bser@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" + integrity sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ== + dependencies: + node-int64 "^0.4.0" + btoa@^1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/btoa/-/btoa-1.2.1.tgz" @@ -2857,7 +3764,17 @@ buffer-fill@^1.0.0: resolved "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz" integrity sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ== -buffer-from@1.1.2, buffer-from@^1.0.0: +buffer-from@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + integrity 
sha512-c5mRlguI/Pe2dSZmpER62rSCu0ryKmWddzRYsuXc50U2/g8jMOulc31VZMa4mYx31U5xsmSOpDCgH88Vl9cDGQ== + +buffer-from@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== + +buffer-from@^1.0.0: version "1.1.2" resolved "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== @@ -2872,15 +3789,7 @@ buffer-xor@^1.0.3: resolved "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz" integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ== -buffer@6.0.3, buffer@^6.0.1, buffer@^6.0.3: - version "6.0.3" - resolved "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz" - integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== - dependencies: - base64-js "^1.3.1" - ieee754 "^1.2.1" - -buffer@^5.0.5, buffer@^5.5.0, buffer@^5.6.0: +buffer@^5.0.5, buffer@^5.2.1, buffer@^5.5.0, buffer@^5.6.0, buffer@^5.7.0: version "5.7.1" resolved "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz" integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== @@ -2888,12 +3797,13 @@ buffer@^5.0.5, buffer@^5.5.0, buffer@^5.6.0: base64-js "^1.3.1" ieee754 "^1.1.13" -bufferutil@4.0.5: - version "4.0.5" - resolved "https://registry.yarnpkg.com/bufferutil/-/bufferutil-4.0.5.tgz#da9ea8166911cc276bf677b8aed2d02d31f59028" - integrity sha512-HTm14iMQKK2FjFLRTM5lAVcyaUzOnqbPtesFIvREgXpJHdQm8bWS+GkQgIkfaBYRHuCnea7w8UVNfwiAQhlr9A== +buffer@^6.0.1, buffer@^6.0.3: + version "6.0.3" + resolved "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz" + integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== 
dependencies: - node-gyp-build "^4.3.0" + base64-js "^1.3.1" + ieee754 "^1.2.1" bufferutil@^4.0.1: version "4.0.7" @@ -2902,6 +3812,13 @@ bufferutil@^4.0.1: dependencies: node-gyp-build "^4.3.0" +busboy@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.3.1.tgz#170899274c5bf38aae27d5c62b71268cd585fd1b" + integrity sha512-y7tTxhGKXcyBxRKAni+awqx8uqaJKrSFSNFSeRG5CsWNdmy2BIK+6VGWEW7TZnIO/533mtMEA4rOevQV815YJw== + dependencies: + dicer "0.3.0" + busboy@^1.6.0: version "1.6.0" resolved "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz" @@ -2914,11 +3831,6 @@ bytes@3.1.2: resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== -cacheable-lookup@^5.0.3: - version "5.0.4" - resolved "https://registry.yarnpkg.com/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz#5a6b865b2c44357be3d5ebc2a467b032719a7005" - integrity sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA== - cacheable-lookup@^6.0.4: version "6.1.0" resolved "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz" @@ -2932,6 +3844,19 @@ cacheable-lookup@^6.0.4: normalize-url "^4.1.0" responselike "^1.0.2" +cacheable-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" + integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== + dependencies: + clone-response "^1.0.2" + get-stream "^5.1.0" + http-cache-semantics "^4.0.0" + keyv "^3.0.0" + lowercase-keys "^2.0.0" + normalize-url "^4.1.0" + responselike "^1.0.2" + cacheable-request@^7.0.2: version "7.0.4" resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-7.0.4.tgz#7a33ebf08613178b403635be7b899d3e69bbe817" @@ -2958,6 +3883,22 @@ callsites@^3.0.0: resolved 
"https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== +camel-case@4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.1.tgz#1fc41c854f00e2f7d0139dfeba1542d6896fe547" + integrity sha512-7fa2WcG4fYFkclIvEmxBbTvmibwF2/agfEBc6q3lOpVu0A13ltLsA+Hr/8Hp6kp5f+G7hKi6t8lys6XxP+1K6Q== + dependencies: + pascal-case "^3.1.1" + tslib "^1.10.0" + +camel-case@4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.2.tgz#9728072a954f805228225a6deea6b38461e1bd5a" + integrity sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw== + dependencies: + pascal-case "^3.1.2" + tslib "^2.0.3" + camel-case@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz" @@ -2966,30 +3907,20 @@ camel-case@^3.0.0: no-case "^2.2.0" upper-case "^1.1.1" -camelcase@^3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/camelcase/-/camelcase-3.0.0.tgz" - integrity sha1-MvxLn82vhF/N9+c7uXysImHwqwo= - camelcase@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz" integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0= -camelcase@^5.0.0: +camelcase@^5.0.0, camelcase@^5.3.1: version "5.3.1" resolved "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz" integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== -camelcase@^6.0.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" - integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== - -caniuse-lite@^1.0.30001503: - version "1.0.30001509" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001509.tgz#2b7ad5265392d6d2de25cd8776d1ab3899570d14" - 
integrity sha512-2uDDk+TRiTX5hMcUYT/7CSyzMZxjfGu0vAUjS2g0LSD8UoXOv0LtpH4LxGMemsiPq6LCVIUjNwVM0erkOkGCDA== +caniuse-lite@^1.0.30001517: + version "1.0.30001525" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001525.tgz#d2e8fdec6116ffa36284ca2c33ef6d53612fe1c8" + integrity sha512-/3z+wB4icFt3r0USMwxujAqRvaD/B7rvGTsKhbhSQErVrJvkZCLhgNLJxU8MevahQVH6hCU9FsHdNUFbiwmE7Q== cardinal@^2.1.1: version "2.1.1" @@ -3004,14 +3935,9 @@ caseless@^0.12.0, caseless@~0.12.0: resolved "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz" integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== -catering@^2.0.0, catering@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/catering/-/catering-2.1.1.tgz#66acba06ed5ee28d5286133982a927de9a04b510" - integrity sha512-K7Qy8O9p76sL3/3m7/zLKbRkyOlSZAgzEaLhyj2mXS8PsCud2Eo4hAb8aLtZqHh0QGqLcb9dlJSu6lHRVENm1w== - -cbor@^5.2.0: +cbor@^5.1.0, cbor@^5.2.0: version "5.2.0" - resolved "https://registry.npmjs.org/cbor/-/cbor-5.2.0.tgz" + resolved "https://registry.yarnpkg.com/cbor/-/cbor-5.2.0.tgz#4cca67783ccd6de7b50ab4ed62636712f287a67c" integrity sha512-5IMhi9e1QU76ppa5/ajP1BmMWZ2FHkhAhjeVKQ/EFCgYSEaeVaoGtL7cxJskf9oCCk+XjzaIdc3IuU/dbA/o2A== dependencies: bignumber.js "^9.0.1" @@ -3041,7 +3967,7 @@ chalk@^1.1.3: strip-ansi "^3.0.0" supports-color "^2.0.0" -chalk@^2.0.0, chalk@^2.3.2, chalk@^2.4.2: +chalk@^2.3.2, chalk@^2.4.2: version "2.4.2" resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -3050,7 +3976,7 @@ chalk@^2.0.0, chalk@^2.3.2, chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" -chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.2: +chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.2: version "4.1.2" resolved "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz" integrity 
sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -3113,6 +4039,21 @@ cheerio@^1.0.0-rc.2: parse5 "^6.0.0" parse5-htmlparser2-tree-adapter "^6.0.0" +chokidar@3.4.2: + version "3.4.2" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.4.2.tgz#38dc8e658dec3809741eb3ef7bb0a47fe424232d" + integrity sha512-IZHaDeBeI+sZJRX7lGcXsdzgvZqKv6sECqsbErJA4mHWfpRrD8B97kSFN4cQz6nGBGiuFia1MKR4d6c1o8Cv7A== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.4.0" + optionalDependencies: + fsevents "~2.1.2" + chokidar@3.5.3: version "3.5.3" resolved "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz" @@ -3208,23 +4149,23 @@ cli-table3@~0.5.0: optionalDependencies: colors "^1.1.2" -cliui@^3.2.0: - version "3.2.0" - resolved "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz" - integrity sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0= +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== dependencies: - string-width "^1.0.1" - strip-ansi "^3.0.1" - wrap-ansi "^2.0.0" + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" -cliui@^7.0.2: - version "7.0.4" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" - integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== dependencies: string-width "^4.2.0" strip-ansi "^6.0.0" - wrap-ansi "^7.0.0" + wrap-ansi "^6.2.0" 
clone-buffer@1.0.0: version "1.0.0" @@ -3238,6 +4179,11 @@ clone-response@^1.0.2: dependencies: mimic-response "^1.0.0" +clone@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb" + integrity sha512-h5FLmEMFHeuzqmpVRcDayNlVZ+k4uK1niyKQN6oUMe7ieJihv44Vc3dY/kDnnWX4PDQSwes48s965PG/D4GntQ== + clone@^1.0.2: version "1.0.4" resolved "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz" @@ -3294,15 +4240,20 @@ command-exists@^1.2.8: resolved "https://registry.npmjs.org/command-exists/-/command-exists-1.2.9.tgz" integrity sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w== +commander@3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/commander/-/commander-3.0.2.tgz#6837c3fb677ad9933d1cfba42dd14d5117d6b39e" + integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== + commander@^2.20.3: version "2.20.3" resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== -commander@^8.1.0: - version "8.3.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" - integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== +component-emitter@1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" + integrity sha512-jPatnhd33viNplKjqXKRkGU345p263OIWzDL2wH3LGIGp5Kojo+uXizHmOADRvhGFFTnJqX3jBAKP6vvmSDKcA== concat-map@0.0.1: version "0.0.1" @@ -3335,6 +4286,11 @@ conf@^10.1.2: pkg-up "^3.1.0" semver "^7.3.5" +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + integrity sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ== + constant-case@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/constant-case/-/constant-case-2.0.0.tgz" @@ -3376,6 +4332,11 @@ convert-source-map@^1.5.1: dependencies: safe-buffer "~5.1.1" +convert-source-map@^1.7.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" + integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A== + cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz" @@ -3386,12 +4347,22 @@ cookie@0.5.0: resolved "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz" integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== -core-js-compat@^3.30.1: - version "3.31.0" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.31.0.tgz#4030847c0766cc0e803dcdfb30055d7ef2064bf1" - integrity sha512-hM7YCu1cU6Opx7MXNu0NuumM0ezNeAeRKadixyiQELWY3vT3De9S4J5ZBMraWV2vZnrE1Cirl0GtFtDtMUXzPw== +cookiejar@^2.1.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/cookiejar/-/cookiejar-2.1.4.tgz#ee669c1fea2cf42dc31585469d193fef0d65771b" + integrity sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw== + +core-js-compat@^3.8.1: + version "3.32.1" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.32.1.tgz#55f9a7d297c0761a8eb1d31b593e0f5b6ffae964" + integrity sha512-GSvKDv4wE0bPnQtjklV101juQ85g6H3rm5PDP20mqlS5j0kXF3pP97YvAu5hl+uFHqMictp3b2VxOHljWMAtuA== dependencies: - browserslist "^4.21.5" + browserslist "^4.21.10" + +core-js-pure@^3.10.2: + version "3.32.1" + 
resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.32.1.tgz#5775b88f9062885f67b6d7edce59984e89d276f3" + integrity sha512-f52QZwkFVDPf7UEQZGHKx6NYxsxmVGJe5DIvbzOdRMJlmT6yv0KDjR8rmy3ngr/t5wU54c7Sp/qIJH0ppbhVpQ== core-js@^2.4.0, core-js@^2.5.0: version "2.6.12" @@ -3443,6 +4414,14 @@ crc-32@^1.2.0: resolved "https://registry.yarnpkg.com/crc-32/-/crc-32-1.2.2.tgz#3cad35a934b8bf71f25ca524b6da51fb7eace2ff" integrity sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ== +create-ecdh@^4.0.0: + version "4.0.4" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.4.tgz#d6e7f4bffa66736085a0762fd3a632684dabcc4e" + integrity sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A== + dependencies: + bn.js "^4.1.0" + elliptic "^6.5.3" + create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz" @@ -3454,7 +4433,7 @@ create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0: ripemd160 "^2.0.1" sha.js "^2.4.0" -create-hmac@^1.1.4, create-hmac@^1.1.7: +create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: version "1.1.7" resolved "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz" integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg== @@ -3471,6 +4450,20 @@ create-require@^1.1.0: resolved "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz" integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== +cross-fetch@3.0.6: + version "3.0.6" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.0.6.tgz#3a4040bc8941e653e0e9cf17f29ebcd177d3365c" + integrity sha512-KBPUbqgFjzWlVcURG+Svp9TlhA5uliYtiNx/0r8nv0pdypeQCRJ9IaSIc3q/x3q8t3F75cHuwxVql1HFGHCNJQ== + dependencies: + node-fetch "2.6.1" + +cross-fetch@3.1.4: + version "3.1.4" + resolved 
"https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.4.tgz#9723f3a3a247bf8b89039f3a380a9244e8fa2f39" + integrity sha512-1eAtFWdIubi6T4XPy6ei9iUFoKpUkIF971QLN8lIvvvwueI65+Nw5haMNKUwfJxabqlIIDODJKGrQ66gxC0PbQ== + dependencies: + node-fetch "2.6.1" + cross-fetch@^1.0.0: version "1.1.1" resolved "https://registry.npmjs.org/cross-fetch/-/cross-fetch-1.1.1.tgz" @@ -3494,6 +4487,20 @@ cross-fetch@^3.1.4: dependencies: node-fetch "^2.6.11" +cross-fetch@^3.1.5: + version "3.1.8" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.8.tgz#0327eba65fd68a7d119f8fb2bf9334a1a7956f82" + integrity sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg== + dependencies: + node-fetch "^2.6.12" + +cross-fetch@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-4.0.0.tgz#f037aef1580bb3a1a35164ea2a848ba81b445983" + integrity sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g== + dependencies: + node-fetch "^2.6.12" + cross-spawn@7.0.3, cross-spawn@^7.0.0, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz" @@ -3514,18 +4521,22 @@ cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -crypto-addr-codec@^0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/crypto-addr-codec/-/crypto-addr-codec-0.1.7.tgz#e16cea892730178fe25a38f6d15b680cab3124ae" - integrity sha512-X4hzfBzNhy4mAc3UpiXEC/L0jo5E8wAa9unsnA8nNXYzXjCcGk83hfC5avJWCSGT8V91xMnAS9AKMHmjw5+XCg== +crypto-browserify@3.12.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg== dependencies: - base-x "^3.0.8" - big-integer "1.6.36" - blakejs "^1.1.0" - bs58 "^4.0.1" - ripemd160-min "0.0.6" - safe-buffer "^5.2.0" - 
sha3 "^2.1.1" + browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" css-select@^3.1.2: version "3.1.2" @@ -3543,11 +4554,6 @@ css-what@^4.0.0: resolved "https://registry.npmjs.org/css-what/-/css-what-4.0.0.tgz" integrity sha512-teijzG7kwYfNVsUh2H/YN62xW3KK9YhXEgSlbxMlcyjPNvdKJqFx5lrwlJgoFP1ZHlB89iGDlo/JyshKeRhv5A== -css-what@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/css-what/-/css-what-6.1.0.tgz#fb5effcf76f1ddea2c81bdfaa4de44e79bac70f4" - integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw== - cssfilter@0.0.10: version "0.0.10" resolved "https://registry.npmjs.org/cssfilter/-/cssfilter-0.0.10.tgz" @@ -3568,10 +4574,10 @@ dashdash@^1.12.0: dependencies: assert-plus "^1.0.0" -dataloader@2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/dataloader/-/dataloader-2.1.0.tgz#c69c538235e85e7ac6c6c444bae8ecabf5de9df7" - integrity sha512-qTcEYLen3r7ojZNgVUaRggOI+KM7jrKxXeSHhogh/TWxYMeONEMqY+hmkobiYQozsGIyg9OYVzO4ZIfoB4I0pQ== +dataloader@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dataloader/-/dataloader-2.0.0.tgz#41eaf123db115987e21ca93c005cd7753c55fe6f" + integrity sha512-YzhyDAwA4TaQIhM5go+vCLmU0UikghC/t9DTQYZR2M/UvZ1MdOhPezSDZcjj9uqQJOMqjLcpWtyW2iNINdlatQ== debounce-fn@^4.0.0: version "4.0.0" @@ -3594,7 +4600,14 @@ debug@3.1.0: dependencies: ms "2.0.0" -debug@4.3.4, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4: +debug@4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== + dependencies: + ms "^2.1.1" + +debug@4.3.4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4: version 
"4.3.4" resolved "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== @@ -3608,22 +4621,17 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" -decamelize@^1.1.1, decamelize@^1.2.0: +decamelize@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz" integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= -decamelize@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" - integrity sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ== - decode-uri-component@^0.2.0: version "0.2.0" resolved "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz" integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= -decompress-response@^3.3.0: +decompress-response@^3.2.0, decompress-response@^3.3.0: version "3.3.0" resolved "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz" integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= @@ -3649,7 +4657,12 @@ defaults@^1.0.3: dependencies: clone "^1.0.2" -defer-to-connect@^2.0.0, defer-to-connect@^2.0.1: +defer-to-connect@^1.0.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" + integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== + +defer-to-connect@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-2.0.1.tgz#8016bdb4143e4632b77a3449c6236277de520587" integrity sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg== @@ -3661,6 +4674,14 @@ deferred-leveldown@~1.2.1: dependencies: abstract-leveldown "~2.6.0" +deferred-leveldown@~5.0.0: + version "5.0.1" + resolved 
"https://registry.yarnpkg.com/deferred-leveldown/-/deferred-leveldown-5.0.1.tgz#1642eb18b535dfb2b6ac4d39fb10a9cbcfd13b09" + integrity sha512-BXohsvTedWOLkj2n/TY+yqVlrCWa2Zs8LSxh3uCAgFOru7/pjxKyZAexGa1j83BaKloER4PqUyQ9rGPJLt9bqA== + dependencies: + abstract-leveldown "~6.0.0" + inherits "^2.0.3" + deferred-leveldown@~5.3.0: version "5.3.0" resolved "https://registry.npmjs.org/deferred-leveldown/-/deferred-leveldown-5.3.0.tgz" @@ -3669,6 +4690,14 @@ deferred-leveldown@~5.3.0: abstract-leveldown "~6.2.1" inherits "^2.0.3" +define-properties@^1.1.2, define-properties@^1.1.3, define-properties@^1.1.4, define-properties@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.0.tgz#52988570670c9eacedd8064f4a990f2405849bd5" + integrity sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA== + dependencies: + has-property-descriptors "^1.0.0" + object-keys "^1.1.1" + delay@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz" @@ -3679,16 +4708,34 @@ delayed-stream@~1.0.0: resolved "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz" integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + integrity sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ== + depd@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz" integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ== + 
deprecated-decorator@^0.1.6: version "0.1.6" resolved "https://registry.npmjs.org/deprecated-decorator/-/deprecated-decorator-0.1.6.tgz" integrity sha1-AJZjF7ehL+kvPMgx91g68ym4bDc= +des.js@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.1.0.tgz#1d37f5766f3bbff4ee9638e871a8768c173b81da" + integrity sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg== + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + destroy@1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz" @@ -3706,16 +4753,32 @@ detect-indent@^5.0.0: resolved "https://registry.npmjs.org/detect-indent/-/detect-indent-5.0.0.tgz" integrity sha1-OHHMCmoALow+Wzz38zYmRnXwa50= -diff@5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/diff/-/diff-5.0.0.tgz#7ed6ad76d859d030787ec35855f5b1daf31d852b" - integrity sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w== +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + integrity sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg== -diff@^4.0.1: +dicer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/dicer/-/dicer-0.3.0.tgz#eacd98b3bfbf92e8ab5c2fdb71aaac44bb06b872" + integrity sha512-MdceRRWqltEG2dZqO769g27N/3PXfcKl04VhYnBlo2YhH7zPi88VebsjTKclaOyiuMaGU72hTfw3VkUitGcVCA== + dependencies: + streamsearch "0.1.2" + +diff@4.0.2, diff@^4.0.1: version "4.0.2" resolved "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== +diffie-hellman@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" + integrity 
sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg== + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + dir-glob@^3.0.1: version "3.0.1" resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz" @@ -3767,15 +4830,6 @@ dom-serializer@^1.0.1, dom-serializer@~1.2.0: domhandler "^4.0.0" entities "^2.0.0" -dom-serializer@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" - integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg== - dependencies: - domelementtype "^2.3.0" - domhandler "^5.0.2" - entities "^4.2.0" - dom-walk@^0.1.0: version "0.1.2" resolved "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz" @@ -3786,7 +4840,7 @@ domelementtype@^2.0.1, domelementtype@^2.1.0: resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-2.1.0.tgz" integrity sha512-LsTgx/L5VpD+Q8lmsXSHW2WpA+eBlZ9HPf3erD1IoPF00/3JKHZ3BknUVA2QGDNu69ZNmyFmCWBSO45XjYKC5w== -domelementtype@^2.3.0: +domelementtype@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== @@ -3798,12 +4852,21 @@ domhandler@^4.0.0: dependencies: domelementtype "^2.1.0" -domhandler@^5.0.2, domhandler@^5.0.3: - version "5.0.3" - resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" - integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== +domhandler@^4.2.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.3.1.tgz#8d792033416f59d68bc03a5aa7b018c1ca89279c" + integrity 
sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ== + dependencies: + domelementtype "^2.2.0" + +domutils@^2.4.3: + version "2.8.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.8.0.tgz#4437def5db6e2d1f5d6ee859bd95ca7d02048135" + integrity sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A== dependencies: - domelementtype "^2.3.0" + dom-serializer "^1.0.1" + domelementtype "^2.2.0" + domhandler "^4.2.0" domutils@^2.4.4: version "2.4.4" @@ -3814,15 +4877,6 @@ domutils@^2.4.4: domelementtype "^2.0.1" domhandler "^4.0.0" -domutils@^3.0.1: - version "3.1.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.1.0.tgz#c47f551278d3dc4b0b1ab8cbb42d751a6f0d824e" - integrity sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA== - dependencies: - dom-serializer "^2.0.0" - domelementtype "^2.3.0" - domhandler "^5.0.3" - dot-case@^2.1.0: version "2.1.1" resolved "https://registry.npmjs.org/dot-case/-/dot-case-2.1.1.tgz" @@ -3842,6 +4896,11 @@ double-ended-queue@2.1.0-0: resolved "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz" integrity sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw= +duplexer3@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.5.tgz#0b5e4d7bad5de8901ea4440624c8e1d20099217e" + integrity sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA== + ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz" @@ -3881,12 +4940,25 @@ electron-fetch@^1.7.2: dependencies: encoding "^0.1.13" -electron-to-chromium@^1.4.431: - version "1.4.442" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.442.tgz#455f4c5bd6ae73afb634dcffee6f356c26c8e294" - integrity 
sha512-RkrZF//Ya+0aJq2NM3OdisNh5ZodZq1rdXOS96G8DdDgpDKqKE81yTbbQ3F/4CKm1JBPsGu1Lp/akkna2xO06Q== +electron-to-chromium@^1.4.477: + version "1.4.506" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.506.tgz#59f64a211102db4c3ebae2f39cc0e8e1b12b3a07" + integrity sha512-xxGct4GPAKSRlrLBtJxJFYy74W11zX6PO9GyHgl/U+2s3Dp0ZEwAklDfNHXOWcvH7zWMpsmgbR0ggEuaYAVvHA== + +elliptic@6.5.3: + version "6.5.3" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.3.tgz#cb59eb2efdaf73a0bd78ccd7015a62ad6e0f93d6" + integrity sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw== + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" -elliptic@6.5.4, elliptic@^6.4.0, elliptic@^6.5.2, elliptic@^6.5.4: +elliptic@6.5.4, elliptic@^6.4.0, elliptic@^6.5.2, elliptic@^6.5.3, elliptic@^6.5.4: version "6.5.4" resolved "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz" integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ== @@ -3899,16 +4971,16 @@ elliptic@6.5.4, elliptic@^6.4.0, elliptic@^6.5.2, elliptic@^6.5.4: minimalistic-assert "^1.0.1" minimalistic-crypto-utils "^1.0.1" -emittery@0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.10.0.tgz#bb373c660a9d421bb44706ec4967ed50c02a8026" - integrity sha512-AGvFfs+d0JKCJQ4o01ASQLGPmSCxgfU9RFXvzPvZdjKK8oscynksuJhWrSTSw7j7Ep/sZct5b5ZhYCi8S/t0HQ== - emittery@^0.4.1: version "0.4.1" resolved "https://registry.npmjs.org/emittery/-/emittery-0.4.1.tgz" integrity sha512-r4eRSeStEGf6M5SKdrQhhLK5bOwOBxQhIE3YSTnZE3GpKiLfnnhE+tPtrJE79+eDJgm39BM6LSoI8SCx4HbwlQ== +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity 
sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + emoji-regex@^8.0.0: version "8.0.0" resolved "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz" @@ -3969,11 +5041,6 @@ entities@^2.0.0: resolved "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz" integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@^4.2.0, entities@^4.4.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" - integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== - entities@~2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz" @@ -3996,13 +5063,96 @@ errno@~0.1.1: dependencies: prr "~1.0.1" -error-ex@^1.2.0, error-ex@^1.3.1: +error-ex@^1.3.1: version "1.3.2" resolved "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz" integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== dependencies: is-arrayish "^0.2.1" +es-abstract@^1.17.0-next.1, es-abstract@^1.18.0-next.1, es-abstract@^1.18.0-next.2, es-abstract@^1.20.4, es-abstract@^1.21.2, es-abstract@^1.22.1: + version "1.22.1" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.1.tgz#8b4e5fc5cefd7f1660f0f8e1a52900dfbc9d9ccc" + integrity sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw== + dependencies: + array-buffer-byte-length "^1.0.0" + arraybuffer.prototype.slice "^1.0.1" + available-typed-arrays "^1.0.5" + call-bind "^1.0.2" + es-set-tostringtag "^2.0.1" + es-to-primitive "^1.2.1" + function.prototype.name "^1.1.5" + get-intrinsic "^1.2.1" + get-symbol-description "^1.0.0" + globalthis "^1.0.3" + gopd "^1.0.1" + has "^1.0.3" + has-property-descriptors "^1.0.0" + has-proto "^1.0.1" + has-symbols "^1.0.3" + internal-slot "^1.0.5" + 
is-array-buffer "^3.0.2" + is-callable "^1.2.7" + is-negative-zero "^2.0.2" + is-regex "^1.1.4" + is-shared-array-buffer "^1.0.2" + is-string "^1.0.7" + is-typed-array "^1.1.10" + is-weakref "^1.0.2" + object-inspect "^1.12.3" + object-keys "^1.1.1" + object.assign "^4.1.4" + regexp.prototype.flags "^1.5.0" + safe-array-concat "^1.0.0" + safe-regex-test "^1.0.0" + string.prototype.trim "^1.2.7" + string.prototype.trimend "^1.0.6" + string.prototype.trimstart "^1.0.6" + typed-array-buffer "^1.0.0" + typed-array-byte-length "^1.0.0" + typed-array-byte-offset "^1.0.0" + typed-array-length "^1.0.4" + unbox-primitive "^1.0.2" + which-typed-array "^1.1.10" + +es-array-method-boxes-properly@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz#873f3e84418de4ee19c5be752990b2e44718d09e" + integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA== + +es-get-iterator@^1.0.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/es-get-iterator/-/es-get-iterator-1.1.3.tgz#3ef87523c5d464d41084b2c3c9c214f1199763d6" + integrity sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.3" + has-symbols "^1.0.3" + is-arguments "^1.1.1" + is-map "^2.0.2" + is-set "^2.0.2" + is-string "^1.0.7" + isarray "^2.0.5" + stop-iteration-iterator "^1.0.0" + +es-set-tostringtag@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz#338d502f6f674301d710b80c8592de8a15f09cd8" + integrity sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg== + dependencies: + get-intrinsic "^1.1.3" + has "^1.0.3" + has-tostringtag "^1.0.0" + +es-to-primitive@^1.2.1: + version "1.2.1" + resolved 
"https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" + integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + dependencies: + is-callable "^1.1.4" + is-date-object "^1.0.1" + is-symbol "^1.0.2" + es5-ext@^0.10.35, es5-ext@^0.10.50: version "0.10.53" resolved "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz" @@ -4012,7 +5162,12 @@ es5-ext@^0.10.35, es5-ext@^0.10.50: es6-symbol "~3.1.3" next-tick "~1.0.0" -es6-iterator@^2.0.3: +es6-denodeify@^0.1.1: + version "0.1.5" + resolved "https://registry.yarnpkg.com/es6-denodeify/-/es6-denodeify-0.1.5.tgz#31d4d5fe9c5503e125460439310e16a2a3f39c1f" + integrity sha512-731Rf4NqlPvhkT1pIF7r8vZxESJlWocNpXLuyPlVnfEGXlwuJaMvU5WpyyDjpudDC2cgXVX849xljzvQqBg1QQ== + +es6-iterator@~2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.3.tgz#a7de889141a05a94b0854403b2d0a0fbfa98f3b7" integrity sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g== @@ -4033,7 +5188,7 @@ es6-promisify@^5.0.0: dependencies: es6-promise "^4.0.3" -es6-symbol@^3.1.1, es6-symbol@^3.1.3: +es6-symbol@^3.1.1, es6-symbol@~3.1.3: version "3.1.3" resolved "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz" integrity sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA== @@ -4088,10 +5243,10 @@ eth-block-tracker@^4.4.2: pify "^3.0.0" safe-event-emitter "^1.0.1" -eth-ens-namehash@2.0.8, eth-ens-namehash@^2.0.8: +eth-ens-namehash@2.0.8, eth-ens-namehash@^2.0.0: version "2.0.8" - resolved "https://registry.npmjs.org/eth-ens-namehash/-/eth-ens-namehash-2.0.8.tgz" - integrity sha1-IprEbsqG1S4MmR58sq74P/D2i88= + resolved "https://registry.yarnpkg.com/eth-ens-namehash/-/eth-ens-namehash-2.0.8.tgz#229ac46eca86d52e0c991e7cb2aef83ff0f68bcf" + integrity 
sha512-VWEI1+KJfz4Km//dadyvBBoBeSQ0MHTXPvr8UIXiLW6IanxvAV+DmlZAijZwAyggqGUfwQBeHf7tc9wzc1piSw== dependencies: idna-uts46-hx "^2.3.1" js-sha3 "^0.5.7" @@ -4110,7 +5265,16 @@ eth-json-rpc-errors@^2.0.2: dependencies: fast-safe-stringify "^2.0.6" -eth-lib@0.2.8: +eth-lib@0.2.7: + version "0.2.7" + resolved "https://registry.yarnpkg.com/eth-lib/-/eth-lib-0.2.7.tgz#2f93f17b1e23aec3759cd4a3fe20c1286a3fc1ca" + integrity sha512-VqEBQKH92jNsaE8lG9CTq8M/bc12gdAfb5MY8Ro1hVyXkh7rOtY3m5tRHK3Hus5HqIAAwU2ivcUjTLVwsvf/kw== + dependencies: + bn.js "^4.11.6" + elliptic "^6.4.0" + xhr-request-promise "^0.1.2" + +eth-lib@0.2.8, eth-lib@^0.2.8: version "0.2.8" resolved "https://registry.npmjs.org/eth-lib/-/eth-lib-0.2.8.tgz" integrity sha512-ArJ7x1WcWOlSpzdoTBX8vkwlkSQ85CjjifSZtV4co64vWxSV8geWfPI9x4SVYu3DSxnX4yWFVTtGL+j9DUFLNw== @@ -4144,17 +5308,7 @@ eth-rpc-errors@^3.0.0: resolved "https://registry.npmjs.org/eth-rpc-errors/-/eth-rpc-errors-3.0.0.tgz" integrity sha512-iPPNHPrLwUlR9xCSYm7HHQjWBasor3+KZfRvwEWxMz3ca0yqnlBeJrnyphkGIXZ4J7AMAaOLmwy4AWhnxOiLxg== dependencies: - fast-safe-stringify "^2.0.6" - -eth-sig-util@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/eth-sig-util/-/eth-sig-util-3.0.1.tgz#8753297c83a3f58346bd13547b59c4b2cd110c96" - integrity sha512-0Us50HiGGvZgjtWTyAI/+qTzYPMLy5Q451D0Xy68bxq1QMWdoOddDwGvsqcFT27uohKgalM9z/yxplyt+mY2iQ== - dependencies: - ethereumjs-abi "^0.6.8" - ethereumjs-util "^5.1.1" - tweetnacl "^1.0.3" - tweetnacl-util "^0.15.0" + fast-safe-stringify "^2.0.6" ethereum-bloom-filters@^1.0.6: version "1.0.10" @@ -4194,6 +5348,28 @@ ethereum-cryptography@^0.1.3: secp256k1 "^4.0.1" setimmediate "^1.0.5" +ethereum-cryptography@^2.0.0, ethereum-cryptography@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-2.1.2.tgz#18fa7108622e56481157a5cb7c01c0c6a672eb67" + integrity sha512-Z5Ba0T0ImZ8fqXrJbpHcbpAvIswRte2wGNR/KePnu8GbbvgJ47lMxT/ZZPG6i9Jaht4azPDop4HaM00J0J59ug== + dependencies: 
+ "@noble/curves" "1.1.0" + "@noble/hashes" "1.3.1" + "@scure/bip32" "1.3.1" + "@scure/bip39" "1.2.1" + +ethereum-ens@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/ethereum-ens/-/ethereum-ens-0.8.0.tgz#6d0f79acaa61fdbc87d2821779c4e550243d4c57" + integrity sha512-a8cBTF4AWw1Q1Y37V1LSCS9pRY4Mh3f8vCg5cbXCCEJ3eno1hbI/+Ccv9SZLISYpqQhaglP3Bxb/34lS4Qf7Bg== + dependencies: + bluebird "^3.4.7" + eth-ens-namehash "^2.0.0" + js-sha3 "^0.5.7" + pako "^1.0.4" + underscore "^1.8.3" + web3 "^1.0.0-beta.34" + ethereum-protocol@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/ethereum-protocol/-/ethereum-protocol-1.0.1.tgz" @@ -4238,14 +5414,14 @@ ethereumjs-block@~2.2.0: ethereumjs-util "^5.0.0" merkle-patricia-tree "^2.1.2" -ethereumjs-common@^1.1.0, ethereumjs-common@^1.5.0: +ethereumjs-common@^1.1.0, ethereumjs-common@^1.3.2, ethereumjs-common@^1.5.0: version "1.5.2" resolved "https://registry.npmjs.org/ethereumjs-common/-/ethereumjs-common-1.5.2.tgz" integrity sha512-hTfZjwGX52GS2jcVO6E2sx4YuFnf0Fhp5ylo4pEPhEffNln7vS59Hr5sLnp3/QCazFLluuBZ+FZ6J5HTp0EqCA== -ethereumjs-tx@^1.2.2, ethereumjs-tx@^1.3.7: +ethereumjs-tx@^1.0.0, ethereumjs-tx@^1.2.0, ethereumjs-tx@^1.2.2, ethereumjs-tx@^1.3.7: version "1.3.7" - resolved "https://registry.npmjs.org/ethereumjs-tx/-/ethereumjs-tx-1.3.7.tgz" + resolved "https://registry.yarnpkg.com/ethereumjs-tx/-/ethereumjs-tx-1.3.7.tgz#88323a2d875b10549b8347e09f4862b546f3d89a" integrity sha512-wvLMxzt1RPhAQ9Yi3/HKZTn0FZYpnsmQdbKYfUUpi4j1SEIcbkd9tndVjcPrufY3V7j2IebOpC00Zp2P/Ay2kA== dependencies: ethereum-common "^0.0.18" @@ -4285,7 +5461,7 @@ ethereumjs-util@^6.0.0, ethereumjs-util@^6.1.0: ethjs-util "0.1.6" rlp "^2.2.3" -ethereumjs-util@^7.1.0, ethereumjs-util@^7.1.1, ethereumjs-util@^7.1.2, ethereumjs-util@^7.1.5: +ethereumjs-util@^7.0.2, ethereumjs-util@^7.1.0, ethereumjs-util@^7.1.1, ethereumjs-util@^7.1.2, ethereumjs-util@^7.1.5: version "7.1.5" resolved 
"https://registry.npmjs.org/ethereumjs-util/-/ethereumjs-util-7.1.5.tgz" integrity sha512-SDl5kKrQAudFBUe5OJM9Ac6WmMyYmXX/6sTmLZ3ffG2eY6ZIGBes3pEDxNN6V72WyOw4CPD5RomKdsa8DAAwLg== @@ -4342,42 +5518,6 @@ ethers@^4.0.32: uuid "2.0.1" xmlhttprequest "1.8.0" -ethers@^5.0.13: - version "5.7.2" - resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.2.tgz#3a7deeabbb8c030d4126b24f84e525466145872e" - integrity sha512-wswUsmWo1aOK8rR7DIKiWSw9DbLWe6x98Jrn8wcTflTVvaXhAMaB5zGAXy0GYQEQp9iO1iSHWVyARQm11zUtyg== - dependencies: - "@ethersproject/abi" "5.7.0" - "@ethersproject/abstract-provider" "5.7.0" - "@ethersproject/abstract-signer" "5.7.0" - "@ethersproject/address" "5.7.0" - "@ethersproject/base64" "5.7.0" - "@ethersproject/basex" "5.7.0" - "@ethersproject/bignumber" "5.7.0" - "@ethersproject/bytes" "5.7.0" - "@ethersproject/constants" "5.7.0" - "@ethersproject/contracts" "5.7.0" - "@ethersproject/hash" "5.7.0" - "@ethersproject/hdnode" "5.7.0" - "@ethersproject/json-wallets" "5.7.0" - "@ethersproject/keccak256" "5.7.0" - "@ethersproject/logger" "5.7.0" - "@ethersproject/networks" "5.7.1" - "@ethersproject/pbkdf2" "5.7.0" - "@ethersproject/properties" "5.7.0" - "@ethersproject/providers" "5.7.2" - "@ethersproject/random" "5.7.0" - "@ethersproject/rlp" "5.7.0" - "@ethersproject/sha2" "5.7.0" - "@ethersproject/signing-key" "5.7.0" - "@ethersproject/solidity" "5.7.0" - "@ethersproject/strings" "5.7.0" - "@ethersproject/transactions" "5.7.0" - "@ethersproject/units" "5.7.0" - "@ethersproject/wallet" "5.7.0" - "@ethersproject/web" "5.7.1" - "@ethersproject/wordlists" "5.7.0" - ethjs-unit@0.1.6: version "0.1.6" resolved "https://registry.npmjs.org/ethjs-unit/-/ethjs-unit-0.1.6.tgz" @@ -4399,22 +5539,27 @@ event-target-shim@^5.0.0: resolved "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz" integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== +eventemitter3@3.1.2, eventemitter3@^3.1.0: + version 
"3.1.2" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7" + integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q== + eventemitter3@4.0.4: version "4.0.4" resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.4.tgz" integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== -eventemitter3@^3.1.0: - version "3.1.2" - resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-3.1.2.tgz" - integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q== +eventemitter3@^4.0.0: + version "4.0.7" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" + integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== events@^3.0.0: version "3.3.0" resolved "https://registry.npmjs.org/events/-/events-3.3.0.tgz" integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== -evp_bytestokey@^1.0.3: +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: version "1.0.3" resolved "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz" integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA== @@ -4453,7 +5598,7 @@ execa@^3.0.0: signal-exit "^3.0.2" strip-final-newline "^2.0.0" -express@^4.14.0, express@^4.17.1: +express@^4.0.0, express@^4.14.0, express@^4.17.1: version "4.18.2" resolved "https://registry.npmjs.org/express/-/express-4.18.2.tgz" integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== @@ -4502,6 +5647,11 @@ extend@~3.0.2: resolved "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz" integrity 
sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== +extract-files@9.0.0, extract-files@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/extract-files/-/extract-files-9.0.0.tgz#8a7744f2437f81f5ed3250ed9f1550de902fe54a" + integrity sha512-CvdFfHkC95B4bBBk36hcEmvdR2awOdhhVUYH6S/zrVj3477zven/fJMYg7121h4T1xHZC+tetUpubpAhxwI7hQ== + extsprintf@1.3.0: version "1.3.0" resolved "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz" @@ -4524,6 +5674,11 @@ fake-merkle-patricia-tree@^1.0.1: dependencies: checkpoint-store "^1.1.0" +faker@5.5.3, faker@^5.3.1: + version "5.5.3" + resolved "https://registry.yarnpkg.com/faker/-/faker-5.5.3.tgz#c57974ee484431b25205c2c8dc09fda861e51e0e" + integrity sha512-wLTv2a28wjUyWkbnX7u/ABZBkUkIF2fCd73V6P2oFqEGEktDfzWx4UxrSqtPRw0xPRAcjeAOIiJWqZm3pP4u3g== + fast-check@3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/fast-check/-/fast-check-3.1.1.tgz#72c5ae7022a4e86504762e773adfb8a5b0b01252" @@ -4531,6 +5686,13 @@ fast-check@3.1.1: dependencies: pure-rand "^5.0.1" +fast-check@^2.12.1: + version "2.25.0" + resolved "https://registry.yarnpkg.com/fast-check/-/fast-check-2.25.0.tgz#5146601851bf3be0953bd17eb2b7d547936c6561" + integrity sha512-wRUT2KD2lAmT75WNIJIHECawoUUMHM0I5jrlLXGtGeqmPL8jl/EldUDjY1VCp6fDY8yflyfUeIOsOBrIbIiArg== + dependencies: + pure-rand "^5.0.1" + fast-decode-uri-component@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/fast-decode-uri-component/-/fast-decode-uri-component-1.0.1.tgz" @@ -4546,6 +5708,22 @@ fast-fifo@^1.0.0: resolved "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.2.0.tgz" integrity sha512-NcvQXt7Cky1cNau15FWy64IjuO8X0JijhTBBrJj1YlxlDfRkJXNaK9RFUjwpfDPzMdv7wB38jr53l9tkNLxnWg== +fast-future@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fast-future/-/fast-future-1.0.2.tgz#8435a9aaa02d79248d17d704e76259301d99280a" + integrity 
sha512-ZdgcQC4CDq0OlirlbmaV1Hvl9hrQBKwSJokpemb2Y82uzQ3mFaGxBCCTbr78t3obRsgjqrJAsEaYWZSK3oEcJQ== + +fast-glob@^3.1.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" + integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + fast-glob@^3.2.9: version "3.2.12" resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz" @@ -4557,7 +5735,7 @@ fast-glob@^3.2.9: merge2 "^1.3.0" micromatch "^4.0.4" -fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0: +fast-json-stable-stringify@^2.0.0: version "2.1.0" resolved "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== @@ -4588,13 +5766,46 @@ fastq@^1.6.0: dependencies: reusify "^1.0.4" -fetch-cookie@0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/fetch-cookie/-/fetch-cookie-0.11.0.tgz#e046d2abadd0ded5804ce7e2cae06d4331c15407" - integrity sha512-BQm7iZLFhMWFy5CZ/162sAGjBfdNWb7a8LEqqnzsHFhxT/X/SVj/z2t2nu3aJvjlbQkrAlTUApplPRjWyH4mhA== +fb-watchman@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.2.tgz#e9524ee6b5c77e9e5001af0f85f3adbb8623255c" + integrity sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA== + dependencies: + bser "2.1.1" + +fbjs-css-vars@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz#216551136ae02fe255932c3ec8775f18e2c078b8" + integrity sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ== + +fbjs@^3.0.0: + version "3.0.5" + resolved 
"https://registry.yarnpkg.com/fbjs/-/fbjs-3.0.5.tgz#aa0edb7d5caa6340011790bd9249dbef8a81128d" + integrity sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg== + dependencies: + cross-fetch "^3.1.5" + fbjs-css-vars "^1.0.0" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^1.0.35" + +fetch-cookie@0.10.1: + version "0.10.1" + resolved "https://registry.yarnpkg.com/fetch-cookie/-/fetch-cookie-0.10.1.tgz#5ea88f3d36950543c87997c27ae2aeafb4b5c4d4" + integrity sha512-beB+VEd4cNeVG1PY+ee74+PkuCQnik78pgLi5Ah/7qdUfov8IctU0vLUbBT8/10Ma5GMBeI4wtxhGrEfKNYs2g== dependencies: tough-cookie "^2.3.3 || ^3.0.1 || ^4.0.0" +fetch-cookie@0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/fetch-cookie/-/fetch-cookie-0.7.0.tgz#a6fc137ad8363aa89125864c6451b86ecb7de802" + integrity sha512-Mm5pGlT3agW6t71xVM7vMZPIvI7T4FaTuFW4jari6dVzYHFDb3WZZsGpN22r/o3XMdkM0E7sPd1EGeyVbH2Tgg== + dependencies: + es6-denodeify "^0.1.1" + tough-cookie "^2.3.1" + fetch-ponyfill@^4.0.0: version "4.1.0" resolved "https://registry.npmjs.org/fetch-ponyfill/-/fetch-ponyfill-4.1.0.tgz" @@ -4602,6 +5813,11 @@ fetch-ponyfill@^4.0.0: dependencies: node-fetch "~1.7.1" +file-uri-to-path@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" + integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== + filelist@^1.0.1, filelist@^1.0.4: version "1.0.4" resolved "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz" @@ -4637,14 +5853,6 @@ find-up@5.0.0: locate-path "^6.0.0" path-exists "^4.0.0" -find-up@^1.0.0: - version "1.1.2" - resolved "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz" - integrity sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8= - dependencies: - path-exists "^2.0.0" - pinkie-promise "^2.0.0" - find-up@^2.1.0: version "2.1.0" resolved 
"https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz" @@ -4659,10 +5867,20 @@ find-up@^3.0.0: dependencies: locate-path "^3.0.0" -flat@^5.0.2: - version "5.0.2" - resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" - integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== +find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +flat@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/flat/-/flat-4.1.1.tgz#a392059cc382881ff98642f5da4dde0a959f309b" + integrity sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA== + dependencies: + is-buffer "~2.0.3" follow-redirects@^1.12.1: version "1.14.8" @@ -4691,6 +5909,11 @@ foreach@^2.0.4: resolved "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz" integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k= +foreach@^2.0.5: + version "2.0.6" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.6.tgz#87bcc8a1a0e74000ff2bf9802110708cfb02eb6e" + integrity sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg== + forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz" @@ -4701,16 +5924,16 @@ form-data-encoder@1.7.1: resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.1.tgz#ac80660e4f87ee0d3d3c3638b7da8278ddb8ec96" integrity sha512-EFRDrsMm/kyqbTQocNvRXMLjc7Es2Vk+IQFx/YW7hkUH1eBl4J1fqiP34l74Yt0pFLCNpc06fkbVk00008mzjg== -form-data@^2.2.0: - version "2.5.1" - resolved "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz" - integrity 
sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== +form-data@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.0.tgz#31b7e39c85f1355b7139ee0c647cf0de7f83c682" + integrity sha512-CKMFDglpbMi6PyN+brwB9Q/GOw0eAnsrEZDgcsH5Krhz5Od/haKHAX0NmQfha2zPPz0JpWzA7GJHGSnvCRLWsg== dependencies: asynckit "^0.4.0" - combined-stream "^1.0.6" + combined-stream "^1.0.8" mime-types "^2.1.12" -form-data@^4.0.0: +form-data@4.0.0, form-data@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== @@ -4719,6 +5942,15 @@ form-data@^4.0.0: combined-stream "^1.0.8" mime-types "^2.1.12" +form-data@^2.2.0: + version "2.5.1" + resolved "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz" + integrity sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + form-data@~2.3.2: version "2.3.3" resolved "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz" @@ -4738,6 +5970,11 @@ fresh@0.5.2: resolved "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz" integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== +fs-capacitor@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/fs-capacitor/-/fs-capacitor-2.0.4.tgz#5a22e72d40ae5078b4fe64fe4d08c0d3fc88ad3c" + integrity sha512-8S4f4WsCryNw2mJJchi46YgB6CR5Ze+4L1h8ewl9tEpL4SJ3ZO+c/bS4BWhB8bK+O3TMqhuZarTitd0S0eh2pA== + fs-constants@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz" @@ -4808,6 +6045,11 @@ fs.realpath@^1.0.0: resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz" integrity 
sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== +fsevents@~2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" + integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== + fsevents@~2.3.2: version "2.3.2" resolved "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz" @@ -4818,40 +6060,48 @@ function-bind@^1.1.1: resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz" integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +function.prototype.name@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" + integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + functions-have-names "^1.2.3" + functional-red-black-tree@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz" integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= -ganache@7.8.0: - version "7.8.0" - resolved "https://registry.yarnpkg.com/ganache/-/ganache-7.8.0.tgz#02154384f246b66e98974cbcbb18e8372df3c2e0" - integrity sha512-IrUYvsaE/m2/NaVIZ7D/gCnsmyU/buechnH6MhUipzG1qJcZIwIp/DoP/LZUcHyhy0Bv0NKZD2pGOjpRhn7l7A== +functions-have-names@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" + integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + 
integrity sha512-14x4kjc6lkD3ltw589k0NrPD6cCNTD6CWoVUNpB85+DrtONoZn+Rug6xZU5RvSC4+TZPxA5AnBibQYAvZn41Hg== dependencies: - "@trufflesuite/bigint-buffer" "1.1.10" - "@trufflesuite/uws-js-unofficial" "20.10.0-unofficial.2" - "@types/bn.js" "^5.1.0" - "@types/lru-cache" "5.1.1" - "@types/seedrandom" "3.0.1" - abstract-level "1.0.3" - abstract-leveldown "7.2.0" - async-eventemitter "0.2.4" - emittery "0.10.0" - keccak "3.0.2" - leveldown "6.1.0" - secp256k1 "4.0.3" - optionalDependencies: - bufferutil "4.0.5" - utf-8-validate "5.0.7" + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" -get-caller-file@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" - integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== -get-caller-file@^2.0.5: +get-caller-file@^2.0.1: version "2.0.5" - resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== get-intrinsic@^1.0.2: @@ -4863,7 +6113,7 @@ get-intrinsic@^1.0.2: has "^1.0.3" has-symbols "^1.0.3" -get-intrinsic@^1.1.3: +get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@^1.2.0, get-intrinsic@^1.2.1: version "1.2.1" resolved 
"https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.1.tgz#d295644fed4505fc9cde952c37ee12b477a83d82" integrity sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw== @@ -4883,11 +6133,28 @@ get-package-type@^0.1.0: resolved "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz" integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== +get-params@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/get-params/-/get-params-0.1.2.tgz#bae0dfaba588a0c60d7834c0d8dc2ff60eeef2fe" + integrity sha512-41eOxtlGgHQRbFyA8KTH+w+32Em3cRdfBud7j67ulzmIfmaHX9doq47s0fa4P5o9H64BZX9nrYI6sJvk46Op+Q== + get-port@^3.1.0: version "3.2.0" resolved "https://registry.npmjs.org/get-port/-/get-port-3.2.0.tgz" integrity sha512-x5UJKlgeUiNT8nyo/AcnwLnZuZNcSjSw0kogRB+Whd1fjjFq4B1hySFxSFWWSn4mIBzg3sRNUDFYc4g5gjPoLg== +get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ== + +get-stream@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== + dependencies: + pump "^3.0.0" + get-stream@^5.0.0, get-stream@^5.1.0: version "5.2.0" resolved "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz" @@ -4900,6 +6167,14 @@ get-stream@^6.0.0, get-stream@^6.0.1: resolved "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== +get-symbol-description@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" + integrity sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.1" + getpass@^0.1.1: version "0.1.7" resolved "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz" @@ -4907,17 +6182,17 @@ getpass@^0.1.1: dependencies: assert-plus "^1.0.0" -glob-parent@^5.1.2, glob-parent@~5.1.2: +glob-parent@^5.1.2, glob-parent@~5.1.0, glob-parent@~5.1.2: version "5.1.2" resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== dependencies: is-glob "^4.0.1" -glob@7.2.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" - integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q== +glob@7.1.6: + version "7.1.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" + integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" @@ -4936,7 +6211,7 @@ glob@9.3.5: minipass "^4.2.4" path-scurry "^1.6.1" -glob@^7.1.3: +glob@^7.1.1, glob@^7.1.3: version "7.2.3" resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz" integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== @@ -4956,12 +6231,36 @@ global@~4.4.0: min-document "^2.19.0" process "^0.11.10" +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + globals@^9.18.0: 
version "9.18.0" resolved "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz" integrity sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ== -globby@^11.1.0: +globalthis@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.3.tgz#5852882a52b80dc301b0660273e1ed082f0b6ccf" + integrity sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA== + dependencies: + define-properties "^1.1.3" + +globby@11.0.3: + version "11.0.3" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.3.tgz#9b1f0cb523e171dd1ad8c7b2a9fb4b644b9593cb" + integrity sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + +globby@11.1.0, globby@^11.1.0: version "11.1.0" resolved "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz" integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== @@ -5072,28 +6371,57 @@ got@12.1.0: p-cancelable "^3.0.0" responselike "^2.0.0" -got@^11.8.5: - version "11.8.6" - resolved "https://registry.yarnpkg.com/got/-/got-11.8.6.tgz#276e827ead8772eddbcfc97170590b841823233a" - integrity sha512-6tfZ91bOr7bOXnK7PRDCGBLa1H4U080YHNaAQ2KsMGlLEzRbk44nsZF2E1IeRc3vtJHPVbKCYgdFbaGO2ljd8g== +got@9.6.0: + version "9.6.0" + resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" + integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== dependencies: - "@sindresorhus/is" "^4.0.0" - "@szmarczak/http-timer" "^4.0.5" - "@types/cacheable-request" "^6.0.1" - "@types/responselike" "^1.0.0" - cacheable-lookup "^5.0.3" - cacheable-request "^7.0.2" - decompress-response "^6.0.0" - http2-wrapper "^1.0.0-beta.5.2" - lowercase-keys "^2.0.0" - 
p-cancelable "^2.0.0" - responselike "^2.0.0" + "@sindresorhus/is" "^0.14.0" + "@szmarczak/http-timer" "^1.1.2" + cacheable-request "^6.0.0" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^4.1.0" + lowercase-keys "^1.0.1" + mimic-response "^1.0.1" + p-cancelable "^1.0.0" + to-readable-stream "^1.0.0" + url-parse-lax "^3.0.0" + +got@^7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/got/-/got-7.1.0.tgz#05450fd84094e6bbea56f451a43a9c289166385a" + integrity sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw== + dependencies: + decompress-response "^3.2.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-plain-obj "^1.1.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + p-cancelable "^0.3.0" + p-timeout "^1.1.1" + safe-buffer "^5.0.1" + timed-out "^4.0.0" + url-parse-lax "^1.0.0" + url-to-options "^1.0.1" graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== +graphql-extensions@^0.16.0: + version "0.16.0" + resolved "https://registry.yarnpkg.com/graphql-extensions/-/graphql-extensions-0.16.0.tgz#32669fde0a2f115de60e5dda818ae457c1d71bb8" + integrity sha512-rZQc/USoEIw437BGRUwoHoLPR1LA791Ltj6axONqgKIyyx2sqIO3YT9kTbB/eIUdJBrCozp4KuUeZ09xKeQDxg== + dependencies: + "@apollographql/apollo-tools" "^0.5.0" + apollo-server-env "^3.2.0" + apollo-server-types "^0.10.0" + graphql-import-node@^0.0.5: version "0.0.5" resolved "https://registry.npmjs.org/graphql-import-node/-/graphql-import-node-0.0.5.tgz" @@ -5124,6 +6452,40 @@ graphql-tools@^4.0.8: iterall "^1.1.3" uuid "^3.1.0" +graphql-tools@^6.2.4: + version "6.2.6" + resolved 
"https://registry.yarnpkg.com/graphql-tools/-/graphql-tools-6.2.6.tgz#557c6d32797a02988f214bd596dec2abd12425dd" + integrity sha512-OyhSvK5ALVVD6bFiWjAqv2+lRyvjIRfb6Br5Tkjrv++rxnXDodPH/zhMbDGRw+W3SD5ioGEEz84yO48iPiN7jA== + dependencies: + "@graphql-tools/batch-delegate" "^6.2.6" + "@graphql-tools/code-file-loader" "^6.2.4" + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/git-loader" "^6.2.4" + "@graphql-tools/github-loader" "^6.2.4" + "@graphql-tools/graphql-file-loader" "^6.2.4" + "@graphql-tools/graphql-tag-pluck" "^6.2.4" + "@graphql-tools/import" "^6.2.4" + "@graphql-tools/json-file-loader" "^6.2.4" + "@graphql-tools/links" "^6.2.4" + "@graphql-tools/load" "^6.2.4" + "@graphql-tools/load-files" "^6.2.4" + "@graphql-tools/merge" "^6.2.4" + "@graphql-tools/mock" "^6.2.4" + "@graphql-tools/module-loader" "^6.2.4" + "@graphql-tools/relay-operation-optimizer" "^6.2.4" + "@graphql-tools/resolvers-composition" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/stitch" "^6.2.4" + "@graphql-tools/url-loader" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + "@graphql-tools/wrap" "^6.2.4" + tslib "~2.0.1" + +graphql-ws@^4.4.1: + version "4.9.0" + resolved "https://registry.yarnpkg.com/graphql-ws/-/graphql-ws-4.9.0.tgz#5cfd8bb490b35e86583d8322f5d5d099c26e365c" + integrity sha512-sHkK9+lUm20/BGawNEWNtVAeJzhZeBg21VmvmLoT5NdGVeZWv5PdIhkcayQIAgjSyyQ17WMKmbDijIPG2On+Ag== + graphql@15.5.0: version "15.5.0" resolved "https://registry.npmjs.org/graphql/-/graphql-15.5.0.tgz" @@ -5139,6 +6501,11 @@ graphql@^16.6.0: resolved "https://registry.npmjs.org/graphql/-/graphql-16.6.0.tgz" integrity sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw== +growl@1.10.5: + version "1.10.5" + resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" + integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== + har-schema@^2.0.0: version "2.0.0" 
resolved "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz" @@ -5159,6 +6526,11 @@ has-ansi@^2.0.0: dependencies: ansi-regex "^2.0.0" +has-bigints@^1.0.1, has-bigints@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" + integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ== + has-flag@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz" @@ -5169,20 +6541,39 @@ has-flag@^4.0.0: resolved "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== +has-property-descriptors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz#610708600606d36961ed04c196193b6a607fa861" + integrity sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ== + dependencies: + get-intrinsic "^1.1.1" + has-proto@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" integrity sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + integrity sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw== + +has-symbols@^1.0.0, has-symbols@^1.0.1, has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + has-symbols@^1.0.2: version "1.0.2" resolved 
"https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz" integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== -has-symbols@^1.0.3: - version "1.0.3" - resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz" - integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + integrity sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw== + dependencies: + has-symbol-support-x "^1.4.1" has-tostringtag@^1.0.0: version "1.0.0" @@ -5191,6 +6582,11 @@ has-tostringtag@^1.0.0: dependencies: has-symbols "^1.0.2" +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + integrity sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ== + has@^1.0.3: version "1.0.3" resolved "https://registry.npmjs.org/has/-/has-1.0.3.tgz" @@ -5223,7 +6619,7 @@ hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: inherits "^2.0.3" minimalistic-assert "^1.0.1" -he@1.2.0: +he@1.2.0, he@^1.1.1: version "1.2.0" resolved "https://registry.npmjs.org/he/-/he-1.2.0.tgz" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== @@ -5241,12 +6637,12 @@ highlight.js@^10.4.1: resolved "https://registry.npmjs.org/highlight.js/-/highlight.js-10.6.0.tgz" integrity sha512-8mlRcn5vk/r4+QcqerapwBYTe+iPL5ih6xrNylxrnBdHQiijDETfXX7VIxC3UiCRiINBJfANBAsPzAvRQj8RpQ== -highlightjs-solidity@^2.0.6: +highlightjs-solidity@^2.0.2: version "2.0.6" resolved "https://registry.yarnpkg.com/highlightjs-solidity/-/highlightjs-solidity-2.0.6.tgz#e7a702a2b05e0a97f185e6ba39fd4846ad23a990" 
integrity sha512-DySXWfQghjm2l6a/flF+cteroJqD4gI8GSdL4PtvxZSsAHie8m3yVe2JFoRg03ROKT6hp2Lc/BxXkqerNmtQYg== -hmac-drbg@^1.0.1: +hmac-drbg@^1.0.0, hmac-drbg@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz" integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE= @@ -5255,6 +6651,13 @@ hmac-drbg@^1.0.1: minimalistic-assert "^1.0.0" minimalistic-crypto-utils "^1.0.1" +hoist-non-react-statics@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + home-or-tmp@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz" @@ -5263,11 +6666,6 @@ home-or-tmp@^2.0.0: os-homedir "^1.0.0" os-tmpdir "^1.0.1" -hosted-git-info@^2.1.4: - version "2.8.8" - resolved "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz" - integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg== - htmlparser2@^6.0.0: version "6.0.1" resolved "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.0.1.tgz" @@ -5304,6 +6702,17 @@ http-errors@2.0.0: statuses "2.0.1" toidentifier "1.0.1" +http-errors@^1.7.3: + version "1.8.1" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.1.tgz#7c3f28577cbc8a207388455dbd62295ed07bd68c" + integrity sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g== + dependencies: + depd "~1.1.2" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.1" + http-https@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/http-https/-/http-https-1.0.0.tgz" @@ -5325,14 +6734,6 @@ http-signature@~1.2.0: jsprim "^1.2.2" sshpk "^1.7.0" -http2-wrapper@^1.0.0-beta.5.2: - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/http2-wrapper/-/http2-wrapper-1.0.3.tgz#b8f55e0c1f25d4ebd08b3b0c2c079f9590800b3d" - integrity sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg== - dependencies: - quick-lru "^5.1.1" - resolve-alpn "^1.0.0" - http2-wrapper@^2.1.10: version "2.2.0" resolved "https://registry.yarnpkg.com/http2-wrapper/-/http2-wrapper-2.2.0.tgz#b80ad199d216b7d3680195077bd7b9060fa9d7f3" @@ -5356,7 +6757,7 @@ hyperlinker@^1.0.0: resolved "https://registry.npmjs.org/hyperlinker/-/hyperlinker-1.0.0.tgz" integrity sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ== -iconv-lite@0.4.24: +iconv-lite@0.4.24, iconv-lite@^0.4.4: version "0.4.24" resolved "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz" integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== @@ -5382,21 +6783,43 @@ ieee754@^1.1.13, ieee754@^1.2.1: resolved "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz" integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== -ignore@^5.2.0: +ignore-walk@^3.0.1: + version "3.0.4" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.4.tgz#c9a09f69b7c7b479a5d74ac1a3c0d4236d2a6335" + integrity sha512-PY6Ii8o1jMRA1z4F2hRkH/xN59ox43DavKvD3oDpfurRlOJyAHpifIwpbdv1n4jt4ov0jSpw3kQ4GhJnpBL6WQ== + dependencies: + minimatch "^3.0.4" + +ignore@^5.1.4, ignore@^5.2.0: version "5.2.4" resolved "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz" integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== -immediate@3.3.0, immediate@^3.2.3: +immediate@3.0.6: + version "3.0.6" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.0.6.tgz#9db1dbd0faf8de6fbe0f5dd5e56bb606280de69b" + integrity sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ== + 
+immediate@3.3.0, immediate@^3.2.2, immediate@^3.2.3: version "3.3.0" resolved "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz" integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q== +immediate@~3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" + integrity sha512-RrGCXRm/fRVqMIhqXrGEX9rRADavPiDFSoMb/k64i9XMk8uH4r/Omi5Ctierj6XzNecwDbO4WuFbDD1zmpl3Tg== + immutable@4.2.1: version "4.2.1" resolved "https://registry.npmjs.org/immutable/-/immutable-4.2.1.tgz" integrity sha512-7WYV7Q5BTs0nlQm7tl92rDYYoyELLKHoDMBKhrxEoiV4mrfVdRz8hzPiYOzH7yWjzoVEamxRuAqhxL2PLRwZYQ== +immutable@~3.7.6: + version "3.7.6" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" + integrity sha512-AizQPcaofEtO11RZhPPHBOJRdo/20MKQF9mBLnVkBoyHi1/zXK8fzVdnEpSV9gxqtnh6Qomfp3F0xT5qP/vThw== + import-fresh@^3.1.0, import-fresh@^3.2.1: version "3.3.0" resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz" @@ -5405,6 +6828,13 @@ import-fresh@^3.1.0, import-fresh@^3.2.1: parent-module "^1.0.0" resolve-from "^4.0.0" +import-from@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/import-from/-/import-from-3.0.0.tgz#055cfec38cd5a27d8057ca51376d7d3bf0891966" + integrity sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ== + dependencies: + resolve-from "^5.0.0" + indent-string@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz" @@ -5423,6 +6853,11 @@ inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, i resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== +inherits@2.0.3: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== + ini@~1.3.0: version "1.3.8" resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" @@ -5442,18 +6877,22 @@ interface-store@^2.0.2: resolved "https://registry.npmjs.org/interface-store/-/interface-store-2.0.2.tgz" integrity sha512-rScRlhDcz6k199EkHqT8NpM87ebN89ICOzILoBHgaG36/WX50N32BnU/kpZgCGPLhARRAWUUX5/cyaIjt7Kipg== -invariant@^2.2.2: +internal-slot@^1.0.4, internal-slot@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.5.tgz#f2a2ee21f668f8627a4667f309dc0f4fb6674986" + integrity sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ== + dependencies: + get-intrinsic "^1.2.0" + has "^1.0.3" + side-channel "^1.0.4" + +invariant@^2.2.2, invariant@^2.2.4: version "2.2.4" resolved "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz" integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== dependencies: loose-envify "^1.0.0" -invert-kv@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz" - integrity sha1-EEqOSqym09jNFXqO+L+rLXo//bY= - ip-regex@^4.0.0: version "4.3.0" resolved "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz" @@ -5561,11 +7000,35 @@ is-arguments@^1.0.4: dependencies: call-bind "^1.0.0" +is-arguments@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.1.tgz#15b3f88fda01f2a97fec84ca761a560f123efa9b" + integrity sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-array-buffer@^3.0.1, is-array-buffer@^3.0.2: + version "3.0.2" + resolved 
"https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.2.tgz#f2653ced8412081638ecb0ebbd0c41c6e0aecbbe" + integrity sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.0" + is-typed-array "^1.1.10" + is-arrayish@^0.2.1: version "0.2.1" resolved "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz" integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== +is-bigint@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" + integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg== + dependencies: + has-bigints "^1.0.1" + is-binary-path@~2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz" @@ -5573,9 +7036,17 @@ is-binary-path@~2.1.0: dependencies: binary-extensions "^2.0.0" -is-buffer@^2.0.5: +is-boolean-object@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" + integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-buffer@~2.0.3: version "2.0.5" - resolved "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== is-callable@^1.1.3: @@ -5583,13 +7054,25 @@ is-callable@^1.1.3: resolved "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz" integrity sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w== 
-is-core-module@^2.11.0: - version "2.12.1" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.12.1.tgz#0c0b6885b6f80011c71541ce15c8d66cf5a4f9fd" - integrity sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg== +is-callable@^1.1.4, is-callable@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" + integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== + +is-core-module@^2.2.0: + version "2.13.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.0.tgz#bb52aa6e2cbd49a30c2ba68c42bf3435ba6072db" + integrity sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ== dependencies: has "^1.0.3" +is-date-object@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" + integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== + dependencies: + has-tostringtag "^1.0.0" + is-docker@^2.0.0: version "2.2.1" resolved "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz" @@ -5642,6 +7125,13 @@ is-generator-function@^1.0.7: resolved "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.8.tgz" integrity sha512-2Omr/twNtufVZFr1GhxjOMFPAj2sjc/dKaIqBhvo4qciXfJmITGH6ZGd8eZYNHza8t1y0e01AuqRhJwfWp26WQ== +is-glob@4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== + dependencies: + is-extglob "^2.1.1" + is-glob@^4.0.1, is-glob@~4.0.1: version "4.0.3" resolved "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz" @@ -5673,6 +7163,23 @@ is-lower-case@^1.1.0: 
dependencies: lower-case "^1.1.0" +is-map@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127" + integrity sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg== + +is-negative-zero@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz#7bf6f03a28003b8b3965de3ac26f664d765f3150" + integrity sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA== + +is-number-object@^1.0.4: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc" + integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ== + dependencies: + has-tostringtag "^1.0.0" + is-number@^7.0.0: version "7.0.0" resolved "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz" @@ -5683,12 +7190,52 @@ is-obj@^2.0.0: resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982" integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w== +is-object@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.2.tgz#a56552e1c665c9e950b4a025461da87e72f86fcf" + integrity sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA== + +is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg== + is-plain-obj@^2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz" integrity 
sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== -is-stream@^1.0.1: +is-promise@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-4.0.0.tgz#42ff9f84206c1991d26debf520dd5c01042dd2f3" + integrity sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ== + +is-regex@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" + integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-retry-allowed@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" + integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== + +is-set@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.2.tgz#90755fa4c2562dc1c5d4024760d6119b94ca18ec" + integrity sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g== + +is-shared-array-buffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79" + integrity sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA== + dependencies: + call-bind "^1.0.2" + +is-stream@^1.0.0, is-stream@^1.0.1: version "1.1.0" resolved "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz" integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= @@ -5698,6 +7245,20 @@ is-stream@^2.0.0: resolved "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz" integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== +is-string@^1.0.5, is-string@^1.0.7: + 
version "1.0.7" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" + integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + dependencies: + has-tostringtag "^1.0.0" + +is-symbol@^1.0.2, is-symbol@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" + integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== + dependencies: + has-symbols "^1.0.2" + is-typed-array@^1.1.10: version "1.1.10" resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.10.tgz#36a5b5cb4189b575d1a3e4b08536bfb485801e3f" @@ -5720,16 +7281,18 @@ is-typed-array@^1.1.3: foreach "^2.0.5" has-symbols "^1.0.1" +is-typed-array@^1.1.9: + version "1.1.12" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.12.tgz#d0bab5686ef4a76f7a73097b95470ab199c57d4a" + integrity sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg== + dependencies: + which-typed-array "^1.1.11" + is-typedarray@^1.0.0, is-typedarray@~1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz" integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= -is-unicode-supported@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" - integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== - is-upper-case@^1.1.0: version "1.1.2" resolved "https://registry.npmjs.org/is-upper-case/-/is-upper-case-1.1.2.tgz" @@ -5737,10 +7300,12 @@ is-upper-case@^1.1.0: dependencies: upper-case "^1.1.0" -is-utf8@^0.2.0: - version "0.2.1" - resolved "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz" - integrity sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI= 
+is-weakref@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2" + integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ== + dependencies: + call-bind "^1.0.2" is-wsl@^2.2.0: version "2.2.0" @@ -5754,6 +7319,11 @@ isarray@0.0.1: resolved "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ== +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + isarray@~1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz" @@ -5769,7 +7339,7 @@ iso-url@^1.1.5: resolved "https://registry.npmjs.org/iso-url/-/iso-url-1.2.1.tgz" integrity sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng== -isomorphic-ws@^4.0.1: +isomorphic-ws@4.0.1, isomorphic-ws@^4.0.1: version "4.0.1" resolved "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz" integrity sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w== @@ -5779,6 +7349,14 @@ isstream@~0.1.2: resolved "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz" integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + integrity sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w== + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + it-all@^1.0.4: version "1.0.6" resolved 
"https://registry.npmjs.org/it-all/-/it-all-1.0.6.tgz" @@ -5829,6 +7407,19 @@ iterall@^1.1.3, iterall@^1.2.1, iterall@^1.3.0: resolved "https://registry.npmjs.org/iterall/-/iterall-1.3.0.tgz" integrity sha512-QZ9qOMdF+QLHxy1QIpUHUU1D5pS2CG2P69LF6L6CPjPYA/XMOmKV3PZpawHoAjHNyB0swdVTRxdYT4tbBbxqwg== +iterate-iterator@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/iterate-iterator/-/iterate-iterator-1.0.2.tgz#551b804c9eaa15b847ea6a7cdc2f5bf1ec150f91" + integrity sha512-t91HubM4ZDQ70M9wqp+pcNpu8OyJ9UAtXntT/Bcsvp5tZMnz9vRa+IunKXeI8AnfZMTv0jNuVEmGeLSMjVvfPw== + +iterate-value@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/iterate-value/-/iterate-value-1.0.2.tgz#935115bd37d006a52046535ebc8d07e9c9337f57" + integrity sha512-A6fMAio4D2ot2r/TYzr4yUWrmwNdsN5xL7+HUiyACE4DXm+q8HtPcnFTp+NnW3k4N05tZ7FVYFFb2CR13NxyHQ== + dependencies: + es-get-iterator "^1.0.2" + iterate-iterator "^1.0.1" + jake@^10.6.1: version "10.8.6" resolved "https://registry.npmjs.org/jake/-/jake-10.8.6.tgz" @@ -5887,6 +7478,14 @@ js-tokens@^3.0.2: resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz" integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= +js-yaml@3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" + integrity sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + js-yaml@3.14.1, js-yaml@^3.14.1: version "3.14.1" resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz" @@ -5895,13 +7494,18 @@ js-yaml@3.14.1, js-yaml@^3.14.1: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@4.1.0, js-yaml@^4.1.0: +js-yaml@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz" integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== dependencies: argparse "^2.0.1" +jsan@^3.1.13: + version "3.1.14" + 
resolved "https://registry.yarnpkg.com/jsan/-/jsan-3.1.14.tgz#197fee2d260b85acacb049c1ffa41bd09fb1f213" + integrity sha512-wStfgOJqMv4QKktuH273f5fyi3D3vy2pHOiSDGPvpcS/q+wb/M7AK3vkCcaHbkZxDOlDU/lDJgccygKSG2OhtA== + jsbn@~0.1.0: version "0.1.1" resolved "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz" @@ -5912,6 +7516,16 @@ jsesc@^1.3.0: resolved "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz" integrity sha1-RsP+yMGJKxKwgz25vHYiF226s0s= +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ== + json-buffer@3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" @@ -5922,7 +7536,7 @@ json-parse-even-better-errors@^2.3.0: resolved "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz" integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== -json-pointer@^0.6.1: +json-pointer@^0.6.0, json-pointer@^0.6.1: version "0.6.2" resolved "https://registry.yarnpkg.com/json-pointer/-/json-pointer-0.6.2.tgz#f97bd7550be5e9ea901f8c9264c9d436a22a93cd" integrity sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw== @@ -5979,6 +7593,19 @@ json5@^0.5.1: resolved "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz" integrity sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE= +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity 
sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + +jsondown@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/jsondown/-/jsondown-1.0.0.tgz#c5cc5cda65f515d2376136a104b5f535534f26e3" + integrity sha512-p6XxPaq59aXwcdDQV3ISMA5xk+1z6fJuctcwwSdR9iQgbYOcIrnknNrhcMGG+0FaUfKHGkdDpQNaZrovfBoyOw== + dependencies: + memdown "1.4.1" + mkdirp "0.5.1" + jsonfile@^2.1.0: version "2.4.0" resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz" @@ -6002,7 +7629,7 @@ jsonfile@^6.0.1: optionalDependencies: graceful-fs "^4.1.6" -jsonify@^0.0.1: +jsonify@~0.0.0: version "0.0.1" resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.1.tgz#2aa3111dae3d34a0f151c63f3a45d995d9420978" integrity sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg== @@ -6022,15 +7649,6 @@ jsprim@^1.2.2: json-schema "0.4.0" verror "1.10.0" -keccak@3.0.2: - version "3.0.2" - resolved "https://registry.npmjs.org/keccak/-/keccak-3.0.2.tgz" - integrity sha512-PyKKjkH53wDMLGrvmRGSNWgmSxZOUqbnXwKL9tmgbFYA1iAYqW21kfR7mZXV0MlESiefxQQE9X9fTa3X+2MPDQ== - dependencies: - node-addon-api "^2.0.0" - node-gyp-build "^4.2.0" - readable-stream "^3.6.0" - keccak@^3.0.0: version "3.0.3" resolved "https://registry.yarnpkg.com/keccak/-/keccak-3.0.3.tgz#4bc35ad917be1ef54ff246f904c2bbbf9ac61276" @@ -6040,6 +7658,13 @@ keccak@^3.0.0: node-gyp-build "^4.2.0" readable-stream "^3.6.0" +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== + dependencies: + json-buffer "3.0.0" + keyv@^4.0.0: version "4.5.2" resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.2.tgz#0e310ce73bf7851ec702f2eaf46ec4e3805cce56" @@ -6054,12 +7679,10 @@ klaw@^1.0.0: optionalDependencies: graceful-fs "^4.1.9" -lcid@^1.0.0: - version "1.0.0" - resolved 
"https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz" - integrity sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU= - dependencies: - invert-kv "^1.0.0" +level-codec@9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/level-codec/-/level-codec-9.0.1.tgz#042f4aa85e56d4328ace368c950811ba802b7247" + integrity sha512-ajFP0kJ+nyq4i6kptSM+mAvJKLOg1X5FiFPtLG9M5gCEZyBmgDi3FkDrvlMkEzrUn1cWxtvVmrvoS4ASyO/q+Q== level-codec@9.0.2, level-codec@^9.0.0: version "9.0.2" @@ -6073,13 +7696,6 @@ level-codec@~7.0.0: resolved "https://registry.npmjs.org/level-codec/-/level-codec-7.0.1.tgz" integrity sha512-Ua/R9B9r3RasXdRmOtd+t9TCOEIIlts+TN/7XTT2unhDaL6sJn83S3rUyljbr6lVtw49N3/yA0HHjpV6Kzb2aQ== -level-concat-iterator@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/level-concat-iterator/-/level-concat-iterator-3.1.0.tgz#5235b1f744bc34847ed65a50548aa88d22e881cf" - integrity sha512-BWRCMHBxbIqPxJ8vHOvKUsaO0v1sLYZtjN3K2iZJsRBYtp+ONsY6Jfi6hy9K3+zolgQRryhIn2NRZjZnWJ9NmQ== - dependencies: - catering "^2.1.0" - level-concat-iterator@~2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/level-concat-iterator/-/level-concat-iterator-2.0.1.tgz" @@ -6125,34 +7741,25 @@ level-iterator-stream@~4.0.0: readable-stream "^3.4.0" xtend "^4.0.2" -level-js@^5.0.0: - version "5.0.2" - resolved "https://registry.yarnpkg.com/level-js/-/level-js-5.0.2.tgz#5e280b8f93abd9ef3a305b13faf0b5397c969b55" - integrity sha512-SnBIDo2pdO5VXh02ZmtAyPP6/+6YTJg2ibLtl9C34pWvmtMEmRTWpra+qO/hifkUtBTOtfx6S9vLDjBsBK4gRg== +level-js@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/level-js/-/level-js-4.0.2.tgz#fa51527fa38b87c4d111b0d0334de47fcda38f21" + integrity sha512-PeGjZsyMG4O89KHiez1zoMJxStnkM+oBIqgACjoo5PJqFiSUUm3GNod/KcbqN5ktyZa8jkG7I1T0P2u6HN9lIg== dependencies: - abstract-leveldown "~6.2.3" - buffer "^5.5.0" + abstract-leveldown "~6.0.1" + immediate "~3.2.3" inherits "^2.0.3" ltgt "^2.1.2" + typedarray-to-buffer "~3.1.5" -level-packager@^5.1.0: +level-packager@^5.0.0: version "5.1.1" 
- resolved "https://registry.npmjs.org/level-packager/-/level-packager-5.1.1.tgz" + resolved "https://registry.yarnpkg.com/level-packager/-/level-packager-5.1.1.tgz#323ec842d6babe7336f70299c14df2e329c18939" integrity sha512-HMwMaQPlTC1IlcwT3+swhqf/NUO+ZhXVz6TY1zZIIZlIR0YSn8GtAAWmIvKjNY16ZkEg/JcpAuQskxsXqC0yOQ== dependencies: encoding-down "^6.3.0" levelup "^4.3.2" -level-supports@^2.0.1: - version "2.1.0" - resolved "https://registry.yarnpkg.com/level-supports/-/level-supports-2.1.0.tgz#9af908d853597ecd592293b2fad124375be79c5f" - integrity sha512-E486g1NCjW5cF78KGPrMDRBYzPuueMZ6VBXHT6gC7A8UYWGiM14fGgp+s/L1oFfDWSPV/+SFkYCmZ0SiESkRKA== - -level-supports@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/level-supports/-/level-supports-4.0.1.tgz#431546f9d81f10ff0fea0e74533a0e875c08c66a" - integrity sha512-PbXpve8rKeNcZ9C1mUicC9auIYFyGpkV9/i6g76tLgANwWhtG2v7I4xNBUlkn3lE2/dZF3Pi0ygYGtLc4RXXdA== - level-supports@~1.0.0: version "1.0.1" resolved "https://registry.npmjs.org/level-supports/-/level-supports-1.0.1.tgz" @@ -6160,14 +7767,6 @@ level-supports@~1.0.0: dependencies: xtend "^4.0.2" -level-transcoder@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/level-transcoder/-/level-transcoder-1.0.1.tgz#f8cef5990c4f1283d4c86d949e73631b0bc8ba9c" - integrity sha512-t7bFwFtsQeD8cl8NIoQ2iwxA0CL/9IFw7/9gAjOonH0PWTTiRfY7Hq+Ejbsxh86tXobDQ6IOiddjNYIfOBs06w== - dependencies: - buffer "^6.0.3" - module-error "^1.0.1" - level-write-stream@1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/level-write-stream/-/level-write-stream-1.0.0.tgz" @@ -6183,32 +7782,44 @@ level-ws@0.0.0: readable-stream "~1.0.15" xtend "~2.1.1" -level@6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/level/-/level-6.0.1.tgz#dc34c5edb81846a6de5079eac15706334b0d7cd6" - integrity sha512-psRSqJZCsC/irNhfHzrVZbmPYXDcEYhA5TVNwr+V92jF44rbf86hqGp8fiT702FyiArScYIlPSBTDUASCVNSpw== +level@5.0.1: + version "5.0.1" + resolved 
"https://registry.yarnpkg.com/level/-/level-5.0.1.tgz#8528cc1ee37ac413270129a1eab938c610be3ccb" + integrity sha512-wcak5OQeA4rURGacqS62R/xNHjCYnJSQDBOlm4KNUGJVE9bWv2B04TclqReYejN+oD65PzD4FsqeWoI5wNC5Lg== + dependencies: + level-js "^4.0.0" + level-packager "^5.0.0" + leveldown "^5.0.0" + opencollective-postinstall "^2.0.0" + +leveldown@5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/leveldown/-/leveldown-5.0.2.tgz#c8edc2308c8abf893ffc81e66ab6536111cae92c" + integrity sha512-Ib6ygFYBleS8x2gh3C1AkVsdrUShqXpe6jSTnZ6sRycEXKhqVf+xOSkhgSnjidpPzyv0d95LJVFrYQ4NuXAqHA== dependencies: - level-js "^5.0.0" - level-packager "^5.1.0" - leveldown "^5.4.0" + abstract-leveldown "~6.0.0" + fast-future "~1.0.2" + napi-macros "~1.8.1" + node-gyp-build "~3.8.0" -leveldown@5.6.0, leveldown@^5.4.0: +leveldown@^5.0.0: version "5.6.0" - resolved "https://registry.npmjs.org/leveldown/-/leveldown-5.6.0.tgz" + resolved "https://registry.yarnpkg.com/leveldown/-/leveldown-5.6.0.tgz#16ba937bb2991c6094e13ac5a6898ee66d3eee98" integrity sha512-iB8O/7Db9lPaITU1aA2txU/cBEXAt4vWwKQRrrWuS6XDgbP4QZGj9BL2aNbwb002atoQ/lIotJkfyzz+ygQnUQ== dependencies: abstract-leveldown "~6.2.1" napi-macros "~2.0.0" node-gyp-build "~4.1.0" -leveldown@6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/leveldown/-/leveldown-6.1.0.tgz#7ab1297706f70c657d1a72b31b40323aa612b9ee" - integrity sha512-8C7oJDT44JXxh04aSSsfcMI8YiaGRhOFI9/pMEL7nWJLVsWajDPTRxsSHTM2WcTVY5nXM+SuRHzPPi0GbnDX+w== +levelup@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/levelup/-/levelup-4.0.2.tgz#bcb8d28d0a82ee97f1c6d00f20ea6d32c2803c5b" + integrity sha512-cx9PmLENwbGA3svWBEbeO2HazpOSOYSXH4VA+ahVpYyurvD+SDSfURl29VBY2qgyk+Vfy2dJd71SBRckj/EZVA== dependencies: - abstract-leveldown "^7.2.0" - napi-macros "~2.0.0" - node-gyp-build "^4.3.0" + deferred-leveldown "~5.0.0" + level-errors "~2.0.0" + level-iterator-stream "~4.0.0" + xtend "~4.0.0" levelup@4.4.0, levelup@^4.3.2: version "4.4.0" @@ -6239,16 +7850,10 @@ 
lines-and-columns@^1.1.6: resolved "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz" integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== -load-json-file@^1.0.0: - version "1.1.0" - resolved "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz" - integrity sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA= - dependencies: - graceful-fs "^4.1.2" - parse-json "^2.2.0" - pify "^2.0.0" - pinkie-promise "^2.0.0" - strip-bom "^2.0.0" +linked-list@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/linked-list/-/linked-list-0.1.0.tgz#798b0ff97d1b92a4fd08480f55aea4e9d49d37bf" + integrity sha512-Zr4ovrd0ODzF3ut2TWZMdHIxb8iFdJc/P3QM4iCJdlxxGHXo69c9hGIHzLo8/FtuR9E6WUZc5irKhtPUgOKMAg== locate-path@^2.0.0: version "2.0.0" @@ -6266,6 +7871,13 @@ locate-path@^3.0.0: p-locate "^3.0.0" path-exists "^3.0.0" +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + locate-path@^6.0.0: version "6.0.0" resolved "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz" @@ -6278,26 +7890,36 @@ lodash-es@^4.2.1: resolved "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz" integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== -lodash.assign@^4.0.3, lodash.assign@^4.0.6: - version "4.2.0" - resolved "https://registry.npmjs.org/lodash.assign/-/lodash.assign-4.2.0.tgz" - integrity sha1-DZnzzNem0mHRm9rrkkUAXShYCOc= - lodash.camelcase@^4.3.0: version "4.3.0" resolved "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz" integrity sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA== +lodash.clonedeep@^4.5.0: + version "4.5.0" + 
resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + integrity sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ== + lodash.debounce@^4.0.8: version "4.0.8" resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz" integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168= +lodash.escaperegexp@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz#64762c48618082518ac3df4ccf5d5886dae20347" + integrity sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw== + lodash.flatmap@^4.5.0: version "4.5.0" resolved "https://registry.npmjs.org/lodash.flatmap/-/lodash.flatmap-4.5.0.tgz" integrity sha1-74y/QI9uSCaGYzRTBcaswLd4cC4= +lodash.flatten@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" + integrity sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g== + lodash.kebabcase@^4.1.1: version "4.1.1" resolved "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz" @@ -6333,6 +7955,11 @@ lodash.padstart@^4.6.1: resolved "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz" integrity sha512-sW73O6S8+Tg66eY56DBk85aQzzUJDtpoXFBgELMd5P/SotAguo+1kYO6RuYgXxA4HJH3LFTFPASX6ET6bjfriw== +lodash.partition@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.partition/-/lodash.partition-4.6.0.tgz#a38e46b73469e0420b0da1212e66d414be364ba4" + integrity sha512-35L3dSF3Q6V1w5j6V3NhNlQjzsRDC/pYKCTdYTmwqSib+Q8ponkAmt/PwEOq3EmI38DSCl+SkIVwLd+uSlVdrg== + lodash.repeat@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/lodash.repeat/-/lodash.repeat-4.1.0.tgz" @@ -6353,6 +7980,11 @@ lodash.startcase@^4.4.0: resolved 
"https://registry.npmjs.org/lodash.startcase/-/lodash.startcase-4.4.0.tgz" integrity sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg== +lodash.sum@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/lodash.sum/-/lodash.sum-4.0.2.tgz#ad90e397965d803d4f1ff7aa5b2d0197f3b4637b" + integrity sha512-1GcLFsMpU0K7uGPjo5FePq0fNlL3sSTZtSWwQUxr9VL8T3xeJiprwbfjDptTUPtkB8cUBpAnMtvQpju5XXaBeA== + lodash.trim@^4.5.1: version "4.5.1" resolved "https://registry.npmjs.org/lodash.trim/-/lodash.trim-4.5.1.tgz" @@ -6378,18 +8010,22 @@ lodash.upperfirst@^4.3.1: resolved "https://registry.npmjs.org/lodash.upperfirst/-/lodash.upperfirst-4.3.1.tgz" integrity sha512-sReKOYJIJf74dhJONhU4e0/shzi1trVbSWDOhKYE5XV2O+H7Sb2Dihwuc7xWxVl+DgFPyTqIN3zMfT9cq5iWDg== -lodash@^4.17.14, lodash@^4.17.21, lodash@^4.17.4, lodash@^4.2.1: +lodash.zipwith@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.zipwith/-/lodash.zipwith-4.2.0.tgz#afacf03fd2f384af29e263c3c6bda3b80e3f51fd" + integrity sha512-CbmO4780a7/p1XUHiIL/v0JoxBMZfvq5+qAjK9PDdVnKnsEJN0i88bPZ9dXr72l108K/EWdJFtk/5HBAby4R5Q== + +lodash@4.17.21, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.21, lodash@^4.17.4, lodash@^4.2.1: version "4.17.21" resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== -log-symbols@4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" - integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== +log-symbols@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" + integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== dependencies: - chalk 
"^4.1.0" - is-unicode-supported "^0.1.0" + chalk "^4.0.0" log-symbols@^3.0.0: version "3.0.0" @@ -6398,7 +8034,7 @@ log-symbols@^3.0.0: dependencies: chalk "^2.4.2" -loglevel@^1.6.8: +loglevel@^1.6.7: version "1.8.1" resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.8.1.tgz#5c621f83d5b48c54ae93b6156353f555963377b4" integrity sha512-tCRIJM51SHjAayKwC+QAg8hT8vg6z7GSgLJKGvzuPb1Wc+hLzqtuVLxp6/HzSPOozuK+8ErAhy7U/sVzw8Dgfg== @@ -6413,7 +8049,7 @@ long@^5.2.0: resolved "https://registry.npmjs.org/long/-/long-5.2.3.tgz" integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== -loose-envify@^1.0.0, loose-envify@^1.1.0: +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz" integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== @@ -6432,6 +8068,18 @@ lower-case@^1.1.0, lower-case@^1.1.1, lower-case@^1.1.2: resolved "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz" integrity sha1-miyr0bno4K6ZOkv31YdcOcQujqw= +lower-case@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-2.0.2.tgz#6fa237c63dbdc4a82ca0fd882e4722dc5e634e28" + integrity sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg== + dependencies: + tslib "^2.0.3" + +lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== + lowercase-keys@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz" @@ -6442,11 +8090,6 @@ lowercase-keys@^3.0.0: resolved 
"https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-3.0.0.tgz#c5e7d442e37ead247ae9db117a9d0a467c89d4f2" integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== -"lru-cache@7.10.1 - 7.13.1": - version "7.13.1" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.13.1.tgz#267a81fbd0881327c46a81c5922606a2cfe336c4" - integrity sha512-CHqbAq7NFlW3RSnoWXLJBxCWaZVBrfa9UEHId2M3AW8iEBurbqduNexEUCGc3SHc6iCYXNJCDi903LajSVAEPQ== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -6461,10 +8104,10 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" -"lru-cache@^9.1.1 || ^10.0.0": - version "10.0.0" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.0.0.tgz#b9e2a6a72a129d81ab317202d93c7691df727e61" - integrity sha512-svTf/fzsKHffP42sujkO/Rjs37BCIsQVRCeNYIm9WN8rgT7ffoUnRtZCqU+6BqcSBdv8gwJeTz8knJpgACeQMw== +lru-cache@^9.0.0: + version "9.1.2" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-9.1.2.tgz#255fdbc14b75589d6d0e73644ca167a8db506835" + integrity sha512-ERJq3FOzJTxBbFjZ7iDs+NiK4VI9Wz+RdrrAB8dio1oV+YvdPzUEE4QNiT2VD51DkIbCYRUUzCRkssXCHqSnKQ== ltgt@2.2.1, ltgt@^2.1.2, ltgt@~2.2.0: version "2.2.1" @@ -6543,11 +8186,21 @@ merkle-patricia-tree@^2.1.2, merkle-patricia-tree@^2.3.2: rlp "^2.0.0" semaphore ">=1.0.1" +meros@1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/meros/-/meros-1.1.4.tgz#c17994d3133db8b23807f62bec7f0cb276cfd948" + integrity sha512-E9ZXfK9iQfG9s73ars9qvvvbSIkJZF5yOo9j4tcwM5tN8mUKfj/EKN5PzOr3ZH0y5wL7dLAHw3RVEfpQV9Q7VQ== + methods@~1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz" integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== +micro-ftch@^0.3.1: + version "0.3.1" + resolved 
"https://registry.yarnpkg.com/micro-ftch/-/micro-ftch-0.3.1.tgz#6cb83388de4c1f279a034fb0cf96dfc050853c5f" + integrity sha512-/0LLxhzP0tfiR5hcQebtudP56gUurs2CLkGarnCiB/OqEyUFQ6U3paQi/tgLv0hBJYt2rnr9MNpxz4fiiugstg== + micromatch@^4.0.4: version "4.0.5" resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz" @@ -6556,6 +8209,14 @@ micromatch@^4.0.4: braces "^3.0.2" picomatch "^2.3.1" +miller-rabin@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA== + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + mime-db@1.46.0: version "1.46.0" resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz" @@ -6595,7 +8256,7 @@ mimic-fn@^3.0.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-3.1.0.tgz#65755145bbf3e36954b949c16450427451d5ca74" integrity sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ== -mimic-response@^1.0.0: +mimic-response@^1.0.0, mimic-response@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz" integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== @@ -6612,22 +8273,27 @@ min-document@^2.19.0: dependencies: dom-walk "^0.1.0" +min-indent@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/min-indent/-/min-indent-1.0.1.tgz#a63f681673b30571fbe8bc25686ae746eefa9869" + integrity sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg== + minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz" integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== -minimalistic-crypto-utils@^1.0.1: 
+minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz" integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= -minimatch@5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.0.1.tgz#fb9022f7528125187c92bd9e9b6366be1cf3415b" - integrity sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g== +minimatch@3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== dependencies: - brace-expansion "^2.0.1" + brace-expansion "^1.1.7" minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" @@ -6650,6 +8316,11 @@ minimatch@^8.0.2: dependencies: brace-expansion "^2.0.1" +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + integrity sha512-miQKw5Hv4NS1Psg2517mV4e4dYNaO3++hjAvLOAzKqZ61rH8NS1SK+vbfBWZ5PY/Me/bEWhUwqMghEW5Fb9T7Q== + minimist@^1.2.0: version "1.2.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" @@ -6685,11 +8356,6 @@ minipass@^5.0.0: resolved "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz" integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ== -"minipass@^5.0.0 || ^6.0.2": - version "6.0.2" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-6.0.2.tgz#542844b6c4ce95b202c0995b0a471f1229de4c81" - integrity sha512-MzWSV5nYVT7mVyWCwn2o7JH13w2TBRmmSqSRCKzTw+lmft9X4z+3wjvs06Tzijo5z4W/kahUCDpRXTF+ZrmF/w== - minizlib@^1.3.3: version "1.3.3" resolved "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz" @@ -6717,6 +8383,13 @@ 
mkdirp@*, mkdirp@^1.0.3: resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz" integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== +mkdirp@0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + integrity sha512-SknJC52obPfGQPnjIkXbmA6+5H15E+fR+E4iR2oQ3zzCLbd7/ONua69R/Gw7AgkTLsRG+r5fzksYwWe1AgTyWA== + dependencies: + minimist "0.0.8" + mkdirp@^0.5.1: version "0.5.6" resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" @@ -6731,43 +8404,42 @@ mkdirp@^0.5.5: dependencies: minimist "^1.2.5" -mocha@10.1.0: - version "10.1.0" - resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.1.0.tgz#dbf1114b7c3f9d0ca5de3133906aea3dfc89ef7a" - integrity sha512-vUF7IYxEoN7XhQpFLxQAEMtE4W91acW4B6En9l97MwE9stL1A9gusXfoHZCLVHDUJ/7V5+lbCM6yMqzo5vNymg== +mocha@8.1.2: + version "8.1.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-8.1.2.tgz#d67fad13300e4f5cd48135a935ea566f96caf827" + integrity sha512-I8FRAcuACNMLQn3lS4qeWLxXqLvGf6r2CaLstDpZmMUUSmvW6Cnm1AuHxgbc7ctZVRcfwspCRbDHymPsi3dkJw== dependencies: ansi-colors "4.1.1" browser-stdout "1.3.1" - chokidar "3.5.3" - debug "4.3.4" - diff "5.0.0" + chokidar "3.4.2" + debug "4.1.1" + diff "4.0.2" escape-string-regexp "4.0.0" find-up "5.0.0" - glob "7.2.0" + glob "7.1.6" + growl "1.10.5" he "1.2.0" - js-yaml "4.1.0" - log-symbols "4.1.0" - minimatch "5.0.1" - ms "2.1.3" - nanoid "3.3.3" - serialize-javascript "6.0.0" - strip-json-comments "3.1.1" - supports-color "8.1.1" - workerpool "6.2.1" - yargs "16.2.0" - yargs-parser "20.2.4" - yargs-unparser "2.0.0" + js-yaml "3.14.0" + log-symbols "4.0.0" + minimatch "3.0.4" + ms "2.1.2" + object.assign "4.1.0" + promise.allsettled "1.0.2" + serialize-javascript "4.0.0" + strip-json-comments "3.0.1" + supports-color "7.1.0" + which "2.0.2" + wide-align "1.1.3" + workerpool "6.0.0" + yargs "13.3.2" + yargs-parser "13.1.2" + yargs-unparser 
"1.6.1" mock-fs@^4.1.0: version "4.13.0" resolved "https://registry.npmjs.org/mock-fs/-/mock-fs-4.13.0.tgz" integrity sha512-DD0vOdofJdoaRNtnWcrXe6RQbpHkPPmtqGq14uRX0F8ZKJ5nv89CVTYl/BZdppDxBDaV0hl75htg3abpEWlPZA== -module-error@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/module-error/-/module-error-1.0.2.tgz#8d1a48897ca883f47a45816d4fb3e3c6ba404d86" - integrity sha512-0yuvsqSCv8LbaOKhnsQ/T5JhyFlCYLPXK3U2sgV10zoKQwzs/MyfuQUOZQ1V/6OCOJsK/TRgNVrPuPDqtdMFtA== - ms@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz" @@ -6852,26 +8524,31 @@ mute-stream@0.0.8: resolved "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz" integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== -nano-base32@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/nano-base32/-/nano-base32-1.0.1.tgz#ba548c879efcfb90da1c4d9e097db4a46c9255ef" - integrity sha512-sxEtoTqAPdjWVGv71Q17koMFGsOMSiHsIFEvzOM7cNp8BXB4AnEwmDabm5dorusJf/v1z7QxaZYxUorU9RKaAw== +nan@^2.12.1: + version "2.17.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.17.0.tgz#c0150a2368a182f033e9aa5195ec76ea41a199cb" + integrity sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ== nano-json-stream-parser@^0.1.2: version "0.1.2" resolved "https://registry.npmjs.org/nano-json-stream-parser/-/nano-json-stream-parser-0.1.2.tgz" integrity sha1-DMj20OK2IrR5xA1JnEbWS3Vcb18= -nanoid@3.3.3: - version "3.3.3" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.3.tgz#fd8e8b7aa761fe807dba2d1b98fb7241bb724a25" - integrity sha512-p1sjXuopFs0xg+fPASzQ28agW1oHD7xDsd9Xkf3T15H3c/cifrFHVwrh74PdoklAPi+i7MdRsE47vm2r6JoB+w== +nanoid@^2.0.0: + version "2.1.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-2.1.11.tgz#ec24b8a758d591561531b4176a01e3ab4f0f0280" + integrity sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA== 
nanoid@^3.0.2, nanoid@^3.1.20, nanoid@^3.1.23: version "3.3.6" resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz" integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== +napi-macros@~1.8.1: + version "1.8.2" + resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda" + integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg== + napi-macros@~2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/napi-macros/-/napi-macros-2.0.0.tgz" @@ -6892,15 +8569,24 @@ natural-orderby@^2.0.3: resolved "https://registry.npmjs.org/natural-orderby/-/natural-orderby-2.0.3.tgz" integrity sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q== +needle@^2.2.1: + version "2.9.1" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.9.1.tgz#22d1dffbe3490c2b83e301f7709b6736cd8f2684" + integrity sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ== + dependencies: + debug "^3.2.6" + iconv-lite "^0.4.4" + sax "^1.2.4" + negotiator@0.6.3: version "0.6.3" resolved "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz" integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== -next-tick@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.1.0.tgz#1836ee30ad56d67ef281b22bd199f709449b35eb" - integrity sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ== +next-tick@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" + integrity sha512-mc/caHeUcdjnC/boPWJefDr4KUIWQNv+tlnFnJd38QMou86QtxQzBJfxgGRzvx8jazYRqrVlaHarfO72uNxPOg== nice-try@^1.0.4: version "1.0.5" @@ -6914,6 +8600,14 @@ no-case@^2.2.0, no-case@^2.3.2: 
dependencies: lower-case "^1.1.1" +no-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-3.0.4.tgz#d361fd5c9800f558551a8369fc0dcd4662b6124d" + integrity sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg== + dependencies: + lower-case "^2.0.2" + tslib "^2.0.3" + node-abort-controller@^3.0.1: version "3.1.1" resolved "https://registry.yarnpkg.com/node-abort-controller/-/node-abort-controller-3.1.1.tgz#a94377e964a9a37ac3976d848cb5c765833b8548" @@ -6932,14 +8626,34 @@ node-fetch@1.7.3, node-fetch@~1.7.1: encoding "^0.1.11" is-stream "^1.0.1" -node-fetch@2.6.7: - version "2.6.7" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" - integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== +node-fetch@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.1.2.tgz#ab884e8e7e57e38a944753cec706f788d1768bb5" + integrity sha512-IHLHYskTc2arMYsHZH82PVX8CSKT5lzb7AXeyO06QnjGDKtkv+pv3mEki6S7reB/x1QPo+YPxQRNEVgR5V/w3Q== + +node-fetch@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.4.1.tgz#b2e38f1117b8acbedbe0524f041fb3177188255d" + integrity sha512-P9UbpFK87NyqBZzUuDBDz4f6Yiys8xm8j7ACDbi6usvFm6KItklQUKjeoqTrYS/S1k6I8oaOC2YLLDr/gg26Mw== + +node-fetch@2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.0.tgz#e633456386d4aa55863f676a7ab0daa8fdecb0fd" + integrity sha512-8dG4H5ujfvFiqDmVu9fQ5bOHUC15JMjMY/Zumv26oOvvVJjM67KF8koCWIabKQ1GJIa9r2mMZscBq/TbdOcmNA== + +node-fetch@2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" + integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== + +node-fetch@^2.6.1, node-fetch@^2.6.12: + version 
"2.7.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" + integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== dependencies: whatwg-url "^5.0.0" -node-fetch@^2.6.11, node-fetch@^2.6.7: +node-fetch@^2.6.11: version "2.6.11" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25" integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w== @@ -6953,11 +8667,6 @@ node-fetch@^2.6.8: dependencies: whatwg-url "^5.0.0" -node-gyp-build@4.4.0: - version "4.4.0" - resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.4.0.tgz#42e99687ce87ddeaf3a10b99dc06abc11021f3f4" - integrity sha512-amJnQCcgtRVw9SvoebO3BKGESClrfXGCUTX9hSn1OuGQTQBOZmVd0Z0OlecpuRksKvbsUqALE8jls/ErClAPuQ== - node-gyp-build@^4.2.0: version "4.5.0" resolved "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.5.0.tgz" @@ -6968,11 +8677,21 @@ node-gyp-build@^4.3.0: resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.6.0.tgz#0c52e4cbf54bbd28b709820ef7b6a3c2d6209055" integrity sha512-NTZVKn9IylLwUzaKjkas1e4u2DLNcV4rdYagA4PWdPwW87Bi7z+BznyKSRwS/761tV/lzCGXplWsiaMjLqP2zQ== +node-gyp-build@~3.8.0: + version "3.8.0" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.8.0.tgz#0f57efeb1971f404dfcbfab975c284de7c70f14a" + integrity sha512-bYbpIHyRqZ7sVWXxGpz8QIRug5JZc/hzZH4GbdT9HTZi6WmKCZ8GLvP8OZ9TTiIBvwPFKgtGrlWQSXDAvYdsPw== + node-gyp-build@~4.1.0: version "4.1.1" resolved "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.1.1.tgz" integrity sha512-dSq1xmcPDKPZ2EED2S6zw/b9NKsqzXRE6dVr8TVQnI3FJOTteUMuqF3Qqs6LZg+mLGYJWqQzMbIjMtJqTv87nQ== +node-int64@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" + integrity 
sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== + node-interval-tree@^1.3.3: version "1.3.3" resolved "https://registry.npmjs.org/node-interval-tree/-/node-interval-tree-1.3.3.tgz" @@ -6980,36 +8699,88 @@ node-interval-tree@^1.3.3: dependencies: shallowequal "^1.0.2" -node-releases@^2.0.12: - version "2.0.12" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.12.tgz#35627cc224a23bfb06fb3380f2b3afaaa7eb1039" - integrity sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ== +node-pre-gyp@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.11.0.tgz#db1f33215272f692cd38f03238e3e9b47c5dd054" + integrity sha512-TwWAOZb0j7e9eGaf9esRx3ZcLaE5tQ2lvYy1pb5IAaG1a2e2Kv5Lms1Y4hpj+ciXJRofIxxlt5haeQ/2ANeE0Q== + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.1" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +node-releases@^2.0.13: + version "2.0.13" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d" + integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ== nofilter@^1.0.4: version "1.0.4" resolved "https://registry.npmjs.org/nofilter/-/nofilter-1.0.4.tgz" integrity sha512-N8lidFp+fCz+TD51+haYdbDGrcBWwuHX40F5+z0qkUjMJ5Tp+rdSuAkMJ9N9eoolDlEVTf6u5icM+cNKkKW2mA== -normalize-package-data@^2.3.2: - version "2.5.0" - resolved "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz" - integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA== +noop-fn@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/noop-fn/-/noop-fn-1.0.0.tgz#5f33d47f13d2150df93e0cb036699e982f78ffbf" + integrity 
sha512-pQ8vODlgXt2e7A3mIbFDlizkr46r75V+BJxVAyat8Jl7YmI513gG5cfyRL0FedKraoZ+VAouI1h4/IWpus5pcQ== + +nopt@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.3.tgz#a375cad9d02fd921278d954c2254d5aa57e15e48" + integrity sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg== + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-path@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + integrity sha512-3pKJwH184Xo/lnH6oyP1q2pMd7HcypqqmRs91/6/i2CGtWwIKGCkOOMTm/zXbgTEWHw1uNpNi/igc3ePOYHb6w== dependencies: - hosted-git-info "^2.1.4" - resolve "^1.10.0" - semver "2 || 3 || 4 || 5" - validate-npm-package-license "^3.0.1" + remove-trailing-separator "^1.0.1" normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== +normalize-url@^4.1.0: + version "4.5.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.1.tgz#0dd90cf1288ee1d1313b87081c9a5932ee48518a" + integrity sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA== + normalize-url@^6.0.1: version "6.1.0" resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-6.1.0.tgz#40d0885b535deffe3f3147bec877d05fe4c5668a" integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A== +npm-bundled@^1.0.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.1.2.tgz#944c78789bd739035b70baa2ca5cc32b8d860bc1" + integrity sha512-x5DHup0SuyQcmL3s7Rx/YQ8sbw/Hzg0rj48eN0dV7hf5cmQq5PXIeioroH3raV1QC1yh3uTYuMThvEQF3iKgGQ== + dependencies: + npm-normalize-package-bin "^1.0.1" + +npm-normalize-package-bin@^1.0.1: + version "1.0.1" + 
resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz#6e79a41f23fd235c0623218228da7d9c23b8f6e2" + integrity sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA== + +npm-packlist@^1.1.6: + version "1.4.8" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.8.tgz#56ee6cc135b9f98ad3d51c1c95da22bbb9b2ef3e" + integrity sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A== + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + npm-normalize-package-bin "^1.0.1" + npm-run-path@^4.0.0, npm-run-path@^4.0.1: version "4.0.1" resolved "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz" @@ -7017,13 +8788,28 @@ npm-run-path@^4.0.0, npm-run-path@^4.0.1: dependencies: path-key "^3.0.0" -nth-check@^2.0.1: +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +nth-check@^2.0.0: version "2.1.1" resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d" integrity sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w== dependencies: boolbase "^1.0.0" +nullthrows@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/nullthrows/-/nullthrows-1.1.1.tgz#7818258843856ae971eae4208ad7d7eb19a431b1" + integrity sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw== + number-is-nan@^1.0.0: version "1.0.1" resolved "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz" @@ -7047,21 +8833,74 @@ object-assign@^4, object-assign@^4.1.0, 
object-assign@^4.1.1: resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz" integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= +object-inspect@^1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.3.tgz#ba62dffd67ee256c8c086dfae69e016cd1f198b9" + integrity sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g== + object-inspect@^1.9.0: version "1.12.2" resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz" integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ== +object-keys@^1.0.11, object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + object-keys@~0.4.0: version "0.4.0" resolved "https://registry.npmjs.org/object-keys/-/object-keys-0.4.0.tgz" integrity sha1-KKaq50KN0sOpLz2V8hM13SBOAzY= +object-path@^0.11.4: + version "0.11.8" + resolved "https://registry.yarnpkg.com/object-path/-/object-path-0.11.8.tgz#ed002c02bbdd0070b78a27455e8ae01fc14d4742" + integrity sha512-YJjNZrlXJFM42wTBn6zgOJVar9KFJvzx6sTWDte8sWZF//cnjl0BxHNpfZx+ZffXX63A9q0b1zsFiBX4g4X5KA== + object-treeify@^1.1.33: version "1.1.33" resolved "https://registry.npmjs.org/object-treeify/-/object-treeify-1.1.33.tgz" integrity sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A== +object.assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + 
+object.assign@^4.1.4: + version "4.1.4" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.4.tgz#9673c7c7c351ab8c4d0b516f4343ebf4dfb7799f" + integrity sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + has-symbols "^1.0.3" + object-keys "^1.1.1" + +object.getownpropertydescriptors@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.6.tgz#5e5c384dd209fa4efffead39e3a0512770ccc312" + integrity sha512-lq+61g26E/BgHv0ZTFgRvi7NMEPuAxLkFU7rukXjc/AlwH4Am5xXVnIXy3un1bg/JPbXHrixRkK1itUzzPiIjQ== + dependencies: + array.prototype.reduce "^1.0.5" + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.21.2" + safe-array-concat "^1.0.0" + +oboe@2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/oboe/-/oboe-2.1.4.tgz#20c88cdb0c15371bb04119257d4fdd34b0aa49f6" + integrity sha512-ymBJ4xSC6GBXLT9Y7lirj+xbqBLa+jADGJldGEYG7u8sZbS9GyG+u1Xk9c5cbriKwSpCg41qUhPjvU5xOpvIyQ== + dependencies: + http-https "^1.0.0" + oboe@2.1.5: version "2.1.5" resolved "https://registry.npmjs.org/oboe/-/oboe-2.1.5.tgz" @@ -7090,6 +8929,20 @@ onetime@^5.1.0, onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" +opencollective-postinstall@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz#7a0fff978f6dbfa4d006238fbac98ed4198c3259" + integrity sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q== + +optimism@^0.17.5: + version "0.17.5" + resolved "https://registry.yarnpkg.com/optimism/-/optimism-0.17.5.tgz#a4c78b3ad12c58623abedbebb4f2f2c19b8e8816" + integrity sha512-TEcp8ZwK1RczmvMnvktxHSF2tKgMWjJ71xEFGX5ApLh67VsMSTy1ZUlipJw8W+KaqgOmQ+4pqwkeivY89j+4Vw== + dependencies: + "@wry/context" "^0.7.0" + "@wry/trie" "^0.4.3" + tslib "^2.3.0" + ora@4.0.2: 
version "4.0.2" resolved "https://registry.npmjs.org/ora/-/ora-4.0.2.tgz" @@ -7127,22 +8980,28 @@ os-homedir@^1.0.0: resolved "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz" integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= -os-locale@^1.4.0: - version "1.4.0" - resolved "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz" - integrity sha1-IPnxeuKe00XoveWDsT0gCYA8FNk= - dependencies: - lcid "^1.0.0" - -os-tmpdir@^1.0.1, os-tmpdir@~1.0.2: +os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz" integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= -p-cancelable@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-2.1.1.tgz#aab7fbd416582fa32a3db49859c122487c5ed2cf" - integrity sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg== +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-cancelable@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.3.0.tgz#b9e123800bcebb7ac13a479be195b507b98d30fa" + integrity sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw== + +p-cancelable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" + integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== p-cancelable@^3.0.0: version "3.0.0" @@ -7162,11 +9021,23 @@ p-fifo@^1.0.0: fast-fifo "^1.0.0" p-defer "^3.0.0" +p-finally@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + integrity sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow== + p-finally@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/p-finally/-/p-finally-2.0.1.tgz" integrity sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== +p-limit@3.1.0, p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + p-limit@^1.1.0: version "1.3.0" resolved "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz" @@ -7174,20 +9045,13 @@ p-limit@^1.1.0: dependencies: p-try "^1.0.0" -p-limit@^2.0.0: +p-limit@^2.0.0, p-limit@^2.2.0: version "2.3.0" resolved "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz" integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== dependencies: p-try "^2.0.0" -p-limit@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" - integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== - dependencies: - yocto-queue "^0.1.0" - p-locate@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz" @@ -7202,6 +9066,13 @@ p-locate@^3.0.0: dependencies: p-limit "^2.0.0" +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + p-locate@^5.0.0: version "5.0.0" resolved 
"https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz" @@ -7209,6 +9080,13 @@ p-locate@^5.0.0: dependencies: p-limit "^3.0.2" +p-timeout@^1.1.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-1.2.1.tgz#5eb3b353b7fce99f101a1038880bb054ebbea386" + integrity sha512-gb0ryzr+K2qFqFv6qi3khoeqMZF/+ajxQipEF6NteZVnvz9tzdsfAVj3lYtn1gAXvH5lfLwfxEII799gt/mRIA== + dependencies: + p-finally "^1.0.0" + p-try@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz" @@ -7219,6 +9097,11 @@ p-try@^2.0.0: resolved "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== +pako@^1.0.4: + version "1.0.11" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" + integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw== + param-case@^2.1.0: version "2.1.1" resolved "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz" @@ -7233,6 +9116,17 @@ parent-module@^1.0.0: dependencies: callsites "^3.0.0" +parse-asn1@^5.0.0, parse-asn1@^5.1.5: + version "5.1.6" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.6.tgz#385080a3ec13cb62a62d39409cb3e88844cdaed4" + integrity sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw== + dependencies: + asn1.js "^5.2.0" + browserify-aes "^1.0.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + safe-buffer "^5.1.1" + parse-cache-control@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/parse-cache-control/-/parse-cache-control-1.0.1.tgz" @@ -7248,13 +9142,6 @@ parse-headers@^2.0.0: resolved "https://registry.npmjs.org/parse-headers/-/parse-headers-2.0.3.tgz" integrity sha512-QhhZ+DCCit2Coi2vmAKbq5RGTRcQUOE2+REgv8vdyu7MnYx2eZztegqtTx99TZ86GTIwqiy3+4nQTWZ2tgmdCA== -parse-json@^2.2.0: - version "2.2.0" - resolved 
"https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz" - integrity sha1-9ID0BDTvgHQfhGkJn43qGPVaTck= - dependencies: - error-ex "^1.2.0" - parse-json@^5.0.0: version "5.2.0" resolved "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz" @@ -7272,19 +9159,12 @@ parse5-htmlparser2-tree-adapter@^6.0.0: dependencies: parse5 "^6.0.1" -parse5@^6.0.0: +parse5@^6.0.0, parse5@^6.0.1: version "6.0.1" resolved "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== -parse5@^7.0.0: - version "7.1.2" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32" - integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw== - dependencies: - entities "^4.4.0" - -parseurl@^1.3.2, parseurl@^1.3.3, parseurl@~1.3.3: +parseurl@^1.3.2, parseurl@~1.3.3: version "1.3.3" resolved "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz" integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== @@ -7297,6 +9177,14 @@ pascal-case@^2.0.0, pascal-case@^2.0.1: camel-case "^3.0.0" upper-case-first "^1.1.0" +pascal-case@^3.1.1, pascal-case@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/pascal-case/-/pascal-case-3.1.2.tgz#b48e0ef2b98e205e7c1dae747d0b1508237660eb" + integrity sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g== + dependencies: + no-case "^3.0.4" + tslib "^2.0.3" + password-prompt@^1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/password-prompt/-/password-prompt-1.1.2.tgz" @@ -7312,13 +9200,6 @@ path-case@^2.1.0: dependencies: no-case "^2.2.0" -path-exists@^2.0.0: - version "2.1.0" - resolved "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz" - integrity sha1-D+tsZPD8UY2adU3V77YscCJ2H0s= - dependencies: - pinkie-promise "^2.0.0" - 
path-exists@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz" @@ -7344,9 +9225,9 @@ path-key@^3.0.0, path-key@^3.1.0: resolved "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== -path-parse@^1.0.7: +path-parse@^1.0.6: version "1.0.7" - resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== path-scurry@^1.6.1: @@ -7362,21 +9243,12 @@ path-to-regexp@0.1.7: resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz" integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== -path-type@^1.0.0: - version "1.1.0" - resolved "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz" - integrity sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE= - dependencies: - graceful-fs "^4.1.2" - pify "^2.0.0" - pinkie-promise "^2.0.0" - path-type@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== -pbkdf2@^3.0.17: +pbkdf2@^3.0.17, pbkdf2@^3.0.3: version "3.1.2" resolved "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz" integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA== @@ -7402,28 +9274,11 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -pify@^2.0.0: - version "2.3.0" - resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" - integrity 
sha1-7RQaasBDqEnqWISY59yosVMw6Qw= - pify@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz" integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY= -pinkie-promise@^2.0.0: - version "2.0.1" - resolved "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz" - integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= - dependencies: - pinkie "^2.0.0" - -pinkie@^2.0.0: - version "2.0.4" - resolved "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz" - integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= - pkg-up@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-3.1.0.tgz#100ec235cc150e4fd42519412596a28512a0def5" @@ -7436,38 +9291,38 @@ pluralize@^8.0.0: resolved "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz" integrity sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA== -pouchdb-abstract-mapreduce@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-abstract-mapreduce/-/pouchdb-abstract-mapreduce-7.3.1.tgz#96ff4a0f41cbe273f3f52fde003b719005a2093c" - integrity sha512-0zKXVFBvrfc1KnN0ggrB762JDmZnUpePHywo9Bq3Jy+L1FnoG7fXM5luFfvv5/T0gEw+ZTIwoocZECMnESBI9w== - dependencies: - pouchdb-binary-utils "7.3.1" - pouchdb-collate "7.3.1" - pouchdb-collections "7.3.1" - pouchdb-errors "7.3.1" - pouchdb-fetch "7.3.1" - pouchdb-mapreduce-utils "7.3.1" - pouchdb-md5 "7.3.1" - pouchdb-utils "7.3.1" - -pouchdb-adapter-leveldb-core@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-adapter-leveldb-core/-/pouchdb-adapter-leveldb-core-7.3.1.tgz#3c71dce7ff06c2e483d873d7aabc1fded56372ca" - integrity sha512-mxShHlqLMPz2gChrgtA9okV1ogFmQrRAoM/O4EN0CrQWPLXqYtpL1f7sI2asIvFe7SmpnvbLx7kkZyFmLTfwjA== +pouchdb-abstract-mapreduce@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-abstract-mapreduce/-/pouchdb-abstract-mapreduce-7.2.2.tgz#dd1b10a83f8d24361dce9aaaab054614b39f766f" + integrity 
sha512-7HWN/2yV2JkwMnGnlp84lGvFtnm0Q55NiBUdbBcaT810+clCGKvhssBCrXnmwShD1SXTwT83aszsgiSfW+SnBA== + dependencies: + pouchdb-binary-utils "7.2.2" + pouchdb-collate "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-fetch "7.2.2" + pouchdb-mapreduce-utils "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-adapter-leveldb-core@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-leveldb-core/-/pouchdb-adapter-leveldb-core-7.2.2.tgz#e0aa6a476e2607d7ae89f4a803c9fba6e6d05a8a" + integrity sha512-K9UGf1Ivwe87mjrMqN+1D07tO/DfU7ariVDrGffuOjvl+3BcvUF25IWrxsBObd4iPOYCH7NVQWRpojhBgxULtQ== dependencies: argsarray "0.0.1" - buffer-from "1.1.2" + buffer-from "1.1.1" double-ended-queue "2.1.0-0" levelup "4.4.0" - pouchdb-adapter-utils "7.3.1" - pouchdb-binary-utils "7.3.1" - pouchdb-collections "7.3.1" - pouchdb-errors "7.3.1" - pouchdb-json "7.3.1" - pouchdb-md5 "7.3.1" - pouchdb-merge "7.3.1" - pouchdb-utils "7.3.1" - sublevel-pouchdb "7.3.1" + pouchdb-adapter-utils "7.2.2" + pouchdb-binary-utils "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-json "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-merge "7.2.2" + pouchdb-utils "7.2.2" + sublevel-pouchdb "7.2.2" through2 "3.0.2" pouchdb-adapter-memory@^7.1.1: @@ -7479,34 +9334,80 @@ pouchdb-adapter-memory@^7.1.1: pouchdb-adapter-leveldb-core "7.2.2" pouchdb-utils "7.2.2" -pouchdb-adapter-utils@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-adapter-utils/-/pouchdb-adapter-utils-7.3.1.tgz#7237cb597f8d337057df15d4859bfe3c881d8832" - integrity sha512-uKLG6dClwTs/sLIJ4WkLAi9wlnDBpOnfyhpeAgOjlOGN/XLz5nKHrA4UJRnURDyc+uv79S9r/Unc4hVpmbSPUw== +pouchdb-adapter-node-websql@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-node-websql/-/pouchdb-adapter-node-websql-7.0.0.tgz#64ad88dd45b23578e454bf3032a3a79f9d1e4008" + integrity 
sha512-fNaOMO8bvMrRTSfmH4RSLSpgnKahRcCA7Z0jg732PwRbGvvMdGbreZwvKPPD1fg2tm2ZwwiXWK2G3+oXyoqZYw== dependencies: - pouchdb-binary-utils "7.3.1" - pouchdb-collections "7.3.1" - pouchdb-errors "7.3.1" - pouchdb-md5 "7.3.1" - pouchdb-merge "7.3.1" - pouchdb-utils "7.3.1" + pouchdb-adapter-websql-core "7.0.0" + pouchdb-utils "7.0.0" + websql "1.0.0" -pouchdb-binary-utils@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-binary-utils/-/pouchdb-binary-utils-7.3.1.tgz#eea22d9a5f880fcd95062476f4f5484cdf61496f" - integrity sha512-crZJNfAEOnUoRk977Qtmk4cxEv6sNKllQ6vDDKgQrQLFjMUXma35EHzNyIJr1s76J77Q4sqKQAmxz9Y40yHGtw== +pouchdb-adapter-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-utils/-/pouchdb-adapter-utils-7.0.0.tgz#1ac8d34481911e0e9a9bf51024610a2e7351dc80" + integrity sha512-UWKPC6jkz6mHUzZefrU7P5X8ZGvBC8LSNZ7BIp0hWvJE6c20cnpDwedTVDpZORcCbVJpDmFOHBYnOqEIblPtbA== dependencies: - buffer-from "1.1.2" + pouchdb-binary-utils "7.0.0" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-md5 "7.0.0" + pouchdb-merge "7.0.0" + pouchdb-utils "7.0.0" -pouchdb-collate@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-collate/-/pouchdb-collate-7.3.1.tgz#19d7b87dd173d1c765da8cc9987c5aa9eb24f11f" - integrity sha512-o4gyGqDMLMSNzf6EDTr3eHaH/JRMoqRhdc+eV+oA8u00nTBtr9wD+jypVe2LbgKLJ4NWqx2qVkXiTiQdUFtsLQ== +pouchdb-adapter-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-utils/-/pouchdb-adapter-utils-7.2.2.tgz#c64426447d9044ba31517a18500d6d2d28abd47d" + integrity sha512-2CzZkTyTyHZkr3ePiWFMTiD5+56lnembMjaTl8ohwegM0+hYhRyJux0biAZafVxgIL4gnCUC4w2xf6WVztzKdg== + dependencies: + pouchdb-binary-utils "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-merge "7.2.2" + pouchdb-utils "7.2.2" -pouchdb-collections@7.3.1: - version "7.3.1" - resolved 
"https://registry.yarnpkg.com/pouchdb-collections/-/pouchdb-collections-7.3.1.tgz#4f1819cf4dd6936a422c29f7fa26a9b5dca428f5" - integrity sha512-yUyDqR+OJmtwgExOSJegpBJXDLAEC84TWnbAYycyh+DZoA51Yw0+XVQF5Vh8Ii90/Ut2xo88fmrmp0t6kqom8w== +pouchdb-adapter-websql-core@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-websql-core/-/pouchdb-adapter-websql-core-7.0.0.tgz#27b3e404159538e515b2567baa7869f90caac16c" + integrity sha512-NyMaH0bl20SdJdOCzd+fwXo8JZ15a48/MAwMcIbXzsRHE4DjFNlRcWAcjUP6uN4Ezc+Gx+r2tkBBMf71mIz1Aw== + dependencies: + pouchdb-adapter-utils "7.0.0" + pouchdb-binary-utils "7.0.0" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-json "7.0.0" + pouchdb-merge "7.0.0" + pouchdb-utils "7.0.0" + +pouchdb-binary-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-binary-utils/-/pouchdb-binary-utils-7.0.0.tgz#cb71a288b09572a231f6bab1b4aed201c4d219a7" + integrity sha512-yUktdOPIPvOVouCjJN3uop+bCcpdPwePrLm9eUAZNgEYnUFu0njdx7Q0WRsZ7UJ6l75HinL5ZHk4bnvEt86FLw== + dependencies: + buffer-from "1.1.0" + +pouchdb-binary-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-binary-utils/-/pouchdb-binary-utils-7.2.2.tgz#0690b348052c543b1e67f032f47092ca82bcb10e" + integrity sha512-shacxlmyHbUrNfE6FGYpfyAJx7Q0m91lDdEAaPoKZM3SzAmbtB1i+OaDNtYFztXjJl16yeudkDb3xOeokVL3Qw== + dependencies: + buffer-from "1.1.1" + +pouchdb-collate@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-collate/-/pouchdb-collate-7.2.2.tgz#fc261f5ef837c437e3445fb0abc3f125d982c37c" + integrity sha512-/SMY9GGasslknivWlCVwXMRMnQ8myKHs4WryQ5535nq1Wj/ehpqWloMwxEQGvZE1Sda3LOm7/5HwLTcB8Our+w== + +pouchdb-collections@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-collections/-/pouchdb-collections-7.0.0.tgz#fd1f632337dc6301b0ff8649732ca79204e41780" + integrity sha512-DaoUr/vU24Q3gM6ghj0va9j/oBanPwkbhkvnqSyC3Dm5dgf5pculNxueLF9PKMo3ycApoWzHMh6N2N8KJbDU2Q== + 
+pouchdb-collections@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-collections/-/pouchdb-collections-7.2.2.tgz#aeed77f33322429e3f59d59ea233b48ff0e68572" + integrity sha512-6O9zyAYlp3UdtfneiMYuOCWdUCQNo2bgdjvNsMSacQX+3g8WvIoFQCYJjZZCpTttQGb+MHeRMr8m2U95lhJTew== pouchdb-debug@^7.1.1: version "7.2.1" @@ -7515,21 +9416,28 @@ pouchdb-debug@^7.1.1: dependencies: debug "3.1.0" -pouchdb-errors@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-errors/-/pouchdb-errors-7.3.1.tgz#78be36721e2edc446fac158a236a9218c7bcdb14" - integrity sha512-Zktz4gnXEUcZcty8FmyvtYUYsHskoST05m6H5/E2gg/0mCfEXq/XeyyLkZHaZmqD0ZPS9yNmASB1VaFWEKEaDw== +pouchdb-errors@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-errors/-/pouchdb-errors-7.0.0.tgz#4e2a5a8b82af20cbe5f9970ca90b7ec74563caa0" + integrity sha512-dTusY8nnTw4HIztCrNl7AoGgwvS1bVf/3/97hDaGc4ytn72V9/4dK8kTqlimi3UpaurohYRnqac0SGXYP8vgXA== + dependencies: + inherits "2.0.3" + +pouchdb-errors@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-errors/-/pouchdb-errors-7.2.2.tgz#80d811d65c766c9d20b755c6e6cc123f8c3c4792" + integrity sha512-6GQsiWc+7uPfgEHeavG+7wuzH3JZW29Dnrvz8eVbDFE50kVFxNDVm3EkYHskvo5isG7/IkOx7PV7RPTA3keG3g== dependencies: inherits "2.0.4" -pouchdb-fetch@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-fetch/-/pouchdb-fetch-7.3.1.tgz#d54b1807be0f0a5d4b6d06e416c7d54952bbc348" - integrity sha512-205xAtvdHRPQ4fp1h9+RmT9oQabo9gafuPmWsS9aEl3ER54WbY8Vaj1JHZGbU4KtMTYvW7H5088zLS7Nrusuag== +pouchdb-fetch@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-fetch/-/pouchdb-fetch-7.2.2.tgz#492791236d60c899d7e9973f9aca0d7b9cc02230" + integrity sha512-lUHmaG6U3zjdMkh8Vob9GvEiRGwJfXKE02aZfjiVQgew+9SLkuOxNw3y2q4d1B6mBd273y1k2Lm0IAziRNxQnA== dependencies: abort-controller "3.0.0" - fetch-cookie "0.11.0" - node-fetch "2.6.7" + fetch-cookie "0.10.1" + node-fetch "2.6.0" pouchdb-find@^7.0.0: version "7.2.2" 
@@ -7544,82 +9452,115 @@ pouchdb-find@^7.0.0: pouchdb-selector-core "7.2.2" pouchdb-utils "7.2.2" -pouchdb-json@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-json/-/pouchdb-json-7.3.1.tgz#a80a3060aa2914959e4dca7a4e2022ab20c7119a" - integrity sha512-AyOKsmc85/GtHjMZyEacqzja8qLVfycS1hh1oskR+Bm5PIITX52Fb8zyi0hEetV6VC0yuGbn0RqiLjJxQePeqQ== +pouchdb-json@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-json/-/pouchdb-json-7.0.0.tgz#d9860f66f27a359ac6e4b24da4f89b6909f37530" + integrity sha512-w0bNRu/7VmmCrFWMYAm62n30wvJJUT2SokyzeTyj3hRohj4GFwTRg1mSZ+iAmxgRKOFE8nzZstLG/WAB4Ymjew== dependencies: vuvuzela "1.0.3" -pouchdb-mapreduce-utils@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-mapreduce-utils/-/pouchdb-mapreduce-utils-7.3.1.tgz#f0ac2c8400fbedb705e9226082453ac7d3f2a066" - integrity sha512-oUMcq82+4pTGQ6dtrhgORHOVHZSr6w/5tFIUGlv7RABIDvJarL4snMawADjlpiEwPdiQ/ESG8Fqt8cxqvqsIgg== +pouchdb-json@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-json/-/pouchdb-json-7.2.2.tgz#b939be24b91a7322e9a24b8880a6e21514ec5e1f" + integrity sha512-3b2S2ynN+aoB7aCNyDZc/4c0IAdx/ir3nsHB+/RrKE9cM3QkQYbnnE3r/RvOD1Xvr6ji/KOCBie+Pz/6sxoaug== + dependencies: + vuvuzela "1.0.3" + +pouchdb-mapreduce-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-mapreduce-utils/-/pouchdb-mapreduce-utils-7.2.2.tgz#13a46a3cc2a3f3b8e24861da26966904f2963146" + integrity sha512-rAllb73hIkU8rU2LJNbzlcj91KuulpwQu804/F6xF3fhZKC/4JQMClahk+N/+VATkpmLxp1zWmvmgdlwVU4HtQ== dependencies: argsarray "0.0.1" inherits "2.0.4" - pouchdb-collections "7.3.1" - pouchdb-utils "7.3.1" + pouchdb-collections "7.2.2" + pouchdb-utils "7.2.2" -pouchdb-md5@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-md5/-/pouchdb-md5-7.3.1.tgz#70fae44f9d27eb4c6a8e7106156b4593d31c1762" - integrity sha512-aDV8ui/mprnL3xmt0gT/81DFtTtJiKyn+OxIAbwKPMfz/rDFdPYvF0BmDC9QxMMzGfkV+JJUjU6at0PPs2mRLg== 
+pouchdb-md5@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-md5/-/pouchdb-md5-7.0.0.tgz#935dc6bb507a5f3978fb653ca5790331bae67c96" + integrity sha512-yaSJKhLA3QlgloKUQeb2hLdT3KmUmPfoYdryfwHZuPTpXIRKTnMQTR9qCIRUszc0ruBpDe53DRslCgNUhAyTNQ== dependencies: - pouchdb-binary-utils "7.3.1" - spark-md5 "3.0.2" + pouchdb-binary-utils "7.0.0" + spark-md5 "3.0.0" -pouchdb-merge@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-merge/-/pouchdb-merge-7.3.1.tgz#97aae682d7d8499b62b6ce234dcb9527c7bf6f02" - integrity sha512-FeK3r35mKimokf2PQ2tUI523QWyZ4lYZ0Yd75FfSch/SPY6wIokz5XBZZ6PHdu5aOJsEKzoLUxr8CpSg9DhcAw== +pouchdb-md5@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-md5/-/pouchdb-md5-7.2.2.tgz#415401acc5a844112d765bd1fb4e5d9f38fb0838" + integrity sha512-c/RvLp2oSh8PLAWU5vFBnp6ejJABIdKqboZwRRUrWcfGDf+oyX8RgmJFlYlzMMOh4XQLUT1IoaDV8cwlsuryZw== + dependencies: + pouchdb-binary-utils "7.2.2" + spark-md5 "3.0.1" -pouchdb-selector-core@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-selector-core/-/pouchdb-selector-core-7.3.1.tgz#08245662de3d61f16ab8dae2b56ef622935b3fb3" - integrity sha512-HBX+nNGXcaL9z0uNpwSMRq2GNZd3EZXW+fe9rJHS0hvJohjZL7aRJLoaXfEdHPRTNW+CpjM3Rny60eGekQdI/w== +pouchdb-merge@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-merge/-/pouchdb-merge-7.0.0.tgz#9f476ce7e32aae56904ad770ae8a1dfe14b57547" + integrity sha512-tci5u6NpznQhGcPv4ho1h0miky9rs+ds/T9zQ9meQeDZbUojXNaX1Jxsb0uYEQQ+HMqdcQs3Akdl0/u0mgwPGg== + +pouchdb-merge@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-merge/-/pouchdb-merge-7.2.2.tgz#940d85a2b532d6a93a6cab4b250f5648511bcc16" + integrity sha512-6yzKJfjIchBaS7Tusuk8280WJdESzFfQ0sb4jeMUNnrqs4Cx3b0DIEOYTRRD9EJDM+je7D3AZZ4AT0tFw8gb4A== + +pouchdb-selector-core@7.2.2: + version "7.2.2" + resolved 
"https://registry.yarnpkg.com/pouchdb-selector-core/-/pouchdb-selector-core-7.2.2.tgz#264d7436a8c8ac3801f39960e79875ef7f3879a0" + integrity sha512-XYKCNv9oiNmSXV5+CgR9pkEkTFqxQGWplnVhO3W9P154H08lU0ZoNH02+uf+NjZ2kjse7Q1fxV4r401LEcGMMg== dependencies: - pouchdb-collate "7.3.1" - pouchdb-utils "7.3.1" + pouchdb-collate "7.2.2" + pouchdb-utils "7.2.2" -pouchdb-utils@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/pouchdb-utils/-/pouchdb-utils-7.3.1.tgz#d25f0a034427f388ba5ae37d9ae3fbed210e8720" - integrity sha512-R3hHBo1zTdTu/NFs3iqkcaQAPwhIH0gMIdfVKd5lbDYlmP26rCG5pdS+v7NuoSSFLJ4xxnaGV+Gjf4duYsJ8wQ== +pouchdb-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-utils/-/pouchdb-utils-7.0.0.tgz#48bfced6665b8f5a2b2d2317e2aa57635ed1e88e" + integrity sha512-1bnoX1KdZYHv9wicDIFdO0PLiVIMzNDUBUZ/yOJZ+6LW6niQCB8aCv09ZztmKfSQcU5nnN3fe656tScBgP6dOQ== + dependencies: + argsarray "0.0.1" + clone-buffer "1.0.0" + immediate "3.0.6" + inherits "2.0.3" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-md5 "7.0.0" + uuid "3.2.1" + +pouchdb-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-utils/-/pouchdb-utils-7.2.2.tgz#c17c4788f1d052b0daf4ef8797bbc4aaa3945aa4" + integrity sha512-XmeM5ioB4KCfyB2MGZXu1Bb2xkElNwF1qG+zVFbQsKQij0zvepdOUfGuWvLRHxTOmt4muIuSOmWZObZa3NOgzQ== dependencies: argsarray "0.0.1" clone-buffer "1.0.0" immediate "3.3.0" inherits "2.0.4" - pouchdb-collections "7.3.1" - pouchdb-errors "7.3.1" - pouchdb-md5 "7.3.1" - uuid "8.3.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-md5 "7.2.2" + uuid "8.1.0" -pouchdb@7.3.0: - version "7.3.0" - resolved "https://registry.yarnpkg.com/pouchdb/-/pouchdb-7.3.0.tgz#440fbef12dfd8f9002320802528665e883a3b7f8" - integrity sha512-OwsIQGXsfx3TrU1pLruj6PGSwFH+h5k4hGNxFkZ76Um7/ZI8F5TzUHFrpldVVIhfXYi2vP31q0q7ot1FSLFYOw== +pouchdb@7.1.1: + version "7.1.1" + resolved 
"https://registry.yarnpkg.com/pouchdb/-/pouchdb-7.1.1.tgz#f5f8dcd1fc440fb76651cb26f6fc5d97a39cd6ce" + integrity sha512-8bXWclixNJZqokvxGHRsG19zehSJiaZaz4dVYlhXhhUctz7gMcNTElHjPBzBdZlKKvt9aFDndmXN1VVE53Co8g== dependencies: - abort-controller "3.0.0" argsarray "0.0.1" - buffer-from "1.1.2" + buffer-from "1.1.0" clone-buffer "1.0.0" double-ended-queue "2.1.0-0" - fetch-cookie "0.11.0" - immediate "3.3.0" - inherits "2.0.4" - level "6.0.1" - level-codec "9.0.2" + fetch-cookie "0.7.0" + immediate "3.0.6" + inherits "2.0.3" + level "5.0.1" + level-codec "9.0.1" level-write-stream "1.0.0" - leveldown "5.6.0" - levelup "4.4.0" + leveldown "5.0.2" + levelup "4.0.2" ltgt "2.2.1" - node-fetch "2.6.7" - readable-stream "1.1.14" - spark-md5 "3.0.2" - through2 "3.0.2" - uuid "8.3.2" + node-fetch "2.4.1" + readable-stream "1.0.33" + spark-md5 "3.0.0" + through2 "3.0.1" + uuid "3.2.1" vuvuzela "1.0.3" precond@0.2: @@ -7627,6 +9568,16 @@ precond@0.2: resolved "https://registry.npmjs.org/precond/-/precond-0.2.3.tgz" integrity sha1-qpWRvKokkj8eD0hJ0kD0fvwQdaw= +prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + integrity sha512-PhmXi5XmoyKw1Un4E+opM2KcsJInDvKyuOumcjjw3waw86ZNjHwVUOOWLc4bCzLdcKNaWBH9e99sbWzDQsVaYg== + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA== + prettier@1.19.1: version "1.19.1" resolved "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz" @@ -7655,6 +9606,24 @@ promise-to-callback@^1.0.0: is-fn "^1.0.0" set-immediate-shim "^1.0.1" +promise.allsettled@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/promise.allsettled/-/promise.allsettled-1.0.2.tgz#d66f78fbb600e83e863d893e98b3d4376a9c47c9" + integrity 
sha512-UpcYW5S1RaNKT6pd+s9jp9K9rlQge1UXKskec0j6Mmuq7UJCvlS2J2/s/yuPN8ehftf9HXMxWlKiPbGGUzpoRg== + dependencies: + array.prototype.map "^1.0.1" + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + function-bind "^1.1.1" + iterate-value "^1.0.0" + +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== + dependencies: + asap "~2.0.3" + promise@^8.0.0: version "8.1.0" resolved "https://registry.npmjs.org/promise/-/promise-8.1.0.tgz" @@ -7662,6 +9631,15 @@ promise@^8.0.0: dependencies: asap "~2.0.6" +prop-types@^15.7.2: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" + protobufjs@^6.10.2: version "6.11.3" resolved "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.3.tgz" @@ -7709,6 +9687,18 @@ psl@^1.1.33: resolved "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz" integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== +public-encrypt@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0" + integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q== + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + safe-buffer "^5.1.2" + pump@^1.0.0: version "1.0.3" resolved "https://registry.npmjs.org/pump/-/pump-1.0.3.tgz" @@ -7785,28 +9775,53 @@ query-string@^5.0.1: object-assign "^4.1.0" strict-uri-encode "^1.0.0" 
+querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + integrity sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g== + +querystring@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.1.tgz#40d77615bb09d16902a85c3e38aa8b5ed761c2dd" + integrity sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg== + queue-microtask@^1.2.2: version "1.2.2" resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.2.tgz" integrity sha512-dB15eXv3p2jDlbOiNLyMabYg1/sXvppd8DP2J3EOCQ0AkuSXCW2tP7mnVouVLJKgUMY6yP0kcQDVpLCN13h4Xg== -queue-microtask@^1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" - integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== - quick-lru@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== -randombytes@^2.1.0: +ramda@^0.24.1: + version "0.24.1" + resolved "https://registry.yarnpkg.com/ramda/-/ramda-0.24.1.tgz#c3b7755197f35b8dc3502228262c4c91ddb6b857" + integrity sha512-HEm619G8PaZMfkqCa23qiOe7r3R0brPu7ZgOsgKUsnvLhd0qhc/vTjkUovomgPWa5ECBa08fJZixth9LaoBo5w== + +ramdasauce@^2.1.0: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ramdasauce/-/ramdasauce-2.1.3.tgz#acb45ecc7e4fc4d6f39e19989b4a16dff383e9c2" + integrity sha512-Ml3CPim4SKwmg5g9UI77lnRSeKr/kQw7YhQ6rfdMcBYy6DMlwmkEwQqjygJ3OhxPR+NfFfpjKl3Tf8GXckaqqg== + dependencies: + ramda "^0.24.1" + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5, randombytes@^2.0.6, randombytes@^2.1.0: version "2.1.0" resolved 
"https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz" integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== dependencies: safe-buffer "^5.1.0" +randomfill@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" + integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw== + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + range-parser@~1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz" @@ -7832,6 +9847,21 @@ raw-body@2.5.2: iconv-lite "0.4.24" unpipe "1.0.0" +rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-is@^16.13.1, react-is@^16.7.0: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + react-native-fetch-api@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/react-native-fetch-api/-/react-native-fetch-api-3.0.0.tgz" @@ -7839,22 +9869,15 @@ react-native-fetch-api@^3.0.0: dependencies: p-defer "^3.0.0" -read-pkg-up@^1.0.1: - version "1.0.1" - resolved "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz" - integrity sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI= - dependencies: - find-up "^1.0.0" - read-pkg "^1.0.0" - -read-pkg@^1.0.0: - version "1.1.0" - resolved "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz" - integrity sha1-9f+qXs0pyzHAR0vKfXVra7KePyg= +readable-stream@1.0.33: + version "1.0.33" + 
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.33.tgz#3a360dd66c1b1d7fd4705389860eda1d0f61126c" + integrity sha512-72KxhcKi8bAvHP/cyyWSP+ODS5ef0DIRs0OzrhGXw31q41f19aoELCbvd42FjhpyEDxQMRiiC5rq9rfE5PzTqg== dependencies: - load-json-file "^1.0.0" - normalize-package-data "^2.3.2" - path-type "^1.0.0" + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" readable-stream@1.1.14, readable-stream@^1.0.33: version "1.1.14" @@ -7888,6 +9911,19 @@ readable-stream@^2.0.0, readable-stream@^2.2.2, readable-stream@^2.2.9, readable string_decoder "~1.1.1" util-deprecate "~1.0.1" +readable-stream@^2.0.6: + version "2.3.8" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b" + integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + readable-stream@~0.0.2: version "0.0.4" resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-0.0.4.tgz" @@ -7903,6 +9939,13 @@ readable-stream@~1.0.15, readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@~3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.4.0.tgz#9fdccdf9e9155805449221ac645e8303ab5b9ada" + integrity sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ== + dependencies: + picomatch "^2.2.1" + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz" @@ -7924,6 +9967,32 @@ redeyed@~2.1.0: dependencies: esprima "~4.0.0" +redux-cli-logger@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/redux-cli-logger/-/redux-cli-logger-2.1.0.tgz#7e546502a4b08c7fac4fe2faee2326a6326cb4a1" + integrity 
sha512-75mVsggAJRSykWy2qxdGI7osocDWvc3RCMeN93hlvS/FxgdRww12NaXslez+W6gBOrSJKO7W16V0IzuISSfCxg== + dependencies: + colors "^1.1.2" + +redux-devtools-core@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/redux-devtools-core/-/redux-devtools-core-0.2.1.tgz#4e43cbe590a1f18c13ee165d2d42e0bc77a164d8" + integrity sha512-RAGOxtUFdr/1USAvxrWd+Gq/Euzgw7quCZlO5TgFpDfG7rB5tMhZUrNyBjpzgzL2yMk0eHnPYIGm7NkIfRzHxQ== + dependencies: + get-params "^0.1.2" + jsan "^3.1.13" + lodash "^4.17.11" + nanoid "^2.0.0" + remotedev-serialize "^0.1.8" + +redux-devtools-instrument@^1.9.4: + version "1.10.0" + resolved "https://registry.yarnpkg.com/redux-devtools-instrument/-/redux-devtools-instrument-1.10.0.tgz#036caf79fa1e5f25ec4bae38a9af4f08c69e323a" + integrity sha512-X8JRBCzX2ADSMp+iiV7YQ8uoTNyEm0VPFPd4T854coz6lvRiBrFSqAr9YAS2n8Kzxx8CJQotR0QF9wsMM+3DvA== + dependencies: + lodash "^4.17.19" + symbol-observable "^1.2.0" + redux-saga@1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/redux-saga/-/redux-saga-1.0.0.tgz" @@ -7959,16 +10028,58 @@ regenerator-runtime@^0.11.0: resolved "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz" integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg== -regenerator-runtime@^0.13.11: - version "0.13.11" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" - integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== - regenerator-runtime@^0.13.4: version "0.13.9" resolved "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz" integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== +regenerator-runtime@^0.14.0: + version "0.14.0" + resolved 
"https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" + integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== + +regexp.prototype.flags@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz#fe7ce25e7e4cca8db37b6634c8a2c7009199b9cb" + integrity sha512-0SutC3pNudRKgquxGoRGIz946MZVHqbNfPjBdxeOhBrdgDKlRoXmYLQN9xRbrR09ZXWeGAdPuif7egofn6v5LA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + functions-have-names "^1.2.3" + +relay-runtime@12.0.0: + version "12.0.0" + resolved "https://registry.yarnpkg.com/relay-runtime/-/relay-runtime-12.0.0.tgz#1e039282bdb5e0c1b9a7dc7f6b9a09d4f4ff8237" + integrity sha512-QU6JKr1tMsry22DXNy9Whsq5rmvwr3LSZiiWV/9+DFpuTWvp+WFhobWMc8TC4OjKFfNhEZy7mOiqUAn5atQtug== + dependencies: + "@babel/runtime" "^7.0.0" + fbjs "^3.0.0" + invariant "^2.2.4" + +remote-redux-devtools@^0.5.12: + version "0.5.16" + resolved "https://registry.yarnpkg.com/remote-redux-devtools/-/remote-redux-devtools-0.5.16.tgz#95b1a4a1988147ca04f3368f3573b661748b3717" + integrity sha512-xZ2D1VRIWzat5nsvcraT6fKEX9Cfi+HbQBCwzNnUAM8Uicm/anOc60XGalcaDPrVmLug7nhDl2nimEa3bL3K9w== + dependencies: + jsan "^3.1.13" + querystring "^0.2.0" + redux-devtools-core "^0.2.1" + redux-devtools-instrument "^1.9.4" + rn-host-detect "^1.1.5" + socketcluster-client "^14.2.1" + +remotedev-serialize@^0.1.8: + version "0.1.9" + resolved "https://registry.yarnpkg.com/remotedev-serialize/-/remotedev-serialize-0.1.9.tgz#5e67e05cbca75d408d769d057dc59d0f56cd2c43" + integrity sha512-5tFdZg9mSaAWTv6xmQ7HtHjKMLSFQFExEZOtJe10PLsv1wb7cy7kYHtBvTYRro27/3fRGEcQBRNKSaixOpb69w== + dependencies: + jsan "^3.1.13" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" 
+ integrity sha512-/hS+Y0u3aOfIETiaiirUFwDBDzmXPvO+jAfKTitUngIPzdKc6Z0LoFjM/CK5PL4C+eKwHohlHAb6H0VFfmmUsw== + repeating@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz" @@ -8007,22 +10118,17 @@ require-directory@^2.1.1: resolved "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz" integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= -require-from-string@^1.1.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-1.2.1.tgz#529c9ccef27380adfec9a2f965b649bbee636418" - integrity sha512-H7AkJWMobeskkttHyhTVtS0fxpFLjxhbfMa6Bk3wimP7sdPRGL3EyCg3sAQenFfAe+xQ+oAc85Nmtvq0ROM83Q== - -require-from-string@^2.0.2: +require-from-string@^2.0.0, require-from-string@^2.0.2: version "2.0.2" - resolved "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== -require-main-filename@^1.0.1: - version "1.0.1" - resolved "https://registry.npmjs.org/require-main-filename/-/require-main-filename-1.0.1.tgz" - integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== -reselect-tree@^1.3.7: +reselect-tree@^1.3.4: version "1.3.7" resolved "https://registry.yarnpkg.com/reselect-tree/-/reselect-tree-1.3.7.tgz#c3eca58765d9df96bae0017f6ff3504c304cdea0" integrity sha512-kZN+C1cVJ6fFN2smSb0l4UvYZlRzttgnu183svH4NrU22cBY++ikgr2QT75Uuk4MYpv5gXSVijw4c5U6cx6GKg== @@ -8036,17 +10142,22 @@ reselect@^4.0.0: resolved 
"https://registry.npmjs.org/reselect/-/reselect-4.0.0.tgz" integrity sha512-qUgANli03jjAyGlnbYVAV5vvnOmJnODyABz51RdBN7M4WaVu8mecZWgyQNkG8Yqe3KRGRt0l4K4B3XVEULC4CA== -resolve-alpn@^1.0.0, resolve-alpn@^1.2.0: +resolve-alpn@^1.2.0: version "1.2.1" resolved "https://registry.yarnpkg.com/resolve-alpn/-/resolve-alpn-1.2.1.tgz#b7adbdac3546aaaec20b45e7d8265927072726f9" integrity sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g== +resolve-from@5.0.0, resolve-from@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" + integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== + resolve-from@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz" integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== -resolve@^1.10.0, resolve@^1.14.2: +resolve@^1.14.2: version "1.20.0" resolved "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz" integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== @@ -8054,6 +10165,18 @@ resolve@^1.10.0, resolve@^1.14.2: is-core-module "^2.2.0" path-parse "^1.0.6" +response-iterator@^0.2.6: + version "0.2.6" + resolved "https://registry.yarnpkg.com/response-iterator/-/response-iterator-0.2.6.tgz#249005fb14d2e4eeb478a3f735a28fd8b4c9f3da" + integrity sha512-pVzEEzrsg23Sh053rmDUvLSkGXluZio0qu8VT6ukrYuvtjVfCbDZH9d6PGXb8HZfzdNZt8feXv/jvUzlhRgLnw== + +responselike@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ== + dependencies: + lowercase-keys "^1.0.0" + responselike@^2.0.0: version "2.0.1" resolved 
"https://registry.yarnpkg.com/responselike/-/responselike-2.0.1.tgz#9a0bc8fdc252f3fb1cca68b016591059ba1422bc" @@ -8084,7 +10207,7 @@ reusify@^1.0.4: resolved "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz" integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== -rimraf@^2.2.8, rimraf@^2.6.3: +rimraf@^2.2.8, rimraf@^2.6.1, rimraf@^2.6.3: version "2.7.1" resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz" integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== @@ -8098,12 +10221,7 @@ rimraf@^3.0.0, rimraf@^3.0.2: dependencies: glob "^7.1.3" -ripemd160-min@0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/ripemd160-min/-/ripemd160-min-0.0.6.tgz#a904b77658114474d02503e819dcc55853b67e62" - integrity sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A== - -ripemd160@^2.0.0, ripemd160@^2.0.1, ripemd160@^2.0.2: +ripemd160@^2.0.0, ripemd160@^2.0.1: version "2.0.2" resolved "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz" integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== @@ -8125,6 +10243,11 @@ rlp@^2.2.4: dependencies: bn.js "^5.2.0" +rn-host-detect@^1.1.5: + version "1.2.0" + resolved "https://registry.yarnpkg.com/rn-host-detect/-/rn-host-detect-1.2.0.tgz#8b0396fc05631ec60c1cb8789e5070cdb04d0da0" + integrity sha512-btNg5kzHcjZZ7t7mvvV/4wNJ9e3MPgrWivkRgWURzXL0JJ0pwWlU4zrbmdlz3HHzHOxhBhHB4D+/dbMFfu4/4A== + run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz" @@ -8137,6 +10260,16 @@ rustbn.js@~0.2.0: resolved "https://registry.npmjs.org/rustbn.js/-/rustbn.js-0.2.0.tgz" integrity sha512-4VlvkRUuCJvr2J6Y0ImW7NvTCriMi7ErOAqWk1y69vAdoNIzCF3yPmgeNzx+RQTLEDFq5sHfscn1MwHxP9hNfA== +safe-array-concat@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.0.0.tgz#2064223cba3c08d2ee05148eedbc563cd6d84060" + integrity sha512-9dVEFruWIsnie89yym+xWTAYASdpw3CJV7Li/6zBewGf9z2i1j31rP6jnY0pHEO4QZh6N0K11bFjWmdR8UGdPQ== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.0" + has-symbols "^1.0.3" + isarray "^2.0.5" + safe-buffer@5.2.1, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: version "5.2.1" resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz" @@ -8154,22 +10287,53 @@ safe-event-emitter@^1.0.1: dependencies: events "^3.0.0" +safe-regex-test@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.0.tgz#793b874d524eb3640d1873aad03596db2d4f2295" + integrity sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.3" + is-regex "^1.1.4" + "safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== +sax@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +sc-channel@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/sc-channel/-/sc-channel-1.2.0.tgz#d9209f3a91e3fa694c66b011ce55c4ad8c3087d9" + integrity sha512-M3gdq8PlKg0zWJSisWqAsMmTVxYRTpVRqw4CWAdKBgAfVKumFcTjoCV0hYu7lgUXccCtCD8Wk9VkkE+IXCxmZA== + dependencies: + component-emitter "1.2.1" + +sc-errors@^2.0.1: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/sc-errors/-/sc-errors-2.0.1.tgz#3af2d934dfd82116279a4b2c1552c1e021ddcb03" + integrity sha512-JoVhq3Ud+3Ujv2SIG7W0XtjRHsrNgl6iXuHHsh0s+Kdt5NwI6N2EGAZD4iteitdDv68ENBkpjtSvN597/wxPSQ== + +sc-formatter@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/sc-formatter/-/sc-formatter-3.0.3.tgz#caeb1e9bf3145dc77b7128b2a8abbb14bad3162e" + integrity sha512-lYI/lTs1u1c0geKElcj+bmEUfcP/HuKg2iDeTijPSjiTNFzN3Cf8Qh6tVd65oi7Qn+2/oD7LP4s6GC13v/9NiQ== + scrypt-js@2.0.4: version "2.0.4" resolved "https://registry.npmjs.org/scrypt-js/-/scrypt-js-2.0.4.tgz" integrity sha512-4KsaGcPnuhtCZQCxFxN3GVYIhKFPTdLd8PLC552XwbMndtD0cjRFAhDuuydXQ0h08ZfPgzqe6EKHozpuH74iDw== -scrypt-js@3.0.1, scrypt-js@^3.0.0, scrypt-js@^3.0.1: +scrypt-js@^3.0.0, scrypt-js@^3.0.1: version "3.0.1" resolved "https://registry.npmjs.org/scrypt-js/-/scrypt-js-3.0.1.tgz" integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== -secp256k1@4.0.3, secp256k1@^4.0.1: +secp256k1@^4.0.1: version "4.0.3" resolved "https://registry.npmjs.org/secp256k1/-/secp256k1-4.0.3.tgz" integrity sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA== @@ -8183,11 +10347,6 @@ semaphore@>=1.0.1, semaphore@^1.0.3: resolved "https://registry.npmjs.org/semaphore/-/semaphore-1.1.0.tgz" integrity sha512-O4OZEaNtkMd/K0i6js9SL+gqy0ZCBMgUvlSqHKi4IBdjhe7wB8pwztUk1BbZ1fmrvpwFrPbHzqd2w5pTcJH6LA== -"semver@2 || 3 || 4 || 5", semver@^5.5.0: - version "5.7.1" - resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz" - integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== - semver@7.3.5: version "7.3.5" resolved "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz" @@ -8214,11 +10373,21 @@ semver@^5.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" integrity 
sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== +semver@^5.5.0: + version "5.7.1" + resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: version "6.3.0" resolved "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== +semver@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== + semver@^7.0.0: version "7.3.7" resolved "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz" @@ -8226,6 +10395,13 @@ semver@^7.0.0: dependencies: lru-cache "^6.0.0" +semver@^7.3.4: + version "7.5.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" + integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== + dependencies: + lru-cache "^6.0.0" + semver@^7.3.5: version "7.5.3" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.3.tgz#161ce8c2c6b4b3bdca6caadc9fa3317a4c4fe88e" @@ -8272,10 +10448,10 @@ sentence-case@^2.1.0: no-case "^2.2.0" upper-case-first "^1.1.2" -serialize-javascript@6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" - integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== +serialize-javascript@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-4.0.0.tgz#b525e1238489a5ecfc42afacc3fe99e666f4b1aa" + integrity 
sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw== dependencies: randombytes "^2.1.0" @@ -8300,7 +10476,7 @@ servify@^0.1.12: request "^2.79.0" xhr "^2.3.3" -set-blocking@^2.0.0: +set-blocking@^2.0.0, set-blocking@~2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz" integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= @@ -8333,13 +10509,6 @@ sha.js@^2.4.0, sha.js@^2.4.11, sha.js@^2.4.8: inherits "^2.0.1" safe-buffer "^5.0.1" -sha3@^2.1.1: - version "2.1.4" - resolved "https://registry.yarnpkg.com/sha3/-/sha3-2.1.4.tgz#000fac0fe7c2feac1f48a25e7a31b52a6492cc8f" - integrity sha512-S8cNxbyb0UGUM2VhRD4Poe5N58gJnJsLJ5vC7FYWGUmGhcsj4++WaIOBFVDxlG0W3To6xBuiRh+i0Qp2oNCOtg== - dependencies: - buffer "6.0.3" - shallowequal@^1.0.2: version "1.1.0" resolved "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz" @@ -8378,11 +10547,16 @@ side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" -signal-exit@^3.0.2, signal-exit@^3.0.3: +signal-exit@^3.0.0, signal-exit@^3.0.2, signal-exit@^3.0.3: version "3.0.7" resolved "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== +signedsource@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/signedsource/-/signedsource-1.0.0.tgz#1ddace4981798f93bd833973803d80d52e93ad6a" + integrity sha512-6+eerH9fEnNmi/hyM1DXcRK3pWdoMQtlkQ+ns0ntzunjKqp5i3sKCc80ym8Fib3iaYhdJUOPdhlJWj1tvge2Ww== + simple-concat@^1.0.0: version "1.0.1" resolved "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz" @@ -8414,16 +10588,21 @@ snake-case@^2.1.0: dependencies: no-case "^2.2.0" -solc@^0.4.20: - version "0.4.26" - resolved "https://registry.yarnpkg.com/solc/-/solc-0.4.26.tgz#5390a62a99f40806b86258c737c1cf653cc35cb5" - integrity 
sha512-o+c6FpkiHd+HPjmjEVpQgH7fqZ14tJpXhho+/bQXlXbliLIS/xjXb42Vxh+qQY1WCSTMQ0+a5vR9vi0MfhU6mA== - dependencies: - fs-extra "^0.30.0" - memorystream "^0.3.1" - require-from-string "^1.1.0" - semver "^5.3.0" - yargs "^4.7.1" +socketcluster-client@^14.2.1: + version "14.3.2" + resolved "https://registry.yarnpkg.com/socketcluster-client/-/socketcluster-client-14.3.2.tgz#c0d245233b114a4972857dc81049c710b7691fb7" + integrity sha512-xDtgW7Ss0ARlfhx53bJ5GY5THDdEOeJnT+/C9Rmrj/vnZr54xeiQfrCZJbcglwe732nK3V+uZq87IvrRl7Hn4g== + dependencies: + buffer "^5.2.1" + clone "2.1.1" + component-emitter "1.2.1" + linked-list "0.1.0" + querystring "0.2.0" + sc-channel "^1.2.0" + sc-errors "^2.0.1" + sc-formatter "^3.0.1" + uuid "3.2.1" + ws "^7.5.0" solc@^0.8.2: version "0.8.2" @@ -8447,7 +10626,7 @@ source-map-support@^0.4.15: dependencies: source-map "^0.5.6" -source-map-support@^0.5.20: +source-map-support@^0.5.19, source-map-support@^0.5.20: version "0.5.21" resolved "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz" integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== @@ -8465,36 +10644,15 @@ source-map@^0.6.0: resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -spark-md5@3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.2.tgz#7952c4a30784347abcee73268e473b9c0167e3fc" - integrity sha512-wcFzz9cDfbuqe0FZzfi2or1sgyIrsDwmPwfZC4hiNidPdPINjeUwNfv5kldczoEAcjl9Y1L3SM7Uz2PUEQzxQw== - -spdx-correct@^3.0.0: - version "3.1.1" - resolved "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz" - integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== - dependencies: - spdx-expression-parse "^3.0.0" - spdx-license-ids "^3.0.0" - -spdx-exceptions@^2.1.0: - version "2.3.0" - resolved 
"https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz" - integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== +spark-md5@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.0.tgz#3722227c54e2faf24b1dc6d933cc144e6f71bfef" + integrity sha512-BpPFB0Oh83mi+6DRcFwxPx96f3OL8Tkq3hdvaHuXaQUsy5F3saI3zIPNQ/UsTQgyAXIHnML1waeCe1WoCPXbpQ== -spdx-expression-parse@^3.0.0: +spark-md5@3.0.1: version "3.0.1" - resolved "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz" - integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== - dependencies: - spdx-exceptions "^2.1.0" - spdx-license-ids "^3.0.0" - -spdx-license-ids@^3.0.0: - version "3.0.7" - resolved "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.7.tgz" - integrity sha512-U+MTEOO0AiDzxwFvoa4JVnMV6mZlJKk2sBLt90s7G0Gd0Mlknc7kxEn3nuDPNZRta7O2uy8oLcZLVT+4sqNZHQ== + resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.1.tgz#83a0e255734f2ab4e5c466e5a2cfc9ba2aa2124d" + integrity sha512-0tF3AGSD1ppQeuffsLDIOWlKUd3lS92tFxcsrh5Pe3ZphhnoK+oXIBTzOAThZCiuINZLvpiLH/1VS1/ANEJVig== split-ca@^1.0.0: version "1.0.1" @@ -8506,6 +10664,14 @@ sprintf-js@~1.0.2: resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== +sqlite3@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/sqlite3/-/sqlite3-4.2.0.tgz#49026d665e9fc4f922e56fb9711ba5b4c85c4901" + integrity sha512-roEOz41hxui2Q7uYnWsjMOTry6TcNUNmp8audCx18gF10P2NknwdpF+E+HKvz/F2NvPKGGBF4NGc+ZPQ+AABwg== + dependencies: + nan "^2.12.1" + node-pre-gyp "^0.11.0" + sshpk@^1.7.0: version "1.17.0" resolved "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz" @@ -8526,6 +10692,23 @@ statuses@2.0.1: resolved 
"https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== +"statuses@>= 1.5.0 < 2": + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== + +stop-iteration-iterator@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz#6a60be0b4ee757d1ed5254858ec66b10c49285e4" + integrity sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ== + dependencies: + internal-slot "^1.0.4" + +stoppable@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/stoppable/-/stoppable-1.1.0.tgz#32da568e83ea488b08e4d7ea2c3bcc9d75015d5b" + integrity sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw== + stream-to-it@^0.2.2: version "0.2.4" resolved "https://registry.npmjs.org/stream-to-it/-/stream-to-it-0.2.4.tgz" @@ -8533,6 +10716,11 @@ stream-to-it@^0.2.2: dependencies: get-iterator "^1.0.2" +streamsearch@0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" + integrity sha512-jos8u++JKm0ARcSUTAZXOVC0mSox7Bhn6sBgty73P1f3JGf7yG2clTbBNHUdde/kdvP2FESam+vM6l8jBrNxHA== + streamsearch@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz" @@ -8552,7 +10740,7 @@ string-width@^1.0.1: is-fullwidth-code-point "^1.0.0" strip-ansi "^3.0.0" -string-width@^2.1.1: +"string-width@^1.0.2 || 2", string-width@^2.1.1: version "2.1.1" resolved "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz" integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== @@ -8560,7 
+10748,7 @@ string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^4.0.0, string-width@^4.2.3: +"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -8569,6 +10757,15 @@ string-width@^4.0.0, string-width@^4.2.3: is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.1" +string-width@^3.0.0, string-width@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + string-width@^4.1.0, string-width@^4.2.0: version "4.2.2" resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz" @@ -8578,6 +10775,33 @@ string-width@^4.1.0, string-width@^4.2.0: is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.0" +string.prototype.trim@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz#a68352740859f6893f14ce3ef1bb3037f7a90533" + integrity sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + es-abstract "^1.20.4" + +string.prototype.trimend@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz#c4a27fa026d979d79c04f17397f250a462944533" + integrity sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + es-abstract "^1.20.4" + +string.prototype.trimstart@^1.0.6: + version 
"1.0.6" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz#e90ab66aa8e4007d92ef591bbf3cd422c56bdcf4" + integrity sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.4" + es-abstract "^1.20.4" + string_decoder@^1.1.1: version "1.3.0" resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz" @@ -8611,7 +10835,7 @@ strip-ansi@^4.0.0: dependencies: ansi-regex "^3.0.0" -strip-ansi@^5.2.0: +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: version "5.2.0" resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz" integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== @@ -8625,13 +10849,6 @@ strip-ansi@^6.0.0, strip-ansi@^6.0.1: dependencies: ansi-regex "^5.0.1" -strip-bom@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz" - integrity sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4= - dependencies: - is-utf8 "^0.2.0" - strip-final-newline@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz" @@ -8649,27 +10866,27 @@ strip-indent@^2.0.0: resolved "https://registry.npmjs.org/strip-indent/-/strip-indent-2.0.0.tgz" integrity sha1-XvjbKV0B5u1sv3qrlpmNeCJSe2g= -strip-json-comments@3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" - integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== +strip-json-comments@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.0.1.tgz#85713975a91fb87bf1b305cca77395e40d2a64a7" + integrity sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw== 
strip-json-comments@~2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -sublevel-pouchdb@7.3.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/sublevel-pouchdb/-/sublevel-pouchdb-7.3.1.tgz#c1cc03af45081345c7c82821d6dcaa74564ae2ef" - integrity sha512-n+4fK72F/ORdqPwoGgMGYeOrW2HaPpW9o9k80bT1B3Cim5BSvkKkr9WbWOWynni/GHkbCEdvLVFJL1ktosAdhQ== +sublevel-pouchdb@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/sublevel-pouchdb/-/sublevel-pouchdb-7.2.2.tgz#49e46cd37883bf7ff5006d7c5b9bcc7bcc1f422f" + integrity sha512-y5uYgwKDgXVyPZceTDGWsSFAhpSddY29l9PJbXqMJLfREdPmQTY8InpatohlEfCXX7s1LGcrfYAhxPFZaJOLnQ== dependencies: inherits "2.0.4" level-codec "9.0.2" ltgt "2.2.1" readable-stream "1.1.14" -subscriptions-transport-ws@^0.9.19: +subscriptions-transport-ws@^0.9.18, subscriptions-transport-ws@^0.9.19: version "0.9.19" resolved "https://registry.npmjs.org/subscriptions-transport-ws/-/subscriptions-transport-ws-0.9.19.tgz" integrity sha512-dxdemxFFB0ppCLg10FTtRqH/31FNRL1y1BQv8209MK5I4CwALb7iihQg+7p65lFcIl8MHatINWBLOqpgU4Kyyw== @@ -8680,10 +10897,15 @@ subscriptions-transport-ws@^0.9.19: symbol-observable "^1.0.4" ws "^5.2.0 || ^6.0.0 || ^7.0.0" -supports-color@8.1.1, supports-color@^8.1.1: - version "8.1.1" - resolved "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz" - integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== +super-split@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/super-split/-/super-split-1.1.0.tgz#43b3ba719155f4d43891a32729d59b213d9155fc" + integrity sha512-I4bA5mgcb6Fw5UJ+EkpzqXfiuvVGS/7MuND+oBxNFmxu3ugLNrdIatzBLfhFRMVMLxgSsRy+TjIktgkF9RFSNQ== + +supports-color@7.1.0: + version "7.1.0" + resolved 
"https://registry.yarnpkg.com/supports-color/-/supports-color-7.1.0.tgz#68e32591df73e25ad1c4b49108a2ec507962bfd1" + integrity sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g== dependencies: has-flag "^4.0.0" @@ -8706,6 +10928,13 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" +supports-color@^8.1.1: + version "8.1.1" + resolved "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + supports-hyperlinks@^2.2.0: version "2.3.0" resolved "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz" @@ -8714,11 +10943,6 @@ supports-hyperlinks@^2.2.0: has-flag "^4.0.0" supports-color "^7.0.0" -supports-preserve-symlinks-flag@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" - integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== - swap-case@^1.1.0: version "1.1.2" resolved "https://registry.npmjs.org/swap-case/-/swap-case-1.1.2.tgz" @@ -8744,11 +10968,24 @@ swarm-js@^0.1.40: tar "^4.0.2" xhr-request "^1.0.1" -symbol-observable@^1.0.3, symbol-observable@^1.0.4: +symbol-observable@^1.0.3, symbol-observable@^1.0.4, symbol-observable@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/symbol-observable/-/symbol-observable-1.2.0.tgz" integrity sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ== +symbol-observable@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-4.0.0.tgz#5b425f192279e87f2f9b937ac8540d1984b39205" + integrity sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ== + 
+sync-fetch@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/sync-fetch/-/sync-fetch-0.3.0.tgz#77246da949389310ad978ab26790bb05f88d1335" + integrity sha512-dJp4qg+x4JwSEW1HibAuMi0IIrBI3wuQr2GimmqB7OXR50wmwzfdusG+p39R9w3R6aFtZ2mzvxvWKQ3Bd/vx3g== + dependencies: + buffer "^5.7.0" + node-fetch "^2.6.1" + sync-request@6.1.0: version "6.1.0" resolved "https://registry.npmjs.org/sync-request/-/sync-request-6.1.0.tgz" @@ -8788,7 +11025,7 @@ tar-stream@^1.1.2: to-buffer "^1.1.1" xtend "^4.0.0" -tar@^4.0.2: +tar@^4, tar@^4.0.2: version "4.4.19" resolved "https://registry.npmjs.org/tar/-/tar-4.4.19.tgz" integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA== @@ -8813,11 +11050,6 @@ tar@^6.1.0: mkdirp "^1.0.3" yallist "^4.0.0" -testrpc@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/testrpc/-/testrpc-0.0.1.tgz#83e2195b1f5873aec7be1af8cbe6dcf39edb7aed" - integrity sha512-afH1hO+SQ/VPlmaLUFj2636QMeDvPCeQMc/9RBMW0IfjNe9gFD9Ra3ShqYkB7py0do1ZcCna/9acHyzTJ+GcNA== - then-request@^6.0.0: version "6.0.2" resolved "https://registry.npmjs.org/then-request/-/then-request-6.0.2.tgz" @@ -8835,6 +11067,13 @@ then-request@^6.0.0: promise "^8.0.0" qs "^6.4.0" +through2@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/through2/-/through2-3.0.1.tgz#39276e713c3302edf9e388dd9c812dd3b825bd5a" + integrity sha512-M96dvTalPT3YbYLaKaCuwu+j06D/8Jfib0o/PxbVt6Amhv3dUAtW6rTV1jPgJSBG83I/e04Y6xkVdVhSRhi0ww== + dependencies: + readable-stream "2 || 3" + through2@3.0.2: version "3.0.2" resolved "https://registry.npmjs.org/through2/-/through2-3.0.2.tgz" @@ -8848,7 +11087,7 @@ through2@3.0.2: resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz" integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== -timed-out@^4.0.1: +timed-out@^4.0.0, timed-out@^4.0.1: version "4.0.1" resolved "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz" integrity 
sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= @@ -8862,6 +11101,11 @@ timeout-abort-controller@^2.0.0: native-abort-controller "^1.0.4" retimer "^3.0.0" +tiny-queue@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/tiny-queue/-/tiny-queue-0.2.1.tgz#25a67f2c6e253b2ca941977b5ef7442ef97a6046" + integrity sha512-EijGsv7kzd9I9g0ByCl6h42BWNGUZrlCSejfrb3AKeHC33SGbASu1VDf5O3rRiiUOhAC9CHdZxFPbZu0HmR70A== + tiny-typed-emitter@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/tiny-typed-emitter/-/tiny-typed-emitter-2.1.0.tgz#b3b027fdd389ff81a152c8e847ee2f5be9fad7b5" @@ -8911,6 +11155,11 @@ to-fast-properties@^2.0.0: resolved "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz" integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= +to-readable-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" + integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== + to-regex-range@^5.0.1: version "5.0.1" resolved "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz" @@ -8923,6 +11172,14 @@ toidentifier@1.0.1: resolved "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz" integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== +tough-cookie@^2.3.1, tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + "tough-cookie@^2.3.3 || ^3.0.1 || ^4.0.0": version "4.0.0" resolved "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.0.0.tgz" @@ -8932,14 +11189,6 @@ toidentifier@1.0.1: punycode "^2.1.1" universalify "^0.1.2" -tough-cookie@~2.5.0: - version "2.5.0" - resolved 
"https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz" - integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== - dependencies: - psl "^1.1.28" - punycode "^2.1.1" - tr46@~0.0.3: version "0.0.3" resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz" @@ -8962,6 +11211,13 @@ truffle@^5.2: optionalDependencies: "@truffle/db" "^0.5.3" +ts-invariant@^0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/ts-invariant/-/ts-invariant-0.10.3.tgz#3e048ff96e91459ffca01304dbc7f61c1f642f6c" + integrity sha512-uivwYcQaxAucv1CzRp2n/QdYPo4ILf9VXgH19zEIjFx2EJufV16P0JtJVpYHy89DItG6Kwj2oIUjrcK5au+4tQ== + dependencies: + tslib "^2.1.0" + ts-invariant@^0.4.0: version "0.4.4" resolved "https://registry.npmjs.org/ts-invariant/-/ts-invariant-0.4.4.tgz" @@ -8998,15 +11254,30 @@ tslib@^2.0.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0: resolved "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz" integrity sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg== -tslib@^2.1.0: +tslib@^2.0.3, tslib@^2.3.0: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== + +tslib@^2.1.0, tslib@~2.3.0: version "2.3.1" resolved "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz" integrity sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw== -tslib@~2.4.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.1.tgz#0d0bfbaac2880b91e22df0768e55be9753a5b17e" - integrity sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA== +tslib@~2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.0.3.tgz#8e0741ac45fc0c226e58a17bfc3e64b9bc6ca61c" + integrity 
sha512-uZtkfKblCEQtZKBF6EBXVZeQNl82yqtDQdv+eck8u7tdPxjLu2/lp5/uPW+um2tpuxINHWy3GhiccY7QgEaVHQ== + +tslib@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" + integrity sha512-hcVC3wYEziELGGmEEXue7D75zbwIIVUMWAVbHItGPx0ziyXxrOMQx4rQEVEV45Ut/1IotuEvwqPopzIOkDMf0A== + +tslib@~2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.2.0.tgz#fb2c475977e35e241311ede2693cee1ec6698f5c" + integrity sha512-gS9GVHRU+RGn5KQM2rllAlR3dU6m7AcpJKdtH8gFvQiC4Otgk98XnmMU+nZenHt/+VhnBPWwgrJsyrdcw6i23w== tunnel-agent@^0.6.0: version "0.6.0" @@ -9015,21 +11286,11 @@ tunnel-agent@^0.6.0: dependencies: safe-buffer "^5.0.1" -tweetnacl-util@^0.15.0: - version "0.15.1" - resolved "https://registry.yarnpkg.com/tweetnacl-util/-/tweetnacl-util-0.15.1.tgz#b80fcdb5c97bcc508be18c44a4be50f022eea00b" - integrity sha512-RKJBIj8lySrShN4w6i/BonWp2Z/uxwC3h4y7xsRrpP59ZboCd0GpEVsOnMDYLMmKBpYhb5TgHzZXy7wTfYFBRw== - tweetnacl@^0.14.3, tweetnacl@~0.14.0: version "0.14.5" resolved "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz" integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== -tweetnacl@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596" - integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== - type-fest@^0.21.3: version "0.21.3" resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz" @@ -9048,12 +11309,51 @@ type@^1.0.1: resolved "https://registry.npmjs.org/type/-/type-1.2.0.tgz" integrity sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg== -type@^2.7.2: +type@^2.0.0: version "2.7.2" resolved "https://registry.yarnpkg.com/type/-/type-2.7.2.tgz#2376a15a3a28b1efa0f5350dcf72d24df6ef98d0" integrity 
sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw== -typedarray-to-buffer@^3.1.5: +typed-array-buffer@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz#18de3e7ed7974b0a729d3feecb94338d1472cd60" + integrity sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.1" + is-typed-array "^1.1.10" + +typed-array-byte-length@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz#d787a24a995711611fb2b87a4052799517b230d0" + integrity sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA== + dependencies: + call-bind "^1.0.2" + for-each "^0.3.3" + has-proto "^1.0.1" + is-typed-array "^1.1.10" + +typed-array-byte-offset@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz#cbbe89b51fdef9cd6aaf07ad4707340abbc4ea0b" + integrity sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg== + dependencies: + available-typed-arrays "^1.0.5" + call-bind "^1.0.2" + for-each "^0.3.3" + has-proto "^1.0.1" + is-typed-array "^1.1.10" + +typed-array-length@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.4.tgz#89d83785e5c4098bec72e08b319651f0eac9c1bb" + integrity sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng== + dependencies: + call-bind "^1.0.2" + for-each "^0.3.3" + is-typed-array "^1.1.9" + +typedarray-to-buffer@^3.1.5, typedarray-to-buffer@~3.1.5: version "3.1.5" resolved "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz" integrity 
sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== @@ -9084,6 +11384,11 @@ typescript-tuple@^2.2.1: dependencies: typescript-compare "^0.0.2" +ua-parser-js@^1.0.35: + version "1.0.35" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-1.0.35.tgz#c4ef44343bc3db0a3cbefdf21822f1b1fc1ab011" + integrity sha512-fKnGuqmTBnIE+/KXSzCn4db8RTigUzw1AN0DmdU6hJovUTbYJKyqj+8Mt1c4VfRDnOVJnENmfYkIPZ946UrSAA== + uint8arrays@^3.0.0: version "3.1.1" resolved "https://registry.npmjs.org/uint8arrays/-/uint8arrays-3.1.1.tgz" @@ -9096,6 +11401,31 @@ ultron@~1.1.0: resolved "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz" integrity sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og== +unbox-primitive@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e" + integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw== + dependencies: + call-bind "^1.0.2" + has-bigints "^1.0.2" + has-symbols "^1.0.3" + which-boxed-primitive "^1.0.2" + +underscore@1.12.1: + version "1.12.1" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.12.1.tgz#7bb8cc9b3d397e201cf8553336d262544ead829e" + integrity sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw== + +underscore@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.9.1.tgz#06dce34a0e68a7babc29b365b8e74b8925203961" + integrity sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg== + +underscore@^1.8.3: + version "1.13.6" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.6.tgz#04786a1f589dc6c09f761fc5f45b89e935136441" + integrity sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A== + universalify@^0.1.0, 
universalify@^0.1.2: version "0.1.2" resolved "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz" @@ -9106,6 +11436,13 @@ universalify@^2.0.0: resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz" integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== +unixify@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unixify/-/unixify-1.0.0.tgz#3a641c8c2ffbce4da683a5c70f03a462940c2090" + integrity sha512-6bc58dPYhCMHHuwxldQxO3RRNZ4eCogZ/st++0+fcC1nr0jiGUtAdBJ2qzmLQWSxbtz42pWt4QQMiZ9HvZf5cg== + dependencies: + normalize-path "^2.1.1" + unpipe@1.0.0, unpipe@~1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz" @@ -9138,23 +11475,35 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + integrity sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA== + dependencies: + prepend-http "^1.0.1" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ== + dependencies: + prepend-http "^2.0.0" + url-set-query@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/url-set-query/-/url-set-query-1.0.0.tgz" integrity sha1-AW6M/Xwg7gXK/neV6JK9BwL6ozk= +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + integrity sha512-0kQLIzG4fdk/G5NONku64rSH/x32NOA39LVQqlK8Le6lvTF6GGRJpqaQFGgU+CLwySIqBSMdwYM0sYcW9f6P4A== + urlpattern-polyfill@^8.0.0: version "8.0.2" resolved 
"https://registry.npmjs.org/urlpattern-polyfill/-/urlpattern-polyfill-8.0.2.tgz" integrity sha512-Qp95D4TPJl1kC9SKigDcqgyM2VDVO4RiJc2d4qe5GrYm+zbIQCWWKAFaJNQ4BhdFeDGwBmAxqJBwWSJDb9T3BQ== -utf-8-validate@5.0.7: - version "5.0.7" - resolved "https://registry.yarnpkg.com/utf-8-validate/-/utf-8-validate-5.0.7.tgz#c15a19a6af1f7ad9ec7ddc425747ca28c3644922" - integrity sha512-vLt1O5Pp+flcArHGIyKEQq883nBt8nN8tVBcoL0qUXj2XT1n7p70yGIq2VK98I5FdZ1YHc0wk/koOnHjnXWk1Q== - dependencies: - node-gyp-build "^4.3.0" - utf-8-validate@^5.0.2: version "5.0.10" resolved "https://registry.yarnpkg.com/utf-8-validate/-/utf-8-validate-5.0.10.tgz#d7d10ea39318171ca982718b6b96a8d2442571a2" @@ -9172,7 +11521,20 @@ util-deprecate@^1.0.1, util-deprecate@~1.0.1: resolved "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz" integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== -util@^0.12.5: +util.promisify@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.1.2.tgz#02b3dbadbb80071eee4c43aed58747afdfc516db" + integrity sha512-PBdZ03m1kBnQ5cjjO0ZvJMJS+QsbyIcFwi4hY4U76OQsCO9JrOYjbCFgIF76ccFg9xnJo7ZHPkqyj1GqmdS7MA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + for-each "^0.3.3" + has-proto "^1.0.1" + has-symbols "^1.0.3" + object.getownpropertydescriptors "^2.1.6" + safe-array-concat "^1.0.0" + +util@^0.12.0, util@^0.12.5: version "0.12.5" resolved "https://registry.yarnpkg.com/util/-/util-0.12.5.tgz#5f17a6059b73db61a875668781a1c2b136bd6fbc" integrity sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA== @@ -9193,16 +11555,31 @@ uuid@2.0.1: resolved "https://registry.npmjs.org/uuid/-/uuid-2.0.1.tgz" integrity sha1-wqMN7bPlNdcsz4LjQ5QaULqFM6w= -uuid@8.3.2, uuid@^8.3.2: - version "8.3.2" - resolved "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz" - integrity 
sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +uuid@3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.2.1.tgz#12c528bb9d58d0b9265d9a2f6f0fe8be17ff1f14" + integrity sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA== + +uuid@3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== + +uuid@8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.1.0.tgz#6f1536eb43249f473abc6bd58ff983da1ca30d8d" + integrity sha512-CI18flHDznR0lq54xBycOVmphdCYnQLKn8abKn7PXUiKUGdEd+/l9LWNJmugXel4hXq7S+RMNl34ecyC9TntWg== uuid@^3.1.0, uuid@^3.3.2: version "3.4.0" resolved "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz" integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== +uuid@^8.0.0, uuid@^8.3.2: + version "8.3.2" + resolved "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz" + integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== + uuid@^9.0.0: version "9.0.0" resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5" @@ -9213,23 +11590,20 @@ v8-compile-cache-lib@^3.0.1: resolved "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz" integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== -validate-npm-package-license@^3.0.1: - version "3.0.4" - resolved "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz" - integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== - dependencies: - spdx-correct "^3.0.0" - spdx-expression-parse "^3.0.0" 
+valid-url@1.0.9: + version "1.0.9" + resolved "https://registry.yarnpkg.com/valid-url/-/valid-url-1.0.9.tgz#1c14479b40f1397a75782f115e4086447433a200" + integrity sha512-QQDsV8OnSf5Uc30CKSwG9lnhMPe6exHtTXLRYX8uMwKENy640pU+2BgBL0LRbDh/eYRahNCS7aewCx0wf3NYVA== value-or-promise@1.0.11: version "1.0.11" resolved "https://registry.yarnpkg.com/value-or-promise/-/value-or-promise-1.0.11.tgz#3e90299af31dd014fe843fe309cefa7c1d94b140" integrity sha512-41BrgH+dIbCFXClcSapVs5M6GkENd3gQOJpEfPDNa71LsUGMXDL0jMWpI/Rh7WhX+Aalfz2TTS3Zt5pUsbnhLg== -value-or-promise@^1.0.12: - version "1.0.12" - resolved "https://registry.yarnpkg.com/value-or-promise/-/value-or-promise-1.0.12.tgz#0e5abfeec70148c78460a849f6b003ea7986f15c" - integrity sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q== +value-or-promise@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/value-or-promise/-/value-or-promise-1.0.6.tgz#218aa4794aa2ee24dcf48a29aba4413ed584747f" + integrity sha512-9r0wQsWD8z/BxPOvnwbPf05ZvFngXyouE9EKB+5GbYix+BYnAwrIChCUyFIinfbf2FL/U71z+CPpbnmTdxrwBg== varint@^5.0.0: version "5.0.2" @@ -9281,6 +11655,35 @@ web3-bzz@1.10.0: got "12.1.0" swarm-js "^0.1.40" +web3-bzz@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.10.2.tgz#482dfddcc5f65d5877b37cc20775725220b4ad87" + integrity sha512-vLOfDCj6198Qc7esDrCKeFA/M3ZLbowsaHQ0hIL4NmIHoq7lU8aSRTa5AI+JBh8cKN1gVryJsuW2ZCc5bM4I4Q== + dependencies: + "@types/node" "^12.12.6" + got "12.1.0" + swarm-js "^0.1.40" + +web3-bzz@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.2.9.tgz#25f8a373bc2dd019f47bf80523546f98b93c8790" + integrity sha512-ogVQr9jHodu9HobARtvUSmWG22cv2EUQzlPeejGWZ7j5h20HX40EDuWyomGY5VclIj5DdLY76Tmq88RTf/6nxA== + dependencies: + "@types/node" "^10.12.18" + got "9.6.0" + swarm-js "^0.1.40" + underscore "1.9.1" + +web3-bzz@1.3.6: + version "1.3.6" + resolved 
"https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.3.6.tgz#95f370aecc3ff6ad07f057e6c0c916ef09b04dde" + integrity sha512-ibHdx1wkseujFejrtY7ZyC0QxQ4ATXjzcNUpaLrvM6AEae8prUiyT/OloG9FWDgFD2CPLwzKwfSQezYQlANNlw== + dependencies: + "@types/node" "^12.12.6" + got "9.6.0" + swarm-js "^0.1.40" + underscore "1.12.1" + web3-core-helpers@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.10.0.tgz#1016534c51a5df77ed4f94d1fcce31de4af37fad" @@ -9289,6 +11692,32 @@ web3-core-helpers@1.10.0: web3-eth-iban "1.10.0" web3-utils "1.10.0" +web3-core-helpers@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.10.2.tgz#bd47686c0e74ef4475713c581f9306a035ce8a74" + integrity sha512-1JfaNtox6/ZYJHNoI+QVc2ObgwEPeGF+YdxHZQ7aF5605BmlwM1Bk3A8xv6mg64jIRvEq1xX6k9oG6x7p1WgXQ== + dependencies: + web3-eth-iban "1.10.2" + web3-utils "1.10.2" + +web3-core-helpers@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.2.9.tgz#6381077c3e01c127018cb9e9e3d1422697123315" + integrity sha512-t0WAG3orLCE3lqi77ZoSRNFok3VQWZXTniZigDQjyOJYMAX7BU3F3js8HKbjVnAxlX3tiKoDxI0KBk9F3AxYuw== + dependencies: + underscore "1.9.1" + web3-eth-iban "1.2.9" + web3-utils "1.2.9" + +web3-core-helpers@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.3.6.tgz#c478246a9abe4e5456acf42657dac2f7c330be74" + integrity sha512-nhtjA2ZbkppjlxTSwG0Ttu6FcPkVu1rCN5IFAOVpF/L0SEt+jy+O5l90+cjDq0jAYvlBwUwnbh2mR9hwDEJCNA== + dependencies: + underscore "1.12.1" + web3-eth-iban "1.3.6" + web3-utils "1.3.6" + web3-core-method@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.10.0.tgz#82668197fa086e8cc8066742e35a9d72535e3412" @@ -9300,6 +11729,41 @@ web3-core-method@1.10.0: web3-core-subscriptions "1.10.0" web3-utils "1.10.0" +web3-core-method@1.10.2: + version "1.10.2" + resolved 
"https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.10.2.tgz#4adf3f8c8d0776f0f320e583b791955c41037971" + integrity sha512-gG6ES+LOuo01MJHML4gnEt702M8lcPGMYZoX8UjZzmEebGrPYOY9XccpCrsFgCeKgQzM12SVnlwwpMod1+lcLg== + dependencies: + "@ethersproject/transactions" "^5.6.2" + web3-core-helpers "1.10.2" + web3-core-promievent "1.10.2" + web3-core-subscriptions "1.10.2" + web3-utils "1.10.2" + +web3-core-method@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.2.9.tgz#3fb538751029bea570e4f86731e2fa5e4945e462" + integrity sha512-bjsIoqP3gs7A/gP8+QeLUCyOKJ8bopteCSNbCX36Pxk6TYfYWNuC6hP+2GzUuqdP3xaZNe+XEElQFUNpR3oyAg== + dependencies: + "@ethersproject/transactions" "^5.0.0-beta.135" + underscore "1.9.1" + web3-core-helpers "1.2.9" + web3-core-promievent "1.2.9" + web3-core-subscriptions "1.2.9" + web3-utils "1.2.9" + +web3-core-method@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.3.6.tgz#4b0334edd94b03dfec729d113c69a4eb6ebc68ae" + integrity sha512-RyegqVGxn0cyYW5yzAwkPlsSEynkdPiegd7RxgB4ak1eKk2Cv1q2x4C7D2sZjeeCEF+q6fOkVmo2OZNqS2iQxg== + dependencies: + "@ethersproject/transactions" "^5.0.0-beta.135" + underscore "1.12.1" + web3-core-helpers "1.3.6" + web3-core-promievent "1.3.6" + web3-core-subscriptions "1.3.6" + web3-utils "1.3.6" + web3-core-promievent@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.10.0.tgz#cbb5b3a76b888df45ed3a8d4d8d4f54ccb66a37b" @@ -9307,6 +11771,27 @@ web3-core-promievent@1.10.0: dependencies: eventemitter3 "4.0.4" +web3-core-promievent@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.10.2.tgz#13b380b69ee05c5bf075836be64c2f3b8bdc1a5f" + integrity sha512-Qkkb1dCDOU8dZeORkcwJBQRAX+mdsjx8LqFBB+P4W9QgwMqyJ6LXda+y1XgyeEVeKEmY1RCeTq9Y94q1v62Sfw== + dependencies: + eventemitter3 "4.0.4" + 
+web3-core-promievent@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.2.9.tgz#bb1c56aa6fac2f4b3c598510f06554d25c11c553" + integrity sha512-0eAUA2zjgXTleSrnc1wdoKQPPIHU6KHf4fAscu4W9kKrR+mqP1KsjYrxY9wUyjNnXxfQ+5M29ipvbiaK8OqdOw== + dependencies: + eventemitter3 "3.1.2" + +web3-core-promievent@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.3.6.tgz#6c27dc79de8f71b74f5d17acaf9aaf593d3cb0c9" + integrity sha512-Z+QzfyYDTXD5wJmZO5wwnRO8bAAHEItT1XNSPVb4J1CToV/I/SbF7CuF8Uzh2jns0Cm1109o666H7StFFvzVKw== + dependencies: + eventemitter3 "4.0.4" + web3-core-requestmanager@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.10.0.tgz#4b34f6e05837e67c70ff6f6993652afc0d54c340" @@ -9318,6 +11803,40 @@ web3-core-requestmanager@1.10.0: web3-providers-ipc "1.10.0" web3-providers-ws "1.10.0" +web3-core-requestmanager@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.10.2.tgz#f5b1264c6470c033f08e21210b0af0c23497c68a" + integrity sha512-nlLeNJUu6fR+ZbJr2k9Du/nN3VWwB4AJPY4r6nxUODAmykgJq57T21cLP/BEk6mbiFQYGE9TrrPhh4qWxQEtAw== + dependencies: + util "^0.12.5" + web3-core-helpers "1.10.2" + web3-providers-http "1.10.2" + web3-providers-ipc "1.10.2" + web3-providers-ws "1.10.2" + +web3-core-requestmanager@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.2.9.tgz#dd6d855256c4dd681434fe0867f8cd742fe10503" + integrity sha512-1PwKV2m46ALUnIN5VPPgjOj8yMLJhhqZYvYJE34hTN5SErOkwhzx5zScvo5MN7v7KyQGFnpVCZKKGCiEnDmtFA== + dependencies: + underscore "1.9.1" + web3-core-helpers "1.2.9" + web3-providers-http "1.2.9" + web3-providers-ipc "1.2.9" + web3-providers-ws "1.2.9" + +web3-core-requestmanager@1.3.6: + version "1.3.6" + resolved 
"https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.3.6.tgz#4fea269fe913fd4fca464b4f7c65cb94857b5b2a" + integrity sha512-2rIaeuqeo7QN1Eex7aXP0ZqeteJEPWXYFS/M3r3LXMiV8R4STQBKE+//dnHJXoo2ctzEB5cgd+7NaJM8S3gPyA== + dependencies: + underscore "1.12.1" + util "^0.12.0" + web3-core-helpers "1.3.6" + web3-providers-http "1.3.6" + web3-providers-ipc "1.3.6" + web3-providers-ws "1.3.6" + web3-core-subscriptions@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.10.0.tgz#b534592ee1611788fc0cb0b95963b9b9b6eacb7c" @@ -9326,6 +11845,32 @@ web3-core-subscriptions@1.10.0: eventemitter3 "4.0.4" web3-core-helpers "1.10.0" +web3-core-subscriptions@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.10.2.tgz#d325483141ab1406241d6707b86fd6944e4b7ea6" + integrity sha512-MiWcKjz4tco793EPPPLc/YOJmYUV3zAfxeQH/UVTfBejMfnNvmfwKa2SBKfPIvKQHz/xI5bV2TF15uvJEucU7w== + dependencies: + eventemitter3 "4.0.4" + web3-core-helpers "1.10.2" + +web3-core-subscriptions@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.2.9.tgz#335fd7d15dfce5d78b4b7bef05ce4b3d7237b0e4" + integrity sha512-Y48TvXPSPxEM33OmXjGVDMzTd0j8X0t2+sDw66haeBS8eYnrEzasWuBZZXDq0zNUsqyxItgBGDn+cszkgEnFqg== + dependencies: + eventemitter3 "3.1.2" + underscore "1.9.1" + web3-core-helpers "1.2.9" + +web3-core-subscriptions@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.3.6.tgz#ee24e7974d1d72ff6c992c599deba4ef9b308415" + integrity sha512-wi9Z9X5X75OKvxAg42GGIf81ttbNR2TxzkAsp1g+nnp5K8mBwgZvXrIsDuj7Z7gx72Y45mWJADCWjk/2vqNu8g== + dependencies: + eventemitter3 "4.0.4" + underscore "1.12.1" + web3-core-helpers "1.3.6" + web3-core@1.10.0: version "1.10.0" resolved 
"https://registry.yarnpkg.com/web3-core/-/web3-core-1.10.0.tgz#9aa07c5deb478cf356c5d3b5b35afafa5fa8e633" @@ -9339,6 +11884,45 @@ web3-core@1.10.0: web3-core-requestmanager "1.10.0" web3-utils "1.10.0" +web3-core@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-core/-/web3-core-1.10.2.tgz#464a15335b3adecc4a1cdd53c89b995769059f03" + integrity sha512-qTn2UmtE8tvwMRsC5pXVdHxrQ4uZ6jiLgF5DRUVtdi7dPUmX18Dp9uxKfIfhGcA011EAn8P6+X7r3pvi2YRxBw== + dependencies: + "@types/bn.js" "^5.1.1" + "@types/node" "^12.12.6" + bignumber.js "^9.0.0" + web3-core-helpers "1.10.2" + web3-core-method "1.10.2" + web3-core-requestmanager "1.10.2" + web3-utils "1.10.2" + +web3-core@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core/-/web3-core-1.2.9.tgz#2cba57aa259b6409db532d21bdf57db8d504fd3e" + integrity sha512-fSYv21IP658Ty2wAuU9iqmW7V+75DOYMVZsDH/c14jcF/1VXnedOcxzxSj3vArsCvXZNe6XC5/wAuGZyQwR9RA== + dependencies: + "@types/bn.js" "^4.11.4" + "@types/node" "^12.6.1" + bignumber.js "^9.0.0" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-requestmanager "1.2.9" + web3-utils "1.2.9" + +web3-core@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-core/-/web3-core-1.3.6.tgz#a6a761d1ff2f3ee462b8dab679229d2f8e267504" + integrity sha512-gkLDM4T1Sc0T+HZIwxrNrwPg0IfWI0oABSglP2X5ZbBAYVUeEATA0o92LWV8BeF+okvKXLK1Fek/p6axwM/h3Q== + dependencies: + "@types/bn.js" "^4.11.5" + "@types/node" "^12.12.6" + bignumber.js "^9.0.0" + web3-core-helpers "1.3.6" + web3-core-method "1.3.6" + web3-core-requestmanager "1.3.6" + web3-utils "1.3.6" + web3-eth-abi@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.10.0.tgz#53a7a2c95a571e205e27fd9e664df4919483cce1" @@ -9347,6 +11931,32 @@ web3-eth-abi@1.10.0: "@ethersproject/abi" "^5.6.3" web3-utils "1.10.0" +web3-eth-abi@1.10.2: + version "1.10.2" + resolved 
"https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.10.2.tgz#65db4af1acb0b72cb9d10cd6f045a8bcdb270b1b" + integrity sha512-pY4fQUio7W7ZRSLf+vsYkaxJqaT/jHcALZjIxy+uBQaYAJ3t6zpQqMZkJB3Dw7HUODRJ1yI0NPEFGTnkYf/17A== + dependencies: + "@ethersproject/abi" "^5.6.3" + web3-utils "1.10.2" + +web3-eth-abi@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.2.9.tgz#14bedd7e4be04fcca35b2ac84af1400574cd8280" + integrity sha512-3YwUYbh/DMfDbhMWEebAdjSd5bj3ZQieOjLzWFHU23CaLEqT34sUix1lba+hgUH/EN6A7bKAuKOhR3p0OvTn7Q== + dependencies: + "@ethersproject/abi" "5.0.0-beta.153" + underscore "1.9.1" + web3-utils "1.2.9" + +web3-eth-abi@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.3.6.tgz#4272ca48d817aa651bbf97b269f5ff10abc2b8a9" + integrity sha512-Or5cRnZu6WzgScpmbkvC6bfNxR26hqiKK4i8sMPFeTUABQcb/FU3pBj7huBLYbp9dH+P5W79D2MqwbWwjj9DoQ== + dependencies: + "@ethersproject/abi" "5.0.7" + underscore "1.12.1" + web3-utils "1.3.6" + web3-eth-abi@1.7.0: version "1.7.0" resolved "https://registry.npmjs.org/web3-eth-abi/-/web3-eth-abi-1.7.0.tgz" @@ -9360,16 +11970,66 @@ web3-eth-accounts@1.10.0: resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.10.0.tgz#2942beca0a4291455f32cf09de10457a19a48117" integrity sha512-wiq39Uc3mOI8rw24wE2n15hboLE0E9BsQLdlmsL4Zua9diDS6B5abXG0XhFcoNsXIGMWXVZz4TOq3u4EdpXF/Q== dependencies: - "@ethereumjs/common" "2.5.0" - "@ethereumjs/tx" "3.3.2" + "@ethereumjs/common" "2.5.0" + "@ethereumjs/tx" "3.3.2" + eth-lib "0.2.8" + ethereumjs-util "^7.1.5" + scrypt-js "^3.0.1" + uuid "^9.0.0" + web3-core "1.10.0" + web3-core-helpers "1.10.0" + web3-core-method "1.10.0" + web3-utils "1.10.0" + +web3-eth-accounts@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.10.2.tgz#5ce9e4de0f84a88e72801810b98cc25164956404" + integrity 
sha512-6/HhCBYAXN/f553/SyxS9gY62NbLgpD1zJpENcvRTDpJN3Znvli1cmpl5Q3ZIUJkvHnG//48EWfWh0cbb3fbKQ== + dependencies: + "@ethereumjs/common" "2.5.0" + "@ethereumjs/tx" "3.3.2" + "@ethereumjs/util" "^8.1.0" + eth-lib "0.2.8" + scrypt-js "^3.0.1" + uuid "^9.0.0" + web3-core "1.10.2" + web3-core-helpers "1.10.2" + web3-core-method "1.10.2" + web3-utils "1.10.2" + +web3-eth-accounts@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.2.9.tgz#7ec422df90fecb5243603ea49dc28726db7bdab6" + integrity sha512-jkbDCZoA1qv53mFcRHCinoCsgg8WH+M0YUO1awxmqWXRmCRws1wW0TsuSQ14UThih5Dxolgl+e+aGWxG58LMwg== + dependencies: + crypto-browserify "3.12.0" + eth-lib "^0.2.8" + ethereumjs-common "^1.3.2" + ethereumjs-tx "^2.1.1" + scrypt-js "^3.0.1" + underscore "1.9.1" + uuid "3.3.2" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-utils "1.2.9" + +web3-eth-accounts@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.3.6.tgz#f9fcb50b28ee58090ab292a10d996155caa2b474" + integrity sha512-Ilr0hG6ONbCdSlVKffasCmNwftD5HsNpwyQASevocIQwHdTlvlwO0tb3oGYuajbKOaDzNTwXfz25bttAEoFCGA== + dependencies: + crypto-browserify "3.12.0" eth-lib "0.2.8" - ethereumjs-util "^7.1.5" + ethereumjs-common "^1.3.2" + ethereumjs-tx "^2.1.1" scrypt-js "^3.0.1" - uuid "^9.0.0" - web3-core "1.10.0" - web3-core-helpers "1.10.0" - web3-core-method "1.10.0" - web3-utils "1.10.0" + underscore "1.12.1" + uuid "3.3.2" + web3-core "1.3.6" + web3-core-helpers "1.3.6" + web3-core-method "1.3.6" + web3-utils "1.3.6" web3-eth-contract@1.10.0: version "1.10.0" @@ -9385,6 +12045,50 @@ web3-eth-contract@1.10.0: web3-eth-abi "1.10.0" web3-utils "1.10.0" +web3-eth-contract@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth-contract/-/web3-eth-contract-1.10.2.tgz#9114c52ba5ca5859f3403abea69a13f8678828ad" + integrity 
sha512-CZLKPQRmupP/+OZ5A/CBwWWkBiz5B/foOpARz0upMh1yjb0dEud4YzRW2gJaeNu0eGxDLsWVaXhUimJVGYprQw== + dependencies: + "@types/bn.js" "^5.1.1" + web3-core "1.10.2" + web3-core-helpers "1.10.2" + web3-core-method "1.10.2" + web3-core-promievent "1.10.2" + web3-core-subscriptions "1.10.2" + web3-eth-abi "1.10.2" + web3-utils "1.10.2" + +web3-eth-contract@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-contract/-/web3-eth-contract-1.2.9.tgz#713d9c6d502d8c8f22b696b7ffd8e254444e6bfd" + integrity sha512-PYMvJf7EG/HyssUZa+pXrc8IB06K/YFfWYyW4R7ed3sab+9wWUys1TlWxBCBuiBXOokSAyM6H6P6/cKEx8FT8Q== + dependencies: + "@types/bn.js" "^4.11.4" + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-promievent "1.2.9" + web3-core-subscriptions "1.2.9" + web3-eth-abi "1.2.9" + web3-utils "1.2.9" + +web3-eth-contract@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-contract/-/web3-eth-contract-1.3.6.tgz#cccf4d32dc56917fb6923e778498a9ba2a5ba866" + integrity sha512-8gDaRrLF2HCg+YEZN1ov0zN35vmtPnGf3h1DxmJQK5Wm2lRMLomz9rsWsuvig3UJMHqZAQKD7tOl3ocJocQsmA== + dependencies: + "@types/bn.js" "^4.11.5" + underscore "1.12.1" + web3-core "1.3.6" + web3-core-helpers "1.3.6" + web3-core-method "1.3.6" + web3-core-promievent "1.3.6" + web3-core-subscriptions "1.3.6" + web3-eth-abi "1.3.6" + web3-utils "1.3.6" + web3-eth-ens@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.10.0.tgz#96a676524e0b580c87913f557a13ed810cf91cd9" @@ -9399,6 +12103,50 @@ web3-eth-ens@1.10.0: web3-eth-contract "1.10.0" web3-utils "1.10.0" +web3-eth-ens@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.10.2.tgz#5708e1830ab261b139882cc43662afb3a733112e" + integrity sha512-kTQ42UdNHy4BQJHgWe97bHNMkc3zCMBKKY7t636XOMxdI/lkRdIjdE5nQzt97VjQvSVasgIWYKRAtd8aRaiZiQ== + dependencies: + content-hash "^2.5.2" + eth-ens-namehash "2.0.8" + web3-core 
"1.10.2" + web3-core-helpers "1.10.2" + web3-core-promievent "1.10.2" + web3-eth-abi "1.10.2" + web3-eth-contract "1.10.2" + web3-utils "1.10.2" + +web3-eth-ens@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.2.9.tgz#577b9358c036337833fb2bdc59c11be7f6f731b6" + integrity sha512-kG4+ZRgZ8I1WYyOBGI8QVRHfUSbbJjvJAGA1AF/NOW7JXQ+x7gBGeJw6taDWJhSshMoEKWcsgvsiuoG4870YxQ== + dependencies: + content-hash "^2.5.2" + eth-ens-namehash "2.0.8" + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-promievent "1.2.9" + web3-eth-abi "1.2.9" + web3-eth-contract "1.2.9" + web3-utils "1.2.9" + +web3-eth-ens@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.3.6.tgz#0d28c5d4ea7b4462ef6c077545a77956a6cdf175" + integrity sha512-n27HNj7lpSkRxTgSx+Zo7cmKAgyg2ElFilaFlUu/X2CNH23lXfcPm2bWssivH9z0ndhg0OyR4AYFZqPaqDHkJA== + dependencies: + content-hash "^2.5.2" + eth-ens-namehash "2.0.8" + underscore "1.12.1" + web3-core "1.3.6" + web3-core-helpers "1.3.6" + web3-core-promievent "1.3.6" + web3-eth-abi "1.3.6" + web3-eth-contract "1.3.6" + web3-utils "1.3.6" + web3-eth-iban@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.10.0.tgz#5a46646401965b0f09a4f58e7248c8a8cd22538a" @@ -9407,6 +12155,30 @@ web3-eth-iban@1.10.0: bn.js "^5.2.1" web3-utils "1.10.0" +web3-eth-iban@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.10.2.tgz#f8e668034834c5be038adeb14c39b923e9257558" + integrity sha512-y8+Ii2XXdyHQMFNL2NWpBnXe+TVJ4ryvPlzNhObRRnIo4O4nLIXS010olLDMayozDzoUlmzCmBZJYc9Eev1g7A== + dependencies: + bn.js "^5.2.1" + web3-utils "1.10.2" + +web3-eth-iban@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.2.9.tgz#4ebf3d8783f34d04c4740dc18938556466399f7a" + integrity 
sha512-RtdVvJE0pyg9dHLy0GzDiqgnLnssSzfz/JYguhC1wsj9+Gnq1M6Diy3NixACWUAp6ty/zafyOaZnNQ+JuH9TjQ== + dependencies: + bn.js "4.11.8" + web3-utils "1.2.9" + +web3-eth-iban@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.3.6.tgz#0d6ba21fe78f190af8919e9cd5453882457209e0" + integrity sha512-nfMQaaLA/zsg5W4Oy/EJQbs8rSs1vBAX6b/35xzjYoutXlpHMQadujDx2RerTKhSHqFXSJeQAfE+2f6mdhYkRQ== + dependencies: + bn.js "^4.11.9" + web3-utils "1.3.6" + web3-eth-personal@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.10.0.tgz#94d525f7a29050a0c2a12032df150ac5ea633071" @@ -9419,6 +12191,42 @@ web3-eth-personal@1.10.0: web3-net "1.10.0" web3-utils "1.10.0" +web3-eth-personal@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.10.2.tgz#a281cc1cecb2f3243ac0467c075a1579fa562901" + integrity sha512-+vEbJsPUJc5J683y0c2aN645vXC+gPVlFVCQu4IjPvXzJrAtUfz26+IZ6AUOth4fDJPT0f1uSLS5W2yrUdw9BQ== + dependencies: + "@types/node" "^12.12.6" + web3-core "1.10.2" + web3-core-helpers "1.10.2" + web3-core-method "1.10.2" + web3-net "1.10.2" + web3-utils "1.10.2" + +web3-eth-personal@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.2.9.tgz#9b95eb159b950b83cd8ae15873e1d57711b7a368" + integrity sha512-cFiNrktxZ1C/rIdJFzQTvFn3/0zcsR3a+Jf8Y3KxeQDHszQtosjLWptP7bsUmDwEh4hzh0Cy3KpOxlYBWB8bJQ== + dependencies: + "@types/node" "^12.6.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-net "1.2.9" + web3-utils "1.2.9" + +web3-eth-personal@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.3.6.tgz#226137916754c498f0284f22c55924c87a2efcf0" + integrity sha512-pOHU0+/h1RFRYoh1ehYBehRbcKWP4OSzd4F7mDljhHngv6W8ewMHrAN8O1ol9uysN2MuCdRE19qkRg5eNgvzFQ== + dependencies: + "@types/node" "^12.12.6" + web3-core "1.3.6" + web3-core-helpers 
"1.3.6" + web3-core-method "1.3.6" + web3-net "1.3.6" + web3-utils "1.3.6" + web3-eth@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.10.0.tgz#38b905e2759697c9624ab080cfcf4e6c60b3a6cf" @@ -9437,6 +12245,62 @@ web3-eth@1.10.0: web3-net "1.10.0" web3-utils "1.10.0" +web3-eth@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.10.2.tgz#46baa0d8a1203b425f77ac2cf823fbb73666fcb9" + integrity sha512-s38rhrntyhGShmXC4R/aQtfkpcmev9c7iZwgb9CDIBFo7K8nrEJvqIOyajeZTxnDIiGzTJmrHxiKSadii5qTRg== + dependencies: + web3-core "1.10.2" + web3-core-helpers "1.10.2" + web3-core-method "1.10.2" + web3-core-subscriptions "1.10.2" + web3-eth-abi "1.10.2" + web3-eth-accounts "1.10.2" + web3-eth-contract "1.10.2" + web3-eth-ens "1.10.2" + web3-eth-iban "1.10.2" + web3-eth-personal "1.10.2" + web3-net "1.10.2" + web3-utils "1.10.2" + +web3-eth@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.2.9.tgz#e40e7b88baffc9b487193211c8b424dc944977b3" + integrity sha512-sIKO4iE9FEBa/CYUd6GdPd7GXt/wISqxUd8PlIld6+hvMJj02lgO7Z7p5T9mZIJcIZJGvZX81ogx8oJ9yif+Ag== + dependencies: + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-subscriptions "1.2.9" + web3-eth-abi "1.2.9" + web3-eth-accounts "1.2.9" + web3-eth-contract "1.2.9" + web3-eth-ens "1.2.9" + web3-eth-iban "1.2.9" + web3-eth-personal "1.2.9" + web3-net "1.2.9" + web3-utils "1.2.9" + +web3-eth@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.3.6.tgz#2c650893d540a7a0eb1365dd5b2dca24ac919b7c" + integrity sha512-9+rnywRRpyX3C4hfsAQXPQh6vHh9XzQkgLxo3gyeXfbhbShUoq2gFVuy42vsRs//6JlsKdyZS7Z3hHPHz2wreA== + dependencies: + underscore "1.12.1" + web3-core "1.3.6" + web3-core-helpers "1.3.6" + web3-core-method "1.3.6" + web3-core-subscriptions "1.3.6" + web3-eth-abi "1.3.6" + web3-eth-accounts "1.3.6" + web3-eth-contract "1.3.6" + web3-eth-ens "1.3.6" + 
web3-eth-iban "1.3.6" + web3-eth-personal "1.3.6" + web3-net "1.3.6" + web3-utils "1.3.6" + web3-net@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.10.0.tgz#be53e7f5dafd55e7c9013d49c505448b92c9c97b" @@ -9446,6 +12310,33 @@ web3-net@1.10.0: web3-core-method "1.10.0" web3-utils "1.10.0" +web3-net@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.10.2.tgz#77f39dea930619035d3bf99969941870f2f0c550" + integrity sha512-w9i1t2z7dItagfskhaCKwpp6W3ylUR88gs68u820y5f8yfK5EbPmHc6c2lD8X9ZrTnmDoeOpIRCN/RFPtZCp+g== + dependencies: + web3-core "1.10.2" + web3-core-method "1.10.2" + web3-utils "1.10.2" + +web3-net@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.2.9.tgz#51d248ed1bc5c37713c4ac40c0073d9beacd87d3" + integrity sha512-d2mTn8jPlg+SI2hTj2b32Qan6DmtU9ap/IUlJTeQbZQSkTLf0u9suW8Vjwyr4poJYXTurdSshE7OZsPNn30/ZA== + dependencies: + web3-core "1.2.9" + web3-core-method "1.2.9" + web3-utils "1.2.9" + +web3-net@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.3.6.tgz#a56492e2227475e38db29394f8bac305a2446e41" + integrity sha512-KhzU3wMQY/YYjyMiQzbaLPt2kut88Ncx2iqjy3nw28vRux3gVX0WOCk9EL/KVJBiAA/fK7VklTXvgy9dZnnipw== + dependencies: + web3-core "1.3.6" + web3-core-method "1.3.6" + web3-utils "1.3.6" + web3-providers-http@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.10.0.tgz#864fa48675e7918c9a4374e5f664b32c09d0151b" @@ -9456,6 +12347,32 @@ web3-providers-http@1.10.0: es6-promise "^4.2.8" web3-core-helpers "1.10.0" +web3-providers-http@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.10.2.tgz#8bd54b5bc5bcc50612fd52af65bd773f926045f7" + integrity sha512-G8abKtpkyKGpRVKvfjIF3I4O/epHP7mxXWN8mNMQLkQj1cjMFiZBZ13f+qI77lNJN7QOf6+LtNdKrhsTGU72TA== + dependencies: + abortcontroller-polyfill "^1.7.5" + cross-fetch 
"^4.0.0" + es6-promise "^4.2.8" + web3-core-helpers "1.10.2" + +web3-providers-http@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.2.9.tgz#e698aa5377e2019c24c5a1e6efa0f51018728934" + integrity sha512-F956tCIj60Ttr0UvEHWFIhx+be3He8msoPzyA44/kfzzYoMAsCFRn5cf0zQG6al0znE75g6HlWVSN6s3yAh51A== + dependencies: + web3-core-helpers "1.2.9" + xhr2-cookies "1.1.0" + +web3-providers-http@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.3.6.tgz#36e8724a7424d52827819d53fd75dbf31f5422c2" + integrity sha512-OQkT32O1A06dISIdazpGLveZcOXhEo5cEX6QyiSQkiPk/cjzDrXMw4SKZOGQbbS1+0Vjizm1Hrp7O8Vp2D1M5Q== + dependencies: + web3-core-helpers "1.3.6" + xhr2-cookies "1.1.0" + web3-providers-ipc@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.10.0.tgz#9747c7a6aee96a51488e32fa7c636c3460b39889" @@ -9464,6 +12381,32 @@ web3-providers-ipc@1.10.0: oboe "2.1.5" web3-core-helpers "1.10.0" +web3-providers-ipc@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.10.2.tgz#4314a04c1d68f5d1cb2d047d027db97c85f921f7" + integrity sha512-lWbn6c+SgvhLymU8u4Ea/WOVC0Gqs7OJUvauejWz+iLycxeF0xFNyXnHVAi42ZJDPVI3vnfZotafoxcNNL7Sug== + dependencies: + oboe "2.1.5" + web3-core-helpers "1.10.2" + +web3-providers-ipc@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.2.9.tgz#6159eacfcd7ac31edc470d93ef10814fe874763b" + integrity sha512-NQ8QnBleoHA2qTJlqoWu7EJAD/FR5uimf7Ielzk4Z2z+m+6UAuJdJMSuQNj+Umhz9L/Ys6vpS1vHx9NizFl+aQ== + dependencies: + oboe "2.1.4" + underscore "1.9.1" + web3-core-helpers "1.2.9" + +web3-providers-ipc@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.3.6.tgz#cef8d12c1ebb47adce5ebf597f553c623362cb4a" + integrity 
sha512-+TVsSd2sSVvVgHG4s6FXwwYPPT91boKKcRuEFXqEfAbUC5t52XOgmyc2LNiD9LzPhed65FbV4LqICpeYGUvSwA== + dependencies: + oboe "2.1.5" + underscore "1.12.1" + web3-core-helpers "1.3.6" + web3-providers-ws@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.10.0.tgz#cb0b87b94c4df965cdf486af3a8cd26daf3975e5" @@ -9473,6 +12416,35 @@ web3-providers-ws@1.10.0: web3-core-helpers "1.10.0" websocket "^1.0.32" +web3-providers-ws@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.10.2.tgz#00bf6e00080dd82b8ad7fbed657a6d20ecc532de" + integrity sha512-3nYSiP6grI5GvpkSoehctSywfCTodU21VY8bUtXyFHK/IVfDooNtMpd5lVIMvXVAlaxwwrCfjebokaJtKH2Iag== + dependencies: + eventemitter3 "4.0.4" + web3-core-helpers "1.10.2" + websocket "^1.0.32" + +web3-providers-ws@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.2.9.tgz#22c2006655ec44b4ad2b41acae62741a6ae7a88c" + integrity sha512-6+UpvINeI//dglZoAKStUXqxDOXJy6Iitv2z3dbgInG4zb8tkYl/VBDL80UjUg3ZvzWG0g7EKY2nRPEpON2TFA== + dependencies: + eventemitter3 "^4.0.0" + underscore "1.9.1" + web3-core-helpers "1.2.9" + websocket "^1.0.31" + +web3-providers-ws@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.3.6.tgz#e1df617bc89d66165abdf2191da0014c505bfaac" + integrity sha512-bk7MnJf5or0Re2zKyhR3L3CjGululLCHXx4vlbc/drnaTARUVvi559OI5uLytc/1k5HKUUyENAxLvetz2G1dnQ== + dependencies: + eventemitter3 "4.0.4" + underscore "1.12.1" + web3-core-helpers "1.3.6" + websocket "^1.0.32" + web3-shh@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.10.0.tgz#c2979b87e0f67a7fef2ce9ee853bd7bfbe9b79a8" @@ -9483,7 +12455,37 @@ web3-shh@1.10.0: web3-core-subscriptions "1.10.0" web3-net "1.10.0" -web3-utils@1.10.0, web3-utils@^1.0.0-beta.31: +web3-shh@1.10.2: + version "1.10.2" + resolved 
"https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.10.2.tgz#2a41e1a308de5320d1f17080765206b727aa669e" + integrity sha512-UP0Kc3pHv9uULFu0+LOVfPwKBSJ6B+sJ5KflF7NyBk6TvNRxlpF3hUhuaVDCjjB/dDUR6T0EQeg25FA2uzJbag== + dependencies: + web3-core "1.10.2" + web3-core-method "1.10.2" + web3-core-subscriptions "1.10.2" + web3-net "1.10.2" + +web3-shh@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.2.9.tgz#c4ba70d6142cfd61341a50752d8cace9a0370911" + integrity sha512-PWa8b/EaxaMinFaxy6cV0i0EOi2M7a/ST+9k9nhyhCjVa2vzXuNoBNo2IUOmeZ0WP2UQB8ByJ2+p4htlJaDOjA== + dependencies: + web3-core "1.2.9" + web3-core-method "1.2.9" + web3-core-subscriptions "1.2.9" + web3-net "1.2.9" + +web3-shh@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.3.6.tgz#4e3486c7eca5cbdb87f88910948223a5b7ea6c20" + integrity sha512-9zRo415O0iBslxBnmu9OzYjNErzLnzOsy+IOvSpIreLYbbAw0XkDWxv3SfcpKnTIWIACBR4AYMIxmmyi5iB3jw== + dependencies: + web3-core "1.3.6" + web3-core-method "1.3.6" + web3-core-subscriptions "1.3.6" + web3-net "1.3.6" + +web3-utils@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.10.0.tgz#ca4c1b431a765c14ac7f773e92e0fd9377ccf578" integrity sha512-kSaCM0uMcZTNUSmn5vMEhlo02RObGNRRCkdX0V9UTAU0+lrvn0HSaudyCo6CQzuXUsnuY2ERJGCGPfeWmv19Rg== @@ -9496,6 +12498,61 @@ web3-utils@1.10.0, web3-utils@^1.0.0-beta.31: randombytes "^2.1.0" utf8 "3.0.0" +web3-utils@1.10.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.10.2.tgz#361103d28a94d5e2a87ba15d776a62c33303eb44" + integrity sha512-TdApdzdse5YR+5GCX/b/vQnhhbj1KSAtfrDtRW7YS0kcWp1gkJsN62gw6GzCaNTeXookB7UrLtmDUuMv65qgow== + dependencies: + "@ethereumjs/util" "^8.1.0" + bn.js "^5.2.1" + ethereum-bloom-filters "^1.0.6" + ethereum-cryptography "^2.1.2" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + utf8 "3.0.0" + +web3-utils@1.2.9: + version "1.2.9" + resolved 
"https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.2.9.tgz#abe11735221627da943971ef1a630868fb9c61f3" + integrity sha512-9hcpuis3n/LxFzEVjwnVgvJzTirS2S9/MiNAa7l4WOEoywY+BSNwnRX4MuHnjkh9NY25B6QOjuNG6FNnSjTw1w== + dependencies: + bn.js "4.11.8" + eth-lib "0.2.7" + ethereum-bloom-filters "^1.0.6" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + underscore "1.9.1" + utf8 "3.0.0" + +web3-utils@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.3.6.tgz#390bc9fa3a7179746963cfaca55bb80ac4d8dc10" + integrity sha512-hHatFaQpkQgjGVER17gNx8u1qMyaXFZtM0y0XLGH1bzsjMPlkMPLRcYOrZ00rOPfTEuYFOdrpGOqZXVmGrMZRg== + dependencies: + bn.js "^4.11.9" + eth-lib "0.2.8" + ethereum-bloom-filters "^1.0.6" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + underscore "1.12.1" + utf8 "3.0.0" + +web3-utils@1.5.3: + version "1.5.3" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.5.3.tgz#e914c9320cd663b2a09a5cb920ede574043eb437" + integrity sha512-56nRgA+Ad9SEyCv39g36rTcr5fpsd4L9LgV3FK0aB66nAMazLAA6Qz4lH5XrUKPDyBIPGJIR+kJsyRtwcu2q1Q== + dependencies: + bn.js "^4.11.9" + eth-lib "0.2.8" + ethereum-bloom-filters "^1.0.6" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + utf8 "3.0.0" + web3-utils@1.7.0: version "1.7.0" resolved "https://registry.npmjs.org/web3-utils/-/web3-utils-1.7.0.tgz" @@ -9522,6 +12579,45 @@ web3@1.10.0: web3-shh "1.10.0" web3-utils "1.10.0" +web3@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3/-/web3-1.2.9.tgz#cbcf1c0fba5e213a6dfb1f2c1f4b37062e4ce337" + integrity sha512-Mo5aBRm0JrcNpN/g4VOrDzudymfOnHRC3s2VarhYxRA8aWgF5rnhQ0ziySaugpic1gksbXPe105pUWyRqw8HUA== + dependencies: + web3-bzz "1.2.9" + web3-core "1.2.9" + web3-eth "1.2.9" + web3-eth-personal "1.2.9" + web3-net "1.2.9" + web3-shh "1.2.9" + web3-utils "1.2.9" + +web3@1.3.6: + version "1.3.6" + resolved 
"https://registry.yarnpkg.com/web3/-/web3-1.3.6.tgz#599425461c3f9a8cbbefa70616438995f4a064cc" + integrity sha512-jEpPhnL6GDteifdVh7ulzlPrtVQeA30V9vnki9liYlUvLV82ZM7BNOQJiuzlDePuE+jZETZSP/0G/JlUVt6pOA== + dependencies: + web3-bzz "1.3.6" + web3-core "1.3.6" + web3-eth "1.3.6" + web3-eth-personal "1.3.6" + web3-net "1.3.6" + web3-shh "1.3.6" + web3-utils "1.3.6" + +web3@^1.0.0-beta.34: + version "1.10.2" + resolved "https://registry.yarnpkg.com/web3/-/web3-1.10.2.tgz#5b7e165b396fb0bea501cef4d5ce754aebad5b73" + integrity sha512-DAtZ3a3ruPziE80uZ3Ob0YDZxt6Vk2un/F5BcBrxO70owJ9Z1Y2+loZmbh1MoAmoLGjA/SUSHeUtid3fYmBaog== + dependencies: + web3-bzz "1.10.2" + web3-core "1.10.2" + web3-eth "1.10.2" + web3-eth-personal "1.10.2" + web3-net "1.10.2" + web3-shh "1.10.2" + web3-utils "1.10.2" + webcrypto-core@^1.7.7: version "1.7.7" resolved "https://registry.npmjs.org/webcrypto-core/-/webcrypto-core-1.7.7.tgz" @@ -9538,6 +12634,18 @@ webidl-conversions@^3.0.0: resolved "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz" integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE= +websocket@^1.0.31: + version "1.0.34" + resolved "https://registry.yarnpkg.com/websocket/-/websocket-1.0.34.tgz#2bdc2602c08bf2c82253b730655c0ef7dcab3111" + integrity sha512-PRDso2sGwF6kM75QykIesBijKSVceR6jL2G8NGYyq2XrItNC2P5/qL5XeR056GhA+Ly7JMFvJb9I312mJfmqnQ== + dependencies: + bufferutil "^4.0.1" + debug "^2.2.0" + es5-ext "^0.10.50" + typedarray-to-buffer "^3.1.5" + utf-8-validate "^5.0.2" + yaeti "^0.0.6" + websocket@^1.0.32: version "1.0.33" resolved "https://registry.npmjs.org/websocket/-/websocket-1.0.33.tgz" @@ -9550,21 +12658,27 @@ websocket@^1.0.32: utf-8-validate "^5.0.2" yaeti "^0.0.6" +websql@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/websql/-/websql-1.0.0.tgz#1bd00b27392893134715d5dd6941fd89e730bab5" + integrity sha512-7iZ+u28Ljw5hCnMiq0BCOeSYf0vCFQe/ORY0HgscTiKjQed8WqugpBUggJ2NTnB9fahn1kEnPRX2jf8Px5PhJw== + dependencies: + argsarray "^0.0.1" + immediate 
"^3.2.2" + noop-fn "^1.0.0" + sqlite3 "^4.0.0" + tiny-queue "^0.2.1" + whatwg-fetch@2.0.3: version "2.0.3" resolved "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-2.0.3.tgz" integrity sha1-nITsLc9oGH/wC8ZOEnS0QhduHIQ= -whatwg-fetch@^2.0.4: +whatwg-fetch@2.0.4: version "2.0.4" - resolved "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz#dde6a5df315f9d39991aa17621853d720b85566f" integrity sha512-dcQ1GWpOD/eEQ97k66aiEVpNnapVj90/+R+SXTPYGHpYBBypfKJEQjLrvMZ7YXbKm21gXd4NcuxUTjiv1YtLng== -whatwg-mimetype@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz#5fa1a7623867ff1af6ca3dc72ad6b8a4208beba7" - integrity sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q== - whatwg-url@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz" @@ -9573,10 +12687,32 @@ whatwg-url@^5.0.0: tr46 "~0.0.3" webidl-conversions "^3.0.0" -which-module@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f" - integrity sha512-F6+WgncZi/mJDrammbTuHe1q0R5hOXv/mBaiNA2TCNT/LTHusX0V+CJnj9XT8ki5ln2UZyyddDgHfCzyrOH7MQ== +which-boxed-primitive@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" + integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg== + dependencies: + is-bigint "^1.0.1" + is-boolean-object "^1.1.0" + is-number-object "^1.0.4" + is-string "^1.0.5" + is-symbol "^1.0.3" + +which-module@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.1.tgz#776b1fe35d90aebe99e8ac15eb24093389a4a409" + integrity 
sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ== + +which-typed-array@^1.1.10, which-typed-array@^1.1.11: + version "1.1.11" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.11.tgz#99d691f23c72aab6768680805a271b69761ed61a" + integrity sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew== + dependencies: + available-typed-arrays "^1.0.5" + call-bind "^1.0.2" + for-each "^0.3.3" + gopd "^1.0.1" + has-tostringtag "^1.0.0" which-typed-array@^1.1.2: version "1.1.4" @@ -9605,6 +12741,20 @@ which@^1.2.9: dependencies: isexe "^2.0.0" +wide-align@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== + dependencies: + string-width "^1.0.2 || 2" + +wide-align@^1.1.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.5.tgz#df1d4c206854369ecf3c9a4898f1b23fbd9d15d3" + integrity sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg== + dependencies: + string-width "^1.0.2 || 2 || 3 || 4" + widest-line@^3.1.0: version "3.1.0" resolved "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz" @@ -9612,28 +12762,33 @@ widest-line@^3.1.0: dependencies: string-width "^4.0.0" -window-size@^0.2.0: - version "0.2.0" - resolved "https://registry.npmjs.org/window-size/-/window-size-0.2.0.tgz" - integrity sha1-tDFbtCFKPXBY6+7okuE/ok2YsHU= - wordwrap@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz" integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== -workerpool@6.2.1: - version "6.2.1" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" 
- integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== +workerpool@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.0.0.tgz#85aad67fa1a2c8ef9386a1b43539900f61d03d58" + integrity sha512-fU2OcNA/GVAJLLyKUoHkAgIhKb0JoCpSjLC/G2vYKxUjVmQwGbRVeoPJ1a8U4pnVofz4AQV5Y/NEw8oKqxEBtA== -wrap-ansi@^2.0.0: - version "2.1.0" - resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz" - integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= +wrap-ansi@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== dependencies: - string-width "^1.0.1" - strip-ansi "^3.0.1" + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" wrap-ansi@^7.0.0: version "7.0.0" @@ -9656,15 +12811,10 @@ write-stream@~0.4.3: dependencies: readable-stream "~0.0.2" -ws@7.4.6: - version "7.4.6" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" - integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== - -ws@8.2.3: - version "8.2.3" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.2.3.tgz#63a56456db1b04367d0b721a0b80cae6d8becbba" - integrity sha512-wBuoj1BDpC6ZQ1B7DWQBYVLphPWkm8i9Y0/3YdHjHKHiohOJ1ws+3OccDWtH+PoC9DZD5WOTrJvNbWvjS6JWaA== +ws@7.4.5: + version "7.4.5" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.5.tgz#a484dd851e9beb6fdb420027e3885e8ce48986c1" + integrity 
sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g== ws@^3.0.0: version "3.3.3" @@ -9687,7 +12837,7 @@ ws@^5.1.1: resolved "https://registry.npmjs.org/ws/-/ws-7.5.5.tgz" integrity sha512-BAkMFcAzl8as1G/hArkxOxq3G7pjUqQ3gzYbLL0/5zNkph70e+lCoxBGnm6AW1+/aiNeV4fnKqZ8m4GZewmH2w== -ws@^7.2.0, ws@^7.4.5: +ws@^7.2.0, ws@^7.4.5, ws@^7.5.0: version "7.5.9" resolved "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz" integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== @@ -9712,6 +12862,13 @@ xhr-request@^1.0.1, xhr-request@^1.1.0: url-set-query "^1.0.0" xhr "^2.0.4" +xhr2-cookies@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/xhr2-cookies/-/xhr2-cookies-1.1.0.tgz#7d77449d0999197f155cb73b23df72505ed89d48" + integrity sha512-hjXUA6q+jl/bd8ADHcVfFsSPIf+tyLIjuO9TwJC9WI6JP2zKcS7C+p56I9kCLLsaCiNT035iYvEUUzdEFj/8+g== + dependencies: + cookiejar "^2.1.1" + xhr@^2.0.4, xhr@^2.2.0, xhr@^2.3.3: version "2.6.0" resolved "https://registry.npmjs.org/xhr/-/xhr-2.6.0.tgz" @@ -9747,15 +12904,10 @@ xtend@~2.1.1: dependencies: object-keys "~0.4.0" -y18n@^3.2.1: - version "3.2.2" - resolved "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz" - integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== - -y18n@^5.0.5: - version "5.0.8" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" - integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== +y18n@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== yaeti@^0.0.6: version "0.0.6" @@ -9777,10 +12929,21 @@ yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: resolved 
"https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== -yargs-parser@20.2.4: - version "20.2.4" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" - integrity sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA== +yargs-parser@13.1.2, yargs-parser@^13.1.2: + version "13.1.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" + integrity sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^15.0.1: + version "15.0.3" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-15.0.3.tgz#316e263d5febe8b38eef61ac092b33dfcc9b1115" + integrity sha512-/MVEVjTXy/cGAjdtQf8dW3V9b97bPN7rNn8ETj6BmAQL7ibC7O1Q9SPJbGjgh3SlwoBNXMzj/ZGIj8mBgl12YA== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" yargs-parser@^16.1.0: version "16.1.0" @@ -9790,66 +12953,79 @@ yargs-parser@^16.1.0: camelcase "^5.0.0" decamelize "^1.2.0" -yargs-parser@^2.4.1: - version "2.4.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-2.4.1.tgz#85568de3cf150ff49fa51825f03a8c880ddcc5c4" - integrity sha512-9pIKIJhnI5tonzG6OnCFlz/yln8xHYcGl+pn3xR0Vzff0vzN1PbNRaelgfgRUwZ3s4i3jvxT9WhmUGL4whnasA== +yargs-parser@^18.1.2: + version "18.1.3" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" + integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== dependencies: - camelcase "^3.0.0" - lodash.assign "^4.0.6" - -yargs-parser@^20.2.2: - version "20.2.9" - resolved 
"https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" - integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== + camelcase "^5.0.0" + decamelize "^1.2.0" yargs-parser@^21.0.0: version "21.1.1" resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs-unparser@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" - integrity sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA== +yargs-unparser@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.1.tgz#bd4b0ee05b4c94d058929c32cb09e3fce71d3c5f" + integrity sha512-qZV14lK9MWsGCmcr7u5oXGH0dbGqZAIxTDrWXZDo5zUr6b6iUmelNKO6x6R1dQT24AH3LgRxJpr8meWy2unolA== dependencies: - camelcase "^6.0.0" - decamelize "^4.0.0" - flat "^5.0.2" - is-plain-obj "^2.1.0" + camelcase "^5.3.1" + decamelize "^1.2.0" + flat "^4.1.0" + is-plain-obj "^1.1.0" + yargs "^14.2.3" -yargs@16.2.0: - version "16.2.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" - integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== +yargs@13.3.2: + version "13.3.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" + integrity sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw== dependencies: - cliui "^7.0.2" - escalade "^3.1.1" - get-caller-file "^2.0.5" + cliui "^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" require-directory "^2.1.1" - string-width "^4.2.0" - y18n "^5.0.5" - yargs-parser "^20.2.2" - -yargs@^4.7.1: - version "4.8.1" - 
resolved "https://registry.yarnpkg.com/yargs/-/yargs-4.8.1.tgz#c0c42924ca4aaa6b0e6da1739dfb216439f9ddc0" - integrity sha512-LqodLrnIDM3IFT+Hf/5sxBnEGECrfdC1uIbgZeJmESCSo4HoCAaKEus8MylXHAkdacGc0ye+Qa+dpkuom8uVYA== - dependencies: - cliui "^3.2.0" - decamelize "^1.1.1" - get-caller-file "^1.0.1" - lodash.assign "^4.0.3" - os-locale "^1.4.0" - read-pkg-up "^1.0.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.2" + +yargs@^14.2.3: + version "14.2.3" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-14.2.3.tgz#1a1c3edced1afb2a2fea33604bc6d1d8d688a414" + integrity sha512-ZbotRWhF+lkjijC/VhmOT9wSgyBQ7+zr13+YLkhfsSiTriYsMzkTUFP18pFhWwBeMa5gUc1MzbhrO6/VB7c9Xg== + dependencies: + cliui "^5.0.0" + decamelize "^1.2.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" require-directory "^2.1.1" - require-main-filename "^1.0.1" + require-main-filename "^2.0.0" set-blocking "^2.0.0" - string-width "^1.0.1" - which-module "^1.0.0" - window-size "^0.2.0" - y18n "^3.2.1" - yargs-parser "^2.4.1" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^15.0.1" + +yargs@^15.3.1: + version "15.4.1" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" yn@3.1.1: version "3.1.1" @@ -9869,7 +13045,14 @@ zen-observable-ts@^0.8.21: tslib "^1.9.3" zen-observable "^0.8.0" -zen-observable@^0.8.0: +zen-observable-ts@^1.2.5: + version "1.2.5" + resolved 
"https://registry.yarnpkg.com/zen-observable-ts/-/zen-observable-ts-1.2.5.tgz#6c6d9ea3d3a842812c6e9519209365a122ba8b58" + integrity sha512-QZWQekv6iB72Naeake9hS1KxHlotfRpe+WGNbNx5/ta+R3DNjVO2bswf63gXlWDcs+EMd7XY8HfVQyP1X6T4Zg== + dependencies: + zen-observable "0.8.15" + +zen-observable@0.8.15, zen-observable@^0.8.0: version "0.8.15" resolved "https://registry.npmjs.org/zen-observable/-/zen-observable-0.8.15.tgz" integrity sha512-PQ2PC7R9rslx84ndNBZB/Dkv8V8fZEpk83RLgXtYd0fwUgEjseMn1Dgajh2x6S8QbZAFa9p2qVCEuYZNgve0dQ== diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index e3513d854cf..be12c956929 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -700,6 +700,37 @@ async fn template_static_filters_false_positives() { ); } +#[tokio::test] +async fn parse_data_source_context() { + let RunnerTestRecipe { + stores, + subgraph_name, + hash, + } = RunnerTestRecipe::new("data-sources").await; + + let blocks = { + let block_0 = genesis(); + let block_1 = empty_block(block_0.ptr(), test_ptr(1)); + let block_2 = empty_block(block_1.ptr(), test_ptr(2)); + vec![block_0, block_1, block_2] + }; + let stop_block = blocks.last().unwrap().block.ptr(); + let chain = chain(blocks, &stores, None).await; + + let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await; + ctx.start_and_sync_to(stop_block).await; + + let query_res = ctx + .query(r#"{ data(id: "0") { id, foo, bar } }"#) + .await + .unwrap(); + + assert_eq!( + query_res, + Some(object! 
{ data: object!{ id: "0", foo: "test", bar: 1 } }) + ); +} + #[tokio::test] async fn retry_create_ds() { let RunnerTestRecipe { From e8ee79a2e3f38ac71b8e725661767c0636ec7963 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Wed, 6 Sep 2023 21:19:01 +0530 Subject: [PATCH 0404/2104] server: add graph-node version to index-node API --- Cargo.lock | 1 + server/index-node/Cargo.toml | 1 + server/index-node/src/resolver.rs | 34 ++++++++++++++++++++++++++++ server/index-node/src/schema.graphql | 6 +++++ 4 files changed, 42 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 5f8621dfba8..7f356795ae4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1884,6 +1884,7 @@ dependencies = [ "blake3 1.4.1", "either", "futures 0.3.16", + "git-testament", "graph", "graph-chain-arweave", "graph-chain-cosmos", diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index 2ab15c77c20..38d290c5fb8 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -18,3 +18,4 @@ http = "0.2" hyper = "0.14" lazy_static = "1.2.0" serde = "1.0" +git-testament = "0.2.4" diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 5aae51fc762..435e9ea8a07 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -4,6 +4,7 @@ use std::convert::TryInto; use graph::data::query::Trace; use web3::types::Address; +use git_testament::{git_testament, CommitKind}; use graph::blockchain::{Blockchain, BlockchainKind, BlockchainMap}; use graph::components::store::{BlockPtrForNumber, BlockStore, EntityType, Store}; use graph::components::versions::VERSIONS; @@ -18,6 +19,19 @@ use crate::auth::PoiProtection; /// Timeout for calls to fetch the block from JSON-RPC or Firehose. const BLOCK_HASH_FROM_NUMBER_TIMEOUT: Duration = Duration::from_secs(10); +git_testament!(TESTAMENT); + +lazy_static! 
{ + static ref VERSION: Version = Version { + version: env!("CARGO_PKG_VERSION").to_string(), + commit: match TESTAMENT.commit { + CommitKind::FromTag(_, hash, _, _) => hash.to_string(), + CommitKind::NoTags(hash, _) => hash.to_string(), + _ => "unknown".to_string(), + } + }; +} + #[derive(Clone, Debug)] struct PublicProofOfIndexingRequest { pub deployment: DeploymentHash, @@ -39,6 +53,21 @@ impl TryFromValue for PublicProofOfIndexingRequest { } } +#[derive(Clone, Debug)] +struct Version { + version: String, + commit: String, +} + +impl IntoValue for Version { + fn into_value(self) -> r::Value { + object! { + version: self.version, + commit: self.commit, + } + } +} + #[derive(Debug)] struct PublicProofOfIndexingResult { pub deployment: DeploymentHash, @@ -469,6 +498,10 @@ impl IndexNodeResolver { )) } + fn version(&self) -> Result { + Ok(VERSION.clone().into_value()) + } + async fn block_ptr_for_number( &self, network: String, @@ -700,6 +733,7 @@ impl Resolver for IndexNodeResolver { (None, "entityChangesInBlock") => self.resolve_entity_changes_in_block(field), // The top-level `subgraphVersions` field (None, "apiVersions") => self.resolve_api_versions(field), + (None, "version") => self.version(), // Resolve fields of `Object` values (e.g. the `latestBlock` field of `EthereumBlock`) (value, _) => Ok(value.unwrap_or(r::Value::Null)), diff --git a/server/index-node/src/schema.graphql b/server/index-node/src/schema.graphql index a91a5ece40a..823882ab2ab 100644 --- a/server/index-node/src/schema.graphql +++ b/server/index-node/src/schema.graphql @@ -40,6 +40,7 @@ type Query { entityChangesInBlock(subgraphId: String!, blockNumber: Int!): EntityChanges! blockData(network: String!, blockHash: Bytes!): JSONObject blockHashFromNumber(network: String!, blockNumber: Int!): Bytes + version: Version! cachedEthereumCalls( network: String! blockHash: Bytes! @@ -47,6 +48,11 @@ type Query { apiVersions(subgraphId: String!): [ApiVersion!]! } +type Version { + version: String! 
+ commit: String! +} + type SubgraphIndexingStatus { subgraph: String! synced: Boolean! From 43baf5ba853e7d1d1e44d6fa2490e7b8a8ed3eee Mon Sep 17 00:00:00 2001 From: Kamil Kisiela Date: Fri, 25 Aug 2023 11:31:34 +0200 Subject: [PATCH 0405/2104] graph: Adds attestable ChildFilterNestingNotSupportedError --- graph/src/components/store/err.rs | 5 +++++ graph/src/data/query/error.rs | 8 ++++++++ store/postgres/src/relational_queries.rs | 5 +++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index 3cfa74bef5a..f5052a3a179 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -29,6 +29,8 @@ pub enum StoreError { MalformedDirective(String), #[error("query execution failed: {0}")] QueryExecutionError(String), + #[error("Child filter nesting not supported by value `{0}`: `{1}`")] + ChildFilterNestingNotSupportedError(String, String), #[error("invalid identifier: {0}")] InvalidIdentifier(String), #[error( @@ -98,6 +100,9 @@ impl Clone for StoreError { } Self::MalformedDirective(arg0) => Self::MalformedDirective(arg0.clone()), Self::QueryExecutionError(arg0) => Self::QueryExecutionError(arg0.clone()), + Self::ChildFilterNestingNotSupportedError(arg0, arg1) => { + Self::ChildFilterNestingNotSupportedError(arg0.clone(), arg1.clone()) + } Self::InvalidIdentifier(arg0) => Self::InvalidIdentifier(arg0.clone()), Self::DuplicateBlockProcessing(arg0, arg1) => { Self::DuplicateBlockProcessing(arg0.clone(), arg1.clone()) diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index 5449a330c30..4764b47c1af 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -49,6 +49,7 @@ pub enum QueryExecutionError { EntityFieldError(String, String), ListTypesError(String, Vec), ListFilterError(String), + ChildFilterNestingNotSupportedError(String, String), ValueParseError(String, String), AttributeTypeError(String, String), 
EntityParseError(String), @@ -96,6 +97,7 @@ impl QueryExecutionError { | OrderByNotSupportedError(_, _) | OrderByNotSupportedForType(_) | FilterNotSupportedError(_, _) + | ChildFilterNestingNotSupportedError(_, _) | UnknownField(_, _, _) | EmptyQuery | MultipleSubscriptionFields @@ -201,6 +203,9 @@ impl fmt::Display for QueryExecutionError { FilterNotSupportedError(value, filter) => { write!(f, "Filter not supported by value `{}`: `{}`", value, filter) } + ChildFilterNestingNotSupportedError(value, filter) => { + write!(f, "Child filter nesting not supported by value `{}`: `{}`", value, filter) + } UnknownField(_, t, s) => { write!(f, "Type `{}` has no field `{}`", t, s) } @@ -309,6 +314,9 @@ impl From for QueryExecutionError { StoreError::DeploymentNotFound(id_or_name) => { QueryExecutionError::DeploymentNotFound(id_or_name) } + StoreError::ChildFilterNestingNotSupportedError(attr, filter) => { + QueryExecutionError::ChildFilterNestingNotSupportedError(attr, filter) + } _ => QueryExecutionError::StoreError(CloneableAnyhowError(Arc::new(e.into()))), } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 7e1d0eb7cf4..ab28edb12df 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -973,8 +973,9 @@ impl<'a> QueryFilter<'a> { } Child(child) => { if child_filter_ancestor { - return Err(StoreError::QueryExecutionError( - "Child filters can not be nested".to_string(), + return Err(StoreError::ChildFilterNestingNotSupportedError( + child.attr.to_string(), + filter.to_string(), )); } From cd9f10f6d3c020b412cd42c4d786fa20a937dbf9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 1 Sep 2023 12:01:12 -0700 Subject: [PATCH 0406/2104] store: Map other shards into a new shard on startup Fixes https://github.com/graphprotocol/graph-node/issues/4719 --- store/postgres/src/catalog.rs | 8 ++- store/postgres/src/connection_pool.rs | 72 ++++++++++++++++++--------- 2 files 
changed, 55 insertions(+), 25 deletions(-) diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index db5ba421134..e343a2c4a2b 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -415,14 +415,18 @@ pub fn drop_schema(conn: &PgConnection, nsp: &str) -> Result<(), StoreError> { Ok(conn.batch_execute(&query)?) } -pub fn migration_count(conn: &PgConnection) -> Result { +pub fn migration_count(conn: &PgConnection) -> Result { use __diesel_schema_migrations as m; if !table_exists(conn, NAMESPACE_PUBLIC, &MIGRATIONS_TABLE)? { return Ok(0); } - m::table.count().get_result(conn).map_err(StoreError::from) + m::table + .count() + .get_result(conn) + .map(|n: i64| n as usize) + .map_err(StoreError::from) } pub fn account_like(conn: &PgConnection, site: &Site) -> Result, StoreError> { diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index 7c1cb69f7fa..61bff94e729 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -1011,13 +1011,7 @@ impl PoolInner { let result = pool .configure_fdw(coord.servers.as_ref()) .and_then(|()| migrate_schema(&pool.logger, &conn)) - .and_then(|had_migrations| { - if had_migrations { - coord.propagate_schema_change(&self.shard) - } else { - Ok(()) - } - }); + .and_then(|count| coord.propagate(&pool, count)); debug!(&pool.logger, "Release migration lock"); advisory_lock::unlock_migration(&conn).unwrap_or_else(|err| { die(&pool.logger, "failed to release migration lock", &err); @@ -1107,12 +1101,31 @@ impl PoolInner { embed_migrations!("./migrations"); +struct MigrationCount { + old: usize, + new: usize, +} + +impl MigrationCount { + fn new(old: usize, new: usize) -> Self { + Self { old, new } + } + + fn had_migrations(&self) -> bool { + self.old != self.new + } + + fn is_new(&self) -> bool { + self.old == 0 + } +} + /// Run all schema migrations. 
/// /// When multiple `graph-node` processes start up at the same time, we ensure /// that they do not run migrations in parallel by using `blocking_conn` to /// serialize them. The `conn` is used to run the actual migration. -fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result { +fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result { // Collect migration logging output let mut output = vec![]; @@ -1122,7 +1135,7 @@ fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result")); @@ -1136,14 +1149,15 @@ fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result msg); } } + let count = MigrationCount::new(old_count, new_count); - if had_migrations { + if count.had_migrations() { // Reset the query statistics since a schema change makes them not // all that useful. An error here is not serious and can be ignored. conn.batch_execute("select pg_stat_statements_reset()").ok(); } - Ok(had_migrations) + Ok(count) } /// Helper to coordinate propagating schema changes from the database that @@ -1207,18 +1221,23 @@ impl PoolCoordinator { /// Propagate changes to the schema in `shard` to all other pools. Those /// other pools will then recreate any tables that they imported from - /// `shard` - fn propagate_schema_change(&self, shard: &Shard) -> Result<(), StoreError> { - let server = self - .servers - .iter() - .find(|server| &server.shard == shard) - .ok_or_else(|| constraint_violation!("unknown shard {shard}"))?; - - for pool in self.pools.lock().unwrap().values() { - if let Err(e) = pool.remap(server) { - error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); - return Err(e); + /// `shard`. If `pool` is a new shard, we also map all other shards into + /// it. 
+ fn propagate(&self, pool: &PoolInner, count: MigrationCount) -> Result<(), StoreError> { + // pool is a new shard, map all other shards into it + if count.is_new() { + for server in self.servers.iter() { + pool.remap(server)?; + } + } + // pool had schema changes, refresh the import from pool into all other shards + if count.had_migrations() { + let server = self.server(&pool.shard)?; + for pool in self.pools.lock().unwrap().values() { + if let Err(e) = pool.remap(server) { + error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); + return Err(e); + } } } Ok(()) @@ -1231,4 +1250,11 @@ impl PoolCoordinator { pub fn servers(&self) -> Arc> { self.servers.clone() } + + fn server(&self, shard: &Shard) -> Result<&ForeignServer, StoreError> { + self.servers + .iter() + .find(|server| &server.shard == shard) + .ok_or_else(|| constraint_violation!("unknown shard {shard}")) + } } From 9fd2726cedcbf123108ac183752a2c63e433daa4 Mon Sep 17 00:00:00 2001 From: Vishal_Saroj Date: Sat, 2 Sep 2023 13:15:50 +0530 Subject: [PATCH 0407/2104] Changes the spelling of History from Hostory --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 815b0ba6380..7992c32c49f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -79,7 +79,7 @@ Please do not merge master into your branch as you develop your pull request; instead, rebase your branch on top of the latest master if your pull request branch is long-lived. -We try to keep the hostory of the `master` branch linear, and avoid merge +We try to keep the history of the `master` branch linear, and avoid merge commits. 
Once your pull request is approved, merge it following these steps: ``` From 49b63ee09ae6fbfbaca9cf15c0dfc10ff0bdfbe3 Mon Sep 17 00:00:00 2001 From: Saihajpreet Singh Date: Tue, 12 Sep 2023 22:36:35 -0400 Subject: [PATCH 0408/2104] graphql: update introspection schema (#4676) * graphql: update introspection schema * refactor(graph, graphql): avoid duplicating introspection schema * fix(graphql): introspection query to include a query root * style: run code format * graph: When mixing in introspection schema do not clobber root query type --------- Co-authored-by: David Lutterkort --- graph/src/data/graphql/ext.rs | 13 +++ graph/src/lib.rs | 2 +- graph/src/schema/api.rs | 35 +++++-- graph/src/schema/introspection.graphql | 22 +++-- graph/src/schema/mod.rs | 2 +- graphql/src/execution/execution.rs | 3 +- graphql/src/introspection/mod.rs | 2 - graphql/src/introspection/schema.rs | 132 ------------------------- 8 files changed, 61 insertions(+), 150 deletions(-) delete mode 100644 graphql/src/introspection/schema.rs diff --git a/graph/src/data/graphql/ext.rs b/graph/src/data/graphql/ext.rs index ac7c956d97e..2af85f18315 100644 --- a/graph/src/data/graphql/ext.rs +++ b/graph/src/data/graphql/ext.rs @@ -225,6 +225,19 @@ impl DocumentExt for Document { } } +pub trait DefinitionExt { + fn is_root_query_type(&self) -> bool; +} + +impl DefinitionExt for Definition { + fn is_root_query_type(&self) -> bool { + match self { + Definition::TypeDefinition(TypeDefinition::Object(t)) => t.name == "Query", + _ => false, + } + } +} + pub trait TypeExt { fn get_base_type(&self) -> &str; fn is_list(&self) -> bool; diff --git a/graph/src/lib.rs b/graph/src/lib.rs index f11fe2c84e2..d90730b1b4c 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -202,7 +202,7 @@ pub mod prelude { }); static_graphql!(s, schema, { Field, Directive, InterfaceType, ObjectType, Value, TypeDefinition, - EnumType, Type, Document, ScalarType, InputValue, DirectiveDefinition, + EnumType, Type, Definition, 
Document, ScalarType, InputValue, DirectiveDefinition, UnionType, InputObjectType, EnumValue, }); diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 3d084f79c92..6d120ade266 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -6,10 +6,10 @@ use inflector::Inflector; use lazy_static::lazy_static; use crate::components::store::EntityType; -use crate::data::graphql::ObjectOrInterface; +use crate::data::graphql::{ObjectOrInterface, ObjectTypeExt}; use crate::schema::{ast, META_FIELD_NAME, META_FIELD_TYPE}; -use crate::data::graphql::ext::{DirectiveExt, DocumentExt, ValueExt}; +use crate::data::graphql::ext::{DefinitionExt, DirectiveExt, DocumentExt, ValueExt}; use crate::prelude::s::{Value, *}; use crate::prelude::*; use thiserror::Error; @@ -223,7 +223,7 @@ impl ApiSchema { } #[cfg(debug_assertions)] - pub fn definitions(&self) -> impl Iterator> { + pub fn definitions(&self) -> impl Iterator { self.schema.document.definitions.iter() } } @@ -233,8 +233,24 @@ lazy_static! { let schema = include_str!("introspection.graphql"); parse_schema(schema).expect("the schema `introspection.graphql` is invalid") }; + pub static ref INTROSPECTION_QUERY_TYPE: ast::ObjectType = { + let root_query_type = INTROSPECTION_SCHEMA + .get_root_query_type() + .expect("Schema does not have a root query type"); + ast::ObjectType::from(Arc::new(root_query_type.clone())) + }; } +pub fn is_introspection_field(name: &str) -> bool { + INTROSPECTION_QUERY_TYPE.field(name).is_some() +} + +/// Extend `schema` with the definitions from the introspection schema and +/// modify the root query type to contain the fields from the introspection +/// schema's root query type. 
+/// +/// This results in a schema that combines the original schema with the +/// introspection schema fn add_introspection_schema(schema: &mut Document) { fn introspection_fields() -> Vec { // Generate fields for the root query fields in an introspection schema, @@ -274,9 +290,16 @@ fn add_introspection_schema(schema: &mut Document) { ] } - schema - .definitions - .extend(INTROSPECTION_SCHEMA.definitions.iter().cloned()); + // Add all definitions from the introspection schema to the schema, + // except for the root query type as that qould clobber the 'real' root + // query type + schema.definitions.extend( + INTROSPECTION_SCHEMA + .definitions + .iter() + .filter(|dfn| !dfn.is_root_query_type()) + .cloned(), + ); let query_type = schema .definitions diff --git a/graph/src/schema/introspection.graphql b/graph/src/schema/introspection.graphql index c3d2c1b8842..d34b4d67e5b 100644 --- a/graph/src/schema/introspection.graphql +++ b/graph/src/schema/introspection.graphql @@ -1,9 +1,12 @@ # A GraphQL introspection schema for inclusion in a subgraph's API schema. -# The schema differs from the 'standard' introspection schema in that it -# doesn't have a Query type nor scalar declarations as they come from the -# API schema. + +type Query { + __schema: __Schema! + __type(name: String!): __Type +} type __Schema { + description: String types: [__Type!]! queryType: __Type! mutationType: __Type @@ -33,12 +36,15 @@ type __Type { # NON_NULL and LIST only ofType: __Type + + # may be non-null for custom SCALAR, otherwise null. + specifiedByURL: String } type __Field { name: String! description: String - args: [__InputValue!]! + args(includeDeprecated: Boolean = false): [__InputValue!]! type: __Type! isDeprecated: Boolean! deprecationReason: String @@ -49,6 +55,8 @@ type __InputValue { description: String type: __Type! defaultValue: String + isDeprecated: Boolean! + deprecationReason: String } type __EnumValue { @@ -73,7 +81,8 @@ type __Directive { name: String! 
description: String locations: [__DirectiveLocation!]! - args: [__InputValue!]! + args(includeDeprecated: Boolean = false): [__InputValue!]! + isRepeatable: Boolean! } enum __DirectiveLocation { @@ -84,6 +93,7 @@ enum __DirectiveLocation { FRAGMENT_DEFINITION FRAGMENT_SPREAD INLINE_FRAGMENT + VARIABLE_DEFINITION SCHEMA SCALAR OBJECT @@ -95,4 +105,4 @@ enum __DirectiveLocation { ENUM_VALUE INPUT_OBJECT INPUT_FIELD_DEFINITION -} \ No newline at end of file +} diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 5d4a3a0789a..4361255debf 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -31,7 +31,7 @@ pub mod ast; mod fulltext; mod input_schema; -pub use api::{api_schema, APISchemaError}; +pub use api::{api_schema, is_introspection_field, APISchemaError, INTROSPECTION_QUERY_TYPE}; pub use api::{ApiSchema, ErrorPolicy}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 2e256cb76e6..8e819293938 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -7,7 +7,7 @@ use graph::{ value::{Object, Word}, }, prelude::{s, CheapClone}, - schema::META_FIELD_NAME, + schema::{is_introspection_field, INTROSPECTION_QUERY_TYPE, META_FIELD_NAME}, util::{lfu_cache::EvictStats, timed_rw_lock::TimedMutex}, }; use lazy_static::lazy_static; @@ -24,7 +24,6 @@ use graph::util::{lfu_cache::LfuCache, stable_hash_glue::impl_stable_hash}; use super::QueryHash; use crate::execution::ast as a; -use crate::introspection::{is_introspection_field, INTROSPECTION_QUERY_TYPE}; use crate::prelude::*; lazy_static! 
{ diff --git a/graphql/src/introspection/mod.rs b/graphql/src/introspection/mod.rs index 16b751284ee..7f4ccde25bd 100644 --- a/graphql/src/introspection/mod.rs +++ b/graphql/src/introspection/mod.rs @@ -1,5 +1,3 @@ mod resolver; -mod schema; pub use self::resolver::IntrospectionResolver; -pub use self::schema::{is_introspection_field, INTROSPECTION_DOCUMENT, INTROSPECTION_QUERY_TYPE}; diff --git a/graphql/src/introspection/schema.rs b/graphql/src/introspection/schema.rs deleted file mode 100644 index 303c46f36d5..00000000000 --- a/graphql/src/introspection/schema.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::sync::Arc; - -use graphql_parser; - -use graph::data::graphql::ext::DocumentExt; -use graph::data::graphql::ext::ObjectTypeExt; -use graph::prelude::s::Document; - -use lazy_static::lazy_static; - -use graph::schema::ast as sast; - -const INTROSPECTION_SCHEMA: &str = " -scalar Boolean -scalar Float -scalar Int -scalar ID -scalar String - -type Query { - __schema: __Schema! - __type(name: String!): __Type -} - -type __Schema { - types: [__Type!]! - queryType: __Type! - mutationType: __Type - subscriptionType: __Type - directives: [__Directive!]! -} - -type __Type { - kind: __TypeKind! - name: String - description: String - - # OBJECT and INTERFACE only - fields(includeDeprecated: Boolean = false): [__Field!] - - # OBJECT only - interfaces: [__Type!] - - # INTERFACE and UNION only - possibleTypes: [__Type!] - - # ENUM only - enumValues(includeDeprecated: Boolean = false): [__EnumValue!] - - # INPUT_OBJECT only - inputFields: [__InputValue!] - - # NON_NULL and LIST only - ofType: __Type -} - -type __Field { - name: String! - description: String - args: [__InputValue!]! - type: __Type! - isDeprecated: Boolean! - deprecationReason: String -} - -type __InputValue { - name: String! - description: String - type: __Type! - defaultValue: String -} - -type __EnumValue { - name: String! - description: String - isDeprecated: Boolean! 
- deprecationReason: String -} - -enum __TypeKind { - SCALAR - OBJECT - INTERFACE - UNION - ENUM - INPUT_OBJECT - LIST - NON_NULL -} - -type __Directive { - name: String! - description: String - locations: [__DirectiveLocation!]! - args: [__InputValue!]! -} - -enum __DirectiveLocation { - QUERY - MUTATION - SUBSCRIPTION - FIELD - FRAGMENT_DEFINITION - FRAGMENT_SPREAD - INLINE_FRAGMENT - SCHEMA - SCALAR - OBJECT - FIELD_DEFINITION - ARGUMENT_DEFINITION - INTERFACE - UNION - ENUM - ENUM_VALUE - INPUT_OBJECT - INPUT_FIELD_DEFINITION -}"; - -lazy_static! { - pub static ref INTROSPECTION_DOCUMENT: Document = - graphql_parser::parse_schema(INTROSPECTION_SCHEMA).unwrap(); - pub static ref INTROSPECTION_QUERY_TYPE: sast::ObjectType = sast::ObjectType::from(Arc::new( - INTROSPECTION_DOCUMENT - .get_root_query_type() - .unwrap() - .clone() - )); -} - -pub fn is_introspection_field(name: &str) -> bool { - INTROSPECTION_QUERY_TYPE.field(name).is_some() -} From c1f6150be40445e25f8efe477babd8964d8ee973 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Mon, 28 Aug 2023 12:08:00 +0530 Subject: [PATCH 0409/2104] store: index only a prefix for String and Bytes when calling `create_manual_index` --- store/postgres/src/deployment_store.rs | 49 ++++++++++++++++++++------ 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 9f7c2a1c84a..50052f3024e 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -52,7 +52,9 @@ use crate::detail::ErrorDetail; use crate::dynds::DataSourcesTable; use crate::primary::DeploymentId; use crate::relational::index::{CreateIndex, Method}; -use crate::relational::{Layout, LayoutCache, SqlName, Table}; +use crate::relational::{ + ColumnType, Layout, LayoutCache, SqlName, Table, BYTE_ARRAY_PREFIX_SIZE, STRING_PREFIX_SIZE, +}; use crate::relational_queries::FromEntityData; use crate::{advisory_lock, catalog, retry}; use 
crate::{connection_pool::ConnectionPool, detail}; @@ -674,7 +676,10 @@ impl DeploymentStore { conn.transaction(|| { for table in tables { - let columns = resolve_column_names(table, &columns)?; + let columns = resolve_column_names(table, &columns)? + .iter() + .map(|(name, _)| *name) + .collect::>(); catalog::set_stats_target(&conn, &site.namespace, &table.name, &columns, target)?; } Ok(()) @@ -711,15 +716,20 @@ impl DeploymentStore { let schema_name = site.namespace.clone(); let layout = store.layout(conn, site)?; let table = resolve_table_name(&layout, &entity_name)?; - let column_names = resolve_column_names(table, &field_names)?; + let column_names_to_types = resolve_column_names(table, &field_names)?; + let column_names = column_names_to_types + .iter() + .map(|(name, _)| *name) + .collect::>(); + let column_names_sep_by_underscores = column_names.join("_"); - let column_names_sep_by_commas = column_names.join(", "); + let index_exprs = resolve_index_exprs(column_names_to_types); let table_name = &table.name; let index_name = format!("manual_{table_name}_{column_names_sep_by_underscores}"); let sql = format!( "create index concurrently if not exists {index_name} \ on {schema_name}.{table_name} using {index_method} \ - ({column_names_sep_by_commas})" + ({index_exprs})" ); // This might take a long time. 
conn.execute(&sql)?; @@ -1843,8 +1853,11 @@ fn resolve_table_name<'a>(layout: &'a Layout, name: &'_ str) -> Result<&'a Table fn resolve_column_names<'a, T: AsRef>( table: &'a Table, field_names: &[T], -) -> Result, StoreError> { - fn lookup<'a>(table: &'a Table, field: &str) -> Result<&'a SqlName, StoreError> { +) -> Result)>, StoreError> { + fn lookup<'a>( + table: &'a Table, + field: &str, + ) -> Result<(&'a SqlName, &'a ColumnType), StoreError> { table .column_for_field(field) .or_else(|_error| { @@ -1853,21 +1866,37 @@ fn resolve_column_names<'a, T: AsRef>( .column(&sql_name) .ok_or_else(|| StoreError::UnknownField(field.to_string())) }) - .map(|column| &column.name) + .map(|column| (&column.name, &column.column_type)) } field_names .iter() .map(|f| { if f.as_ref() == BLOCK_RANGE_COLUMN || f.as_ref() == BLOCK_COLUMN { - Ok(table.block_column()) + Ok((table.block_column(), None)) } else { - lookup(table, f.as_ref()) + lookup(table, f.as_ref()).map(|(name, column_type)| (name, Some(column_type))) } }) .collect() } +fn resolve_index_exprs(column_names_to_types: Vec<(&SqlName, Option<&ColumnType>)>) -> String { + column_names_to_types + .iter() + .map(|(name, column_type)| match column_type { + Some(ColumnType::String) => { + format!("left({}, {})", name, STRING_PREFIX_SIZE) + } + Some(ColumnType::Bytes) => { + format!("substring({}, 1, {})", name, BYTE_ARRAY_PREFIX_SIZE) + } + _ => name.to_string(), + }) + .collect::>() + .join(",") +} + /// A helper to log progress during pruning that is kicked off from /// `transact_block_operations` struct OngoingPruneReporter { From 0520895561e7fd155557373cb9744b2eceaa1cbc Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Mon, 28 Aug 2023 13:27:39 +0530 Subject: [PATCH 0410/2104] node/manager , store: add ability to create parital indexes over block_range in `graphman index create` --- node/src/bin/manager.rs | 6 ++++++ node/src/manager/commands/index.rs | 13 ++++++++++++- store/postgres/src/deployment_store.rs | 16 
++++++++++++++-- store/postgres/src/subgraph_store.rs | 3 ++- 4 files changed, 34 insertions(+), 4 deletions(-) diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index ece55b44c5d..77e78d45c28 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -664,6 +664,10 @@ pub enum IndexCommand { possible_values = &["btree", "hash", "gist", "spgist", "gin", "brin"] )] method: String, + + #[clap(long)] + /// Specifies a starting block number for creating a partial index. + after: Option, }, /// Lists existing indexes for a given Entity List { @@ -1405,6 +1409,7 @@ async fn main() -> anyhow::Result<()> { entity, fields, method, + after, } => { commands::index::create( subgraph_store, @@ -1413,6 +1418,7 @@ async fn main() -> anyhow::Result<()> { &entity, fields, method, + after, ) .await } diff --git a/node/src/manager/commands/index.rs b/node/src/manager/commands/index.rs index ee6e66d5fa8..59721e68816 100644 --- a/node/src/manager/commands/index.rs +++ b/node/src/manager/commands/index.rs @@ -24,6 +24,10 @@ fn validate_fields>(fields: &[T]) -> Result<(), anyhow::Error> { } Ok(()) } + +/// `after` allows for the creation of a partial index +/// starting from a specified block number. This can improve +/// performance for queries that are close to the subgraph head. 
pub async fn create( store: Arc, pool: ConnectionPool, @@ -31,6 +35,7 @@ pub async fn create( entity_name: &str, field_names: Vec, index_method: String, + after: Option, ) -> Result<(), anyhow::Error> { validate_fields(&field_names)?; let deployment_locator = search.locate_unique(&pool)?; @@ -39,7 +44,13 @@ pub async fn create( .parse::() .map_err(|()| anyhow!("unknown index method `{}`", index_method))?; match store - .create_manual_index(&deployment_locator, entity_name, field_names, index_method) + .create_manual_index( + &deployment_locator, + entity_name, + field_names, + index_method, + after, + ) .await { Ok(()) => Ok(()), diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 50052f3024e..95534344d73 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -709,6 +709,7 @@ impl DeploymentStore { entity_name: &str, field_names: Vec, index_method: Method, + after: Option, ) -> Result<(), StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); @@ -726,11 +727,22 @@ impl DeploymentStore { let index_exprs = resolve_index_exprs(column_names_to_types); let table_name = &table.name; let index_name = format!("manual_{table_name}_{column_names_sep_by_underscores}"); - let sql = format!( + let mut sql = format!( "create index concurrently if not exists {index_name} \ on {schema_name}.{table_name} using {index_method} \ - ({index_exprs})" + ({index_exprs}) ", ); + + // If 'after' is provided and the table is not immutable, add a WHERE clause for partial indexing + if let Some(after) = after { + if !table.immutable { + sql.push_str(&format!( + " where coalesce(upper({}), 2147483647) > {}", + BLOCK_RANGE_COLUMN, after + )); + } + } + // This might take a long time. 
conn.execute(&sql)?; // check if the index creation was successfull diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index cd98606db2c..6f14af8ea3f 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1117,10 +1117,11 @@ impl SubgraphStoreInner { entity_name: &str, field_names: Vec, index_method: Method, + after: Option, ) -> Result<(), StoreError> { let (store, site) = self.store(&deployment.hash)?; store - .create_manual_index(site, entity_name, field_names, index_method) + .create_manual_index(site, entity_name, field_names, index_method, after) .await } From 2a336df75559225309eac1db2cb084f5e9dfabcb Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Tue, 29 Aug 2023 12:49:38 +0530 Subject: [PATCH 0411/2104] store: improve index name, errors for immutable tables on `create_manual_index` --- store/postgres/src/deployment_store.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 95534344d73..a09efc03e8a 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -726,7 +726,15 @@ impl DeploymentStore { let column_names_sep_by_underscores = column_names.join("_"); let index_exprs = resolve_index_exprs(column_names_to_types); let table_name = &table.name; - let index_name = format!("manual_{table_name}_{column_names_sep_by_underscores}"); + let index_name = format!( + "manual_{table_name}_{column_names_sep_by_underscores}{}", + if let Some(after_value) = after { + format!("_{}", after_value) + } else { + String::new() + } + ); + let mut sql = format!( "create index concurrently if not exists {index_name} \ on {schema_name}.{table_name} using {index_method} \ @@ -740,6 +748,11 @@ impl DeploymentStore { " where coalesce(upper({}), 2147483647) > {}", BLOCK_RANGE_COLUMN, after )); + } else { + return 
Err(CancelableError::Error(StoreError::Unknown(anyhow!( + "Partial index not allowed on immutable table `{}`", + table_name + )))); } } From 6bcf9879c677783e9e858056928f5923642becc6 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Wed, 23 Aug 2023 16:48:07 +0530 Subject: [PATCH 0412/2104] chain, graph: implement `handler_kinds()` method on Datasource --- chain/arweave/src/data_source.rs | 18 ++++++++++++- chain/cosmos/src/data_source.rs | 34 ++++++++++++++++++++++++ chain/ethereum/src/data_source.rs | 40 +++++++++++++++++++++++++++++ chain/near/src/data_source.rs | 17 ++++++++++++ chain/substreams/src/data_source.rs | 9 +++++-- graph/src/blockchain/mock.rs | 6 ++++- graph/src/blockchain/mod.rs | 4 ++- graph/src/data_source/mod.rs | 13 +++++++++- graph/src/data_source/offchain.rs | 6 +++++ 9 files changed, 141 insertions(+), 6 deletions(-) diff --git a/chain/arweave/src/data_source.rs b/chain/arweave/src/data_source.rs index 3937c258268..86a5878df4e 100644 --- a/chain/arweave/src/data_source.rs +++ b/chain/arweave/src/data_source.rs @@ -12,13 +12,15 @@ use graph::{ }, semver, }; +use std::collections::HashSet; use std::sync::Arc; use crate::chain::Chain; use crate::trigger::ArweaveTrigger; pub const ARWEAVE_KIND: &str = "arweave"; - +const BLOCK_HANDLER_KIND: &str = "block"; +const TRANSACTION_HANDLER_KIND: &str = "transaction"; /// Runtime representation of a data source. 
#[derive(Clone, Debug)] pub struct DataSource { @@ -47,6 +49,20 @@ impl blockchain::DataSource for DataSource { self.source.start_block } + fn handler_kinds(&self) -> HashSet<&str> { + let mut kinds = HashSet::new(); + + if self.handler_for_block().is_some() { + kinds.insert(BLOCK_HANDLER_KIND); + } + + if self.handler_for_transaction().is_some() { + kinds.insert(TRANSACTION_HANDLER_KIND); + } + + kinds + } + fn match_and_decode( &self, trigger: &::TriggerData, diff --git a/chain/cosmos/src/data_source.rs b/chain/cosmos/src/data_source.rs index 70168e340de..bedff2f9251 100644 --- a/chain/cosmos/src/data_source.rs +++ b/chain/cosmos/src/data_source.rs @@ -18,6 +18,10 @@ use crate::codec; use crate::trigger::CosmosTrigger; pub const COSMOS_KIND: &str = "cosmos"; +const BLOCK_HANDLER_KIND: &str = "block"; +const EVENT_HANDLER_KIND: &str = "event"; +const TRANSACTION_HANDLER_KIND: &str = "transaction"; +const MESSAGE_HANDLER_KIND: &str = "message"; const DYNAMIC_DATA_SOURCE_ERROR: &str = "Cosmos subgraphs do not support dynamic data sources"; const TEMPLATE_ERROR: &str = "Cosmos subgraphs do not support templates"; @@ -49,6 +53,36 @@ impl blockchain::DataSource for DataSource { self.source.start_block } + fn handler_kinds(&self) -> HashSet<&str> { + let mut kinds = HashSet::new(); + + let Mapping { + block_handlers, + event_handlers, + transaction_handlers, + message_handlers, + .. 
+ } = &self.mapping; + + if !block_handlers.is_empty() { + kinds.insert(BLOCK_HANDLER_KIND); + } + + if !event_handlers.is_empty() { + kinds.insert(EVENT_HANDLER_KIND); + } + + if !transaction_handlers.is_empty() { + kinds.insert(TRANSACTION_HANDLER_KIND); + } + + if !message_handlers.is_empty() { + kinds.insert(MESSAGE_HANDLER_KIND); + } + + kinds + } + fn match_and_decode( &self, trigger: &::TriggerData, diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index 6c13e385c58..bd1855965a3 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -9,6 +9,7 @@ use graph::prelude::futures03::future::try_join; use graph::prelude::futures03::stream::FuturesOrdered; use graph::prelude::{Link, SubgraphManifestValidationError}; use graph::slog::{o, trace}; +use std::collections::HashSet; use std::num::NonZeroU32; use std::str::FromStr; use std::sync::Arc; @@ -35,6 +36,9 @@ use crate::trigger::{EthereumBlockTriggerType, EthereumTrigger, MappingTrigger}; // The recommended kind is `ethereum`, `ethereum/contract` is accepted for backwards compatibility. const ETHEREUM_KINDS: &[&str] = &["ethereum/contract", "ethereum"]; +const EVENT_HANDLER_KIND: &str = "event"; +const CALL_HANDLER_KIND: &str = "call"; +const BLOCK_HANDLER_KIND: &str = "block"; /// Runtime representation of a data source. // Note: Not great for memory usage that this needs to be `Clone`, considering how there may be tens @@ -106,6 +110,29 @@ impl blockchain::DataSource for DataSource { self.address.as_ref().map(|x| x.as_bytes()) } + fn handler_kinds(&self) -> HashSet<&str> { + let mut kinds = HashSet::new(); + + let Mapping { + event_handlers, + call_handlers, + block_handlers, + .. 
+ } = &self.mapping; + + if !event_handlers.is_empty() { + kinds.insert(EVENT_HANDLER_KIND); + } + if !call_handlers.is_empty() { + kinds.insert(CALL_HANDLER_KIND); + } + for handler in block_handlers.iter() { + kinds.insert(handler.kind()); + } + + kinds + } + fn start_block(&self) -> BlockNumber { self.start_block } @@ -1090,6 +1117,19 @@ pub struct MappingBlockHandler { pub filter: Option, } +impl MappingBlockHandler { + pub fn kind(&self) -> &str { + match &self.filter { + Some(filter) => match filter { + BlockHandlerFilter::Call => "block_filter_call", + BlockHandlerFilter::Once => "block_filter_once", + BlockHandlerFilter::Polling { .. } => "block_filter_polling", + }, + None => BLOCK_HANDLER_KIND, + } + } +} + #[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] #[serde(tag = "kind", rename_all = "lowercase")] pub enum BlockHandlerFilter { diff --git a/chain/near/src/data_source.rs b/chain/near/src/data_source.rs index 51cf21e730e..7fcac3ca9a0 100644 --- a/chain/near/src/data_source.rs +++ b/chain/near/src/data_source.rs @@ -12,12 +12,15 @@ use graph::{ }, semver, }; +use std::collections::HashSet; use std::sync::Arc; use crate::chain::Chain; use crate::trigger::{NearTrigger, ReceiptWithOutcome}; pub const NEAR_KIND: &str = "near"; +const BLOCK_HANDLER_KIND: &str = "block"; +const RECEIPT_HANDLER_KIND: &str = "receipt"; /// Runtime representation of a data source. 
#[derive(Clone, Debug)] @@ -75,6 +78,20 @@ impl blockchain::DataSource for DataSource { self.source.start_block } + fn handler_kinds(&self) -> HashSet<&str> { + let mut kinds = HashSet::new(); + + if self.handler_for_block().is_some() { + kinds.insert(BLOCK_HANDLER_KIND); + } + + if self.handler_for_receipt().is_some() { + kinds.insert(RECEIPT_HANDLER_KIND); + } + + kinds + } + fn match_and_decode( &self, trigger: &::TriggerData, diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index 79df499a9ad..90912eeee4f 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use anyhow::{anyhow, Error}; use graph::{ @@ -24,7 +24,7 @@ const DYNAMIC_DATA_SOURCE_ERROR: &str = "Substreams do not support dynamic data const TEMPLATE_ERROR: &str = "Substreams do not support templates"; const ALLOWED_MAPPING_KIND: [&str; 1] = ["substreams/graph-entities"]; - +const SUBSTREAMS_HANDLER_KIND: &str = "substreams"; #[derive(Clone, Debug, PartialEq)] /// Represents the DataSource portion of the manifest once it has been parsed /// and the substream spkg has been downloaded + parsed. @@ -80,6 +80,11 @@ impl blockchain::DataSource for DataSource { None } + fn handler_kinds(&self) -> HashSet<&str> { + // This is placeholder, substreams do not have a handler kind. + vec![SUBSTREAMS_HANDLER_KIND].into_iter().collect() + } + // match_and_decode only seems to be used on the default trigger processor which substreams // bypasses so it should be fine to leave it unimplemented. 
fn match_and_decode( diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 03cf5903544..60be17026d2 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -9,7 +9,7 @@ use crate::{ use anyhow::Error; use async_trait::async_trait; use serde::Deserialize; -use std::{convert::TryFrom, sync::Arc}; +use std::{collections::HashSet, convert::TryFrom, sync::Arc}; use super::{ block_stream::{self, BlockStream, FirehoseCursor}, @@ -70,6 +70,10 @@ impl DataSource for MockDataSource { todo!() } + fn handler_kinds(&self) -> HashSet<&str> { + todo!() + } + fn name(&self) -> &str { todo!() } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 2d3ae31a6d6..9d74b4dabb7 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -37,7 +37,7 @@ use serde::{Deserialize, Serialize}; use slog::Logger; use std::{ any::Any, - collections::HashMap, + collections::{HashMap, HashSet}, fmt::{self, Debug}, str::FromStr, sync::Arc, @@ -275,6 +275,8 @@ pub trait DataSource: 'static + Sized + Send + Sync + Clone { } fn runtime(&self) -> Option>>; + fn handler_kinds(&self) -> HashSet<&str>; + /// Checks if `trigger` matches this data source, and if so decodes it into a `MappingTrigger`. /// A return of `Ok(None)` mean the trigger does not match. 
/// diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 95640121e78..52c56764db3 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -22,7 +22,11 @@ use anyhow::Error; use semver::Version; use serde::{de::IntoDeserializer as _, Deserialize, Deserializer}; use slog::{Logger, SendSyncRefUnwindSafeKV}; -use std::{collections::BTreeMap, fmt, sync::Arc}; +use std::{ + collections::{BTreeMap, HashSet}, + fmt, + sync::Arc, +}; use thiserror::Error; #[derive(Debug)] @@ -158,6 +162,13 @@ impl DataSource { } } + pub fn handler_kinds(&self) -> HashSet<&str> { + match self { + Self::Onchain(ds) => ds.handler_kinds(), + Self::Offchain(ds) => vec![ds.handler_kind()].into_iter().collect(), + } + } + pub fn match_and_decode( &self, trigger: &TriggerData, diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index 490b505bb46..f6a04735532 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -33,6 +33,8 @@ lazy_static! 
{ .into_iter() .collect(); } + +const OFFCHAIN_HANDLER_KIND: &str = "offchain"; const NOT_DONE_VALUE: i32 = -1; #[derive(Debug, Clone, PartialEq, Eq)] @@ -158,6 +160,10 @@ impl DataSource { // required for each kind SPEC_VERSION_0_0_7 } + + pub fn handler_kind(&self) -> &str { + OFFCHAIN_HANDLER_KIND + } } impl DataSource { From f2d1d6fedd16781a3f34e739e3c6bdb765ddedd0 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Wed, 23 Aug 2023 21:45:17 +0530 Subject: [PATCH 0413/2104] graph, store: Add "handlers" to subgraph_features table --- graph/src/blockchain/mock.rs | 4 +- graph/src/data/subgraph/mod.rs | 13 ++++++ server/index-node/src/schema.graphql | 1 + .../down.sql | 2 + .../up.sql | 6 +++ store/postgres/src/primary.rs | 43 +++++++++++++------ store/test-store/tests/postgres/subgraph.rs | 4 ++ .../block-handlers/test/test.js | 33 ++++++++++++++ .../non-fatal-errors/test/test.js | 2 + 9 files changed, 94 insertions(+), 14 deletions(-) create mode 100644 store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/down.sql create mode 100644 store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/up.sql diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 60be17026d2..03eb80453d5 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -71,7 +71,9 @@ impl DataSource for MockDataSource { } fn handler_kinds(&self) -> HashSet<&str> { - todo!() + vec!["mock_handler_1", "mock_handler_2"] + .into_iter() + .collect() } fn name(&self) -> &str { diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index a587050bf0d..70f01bd91cf 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -507,6 +507,7 @@ pub struct DeploymentFeatures { pub features: Vec, pub data_source_kinds: Vec, pub network: String, + pub handler_kinds: Vec, } impl IntoValue for DeploymentFeatures { @@ -517,6 +518,7 @@ impl IntoValue for DeploymentFeatures { apiVersion: 
self.api_version, features: self.features, dataSources: self.data_source_kinds, + handlers: self.handler_kinds, network: self.network, } } @@ -695,6 +697,13 @@ impl SubgraphManifest { .map(|v| v.version().map(|v| v.to_string())) .flatten(); + let handler_kinds = self + .data_sources + .iter() + .map(|ds| ds.handler_kinds()) + .flatten() + .collect::>(); + let features: Vec = self .features .iter() @@ -722,6 +731,10 @@ impl SubgraphManifest { features, spec_version, data_source_kinds: data_source_kinds.into_iter().collect_vec(), + handler_kinds: handler_kinds + .into_iter() + .map(|s| s.to_string()) + .collect_vec(), network, } } diff --git a/server/index-node/src/schema.graphql b/server/index-node/src/schema.graphql index 823882ab2ab..4d7e0677934 100644 --- a/server/index-node/src/schema.graphql +++ b/server/index-node/src/schema.graphql @@ -148,6 +148,7 @@ type SubgraphFeatures { specVersion: String! features: [Feature!]! dataSources: [String!]! + handlers: [String!]! network: String } diff --git a/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/down.sql b/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/down.sql new file mode 100644 index 00000000000..4aad5cb0599 --- /dev/null +++ b/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/down.sql @@ -0,0 +1,2 @@ +alter table subgraphs.subgraph_features +drop column handlers; diff --git a/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/up.sql b/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/up.sql new file mode 100644 index 00000000000..5be75ae6c81 --- /dev/null +++ b/store/postgres/migrations/2023-08-23-143628_add_handlers_to_subgraph_features/up.sql @@ -0,0 +1,6 @@ +truncate table subgraphs.subgraph_features; + +alter table + subgraphs.subgraph_features +add + column handlers text [] not null default '{}'; \ No newline at end of file diff --git 
a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 5f640722565..d4c87cb003f 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -85,6 +85,7 @@ table! { api_version -> Nullable, features -> Array, data_sources -> Array, + handlers -> Array, network -> Text, } } @@ -1126,6 +1127,7 @@ impl<'a> Connection<'a> { f::api_version, f::features, f::data_sources, + f::handlers, f::network, )) .first::<( @@ -1134,18 +1136,22 @@ impl<'a> Connection<'a> { Option, Vec, Vec, + Vec, String, )>(conn) .optional()?; let features = features.map( - |(id, spec_version, api_version, features, data_sources, network)| DeploymentFeatures { - id, - spec_version, - api_version, - features, - data_source_kinds: data_sources, - network: network, + |(id, spec_version, api_version, features, data_sources, handlers, network)| { + DeploymentFeatures { + id, + spec_version, + api_version, + features, + data_source_kinds: data_sources, + handler_kinds: handlers, + network: network, + } }, ); @@ -1155,14 +1161,25 @@ impl<'a> Connection<'a> { pub fn create_subgraph_features(&self, features: DeploymentFeatures) -> Result<(), StoreError> { use subgraph_features as f; + let DeploymentFeatures { + id, + spec_version, + api_version, + features, + data_source_kinds, + handler_kinds, + network, + } = features; + let conn = self.conn.as_ref(); let changes = ( - f::id.eq(features.id), - f::spec_version.eq(features.spec_version), - f::api_version.eq(features.api_version), - f::features.eq(features.features), - f::data_sources.eq(features.data_source_kinds), - f::network.eq(features.network), + f::id.eq(id), + f::spec_version.eq(spec_version), + f::api_version.eq(api_version), + f::features.eq(features), + f::data_sources.eq(data_source_kinds), + f::handlers.eq(handler_kinds), + f::network.eq(network), ); insert_into(f::table) diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index a6b04dc4c77..68f372c051d 100644 
--- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -509,6 +509,7 @@ fn subgraph_features() { features, data_source_kinds, network, + handler_kinds, } = get_subgraph_features(id.to_string()).unwrap(); assert_eq!(NAME, subgraph_id.as_str()); @@ -523,6 +524,9 @@ fn subgraph_features() { features ); assert_eq!(1, data_source_kinds.len()); + assert_eq!(handler_kinds.len(), 2); + assert!(handler_kinds.contains(&"mock_handler_1".to_string())); + assert!(handler_kinds.contains(&"mock_handler_2".to_string())); test_store::remove_subgraph(&id); let features = get_subgraph_features(id.to_string()); diff --git a/tests/integration-tests/block-handlers/test/test.js b/tests/integration-tests/block-handlers/test/test.js index e5398a9fb67..b74fc45414e 100644 --- a/tests/integration-tests/block-handlers/test/test.js +++ b/tests/integration-tests/block-handlers/test/test.js @@ -4,6 +4,7 @@ const { system, patching } = require("gluegun"); const { createApolloFetch } = require("apollo-fetch"); const Web3 = require("web3"); +const assert = require("assert"); const Contract = artifacts.require("./Contract.sol"); const srcDir = path.join(__dirname, ".."); @@ -211,4 +212,36 @@ contract("Contract", (accounts) => { initializes: [{ id: "1", block: "1" }], }); }); + + it("test subgraphFeatures endpoint returns handlers correctly", async () => { + let meta = await fetchSubgraph({ + query: `{ _meta { deployment } }`, + }); + + let deployment = meta.data._meta.deployment; + console.log("deployment", deployment); + + let subgraph_features = await fetchSubgraphs({ + query: `query GetSubgraphFeatures($deployment: String!) 
{ + subgraphFeatures(subgraphId: $deployment) { + specVersion + apiVersion + features + dataSources + network + handlers + } + }`, + variables: { deployment }, + }); + + expect(subgraph_features.data.subgraphFeatures.handlers) + .to.be.an("array") + .that.include.members([ + "block_filter_polling", + "block_filter_once", + "block", + "event", + ]); + }); }); diff --git a/tests/integration-tests/non-fatal-errors/test/test.js b/tests/integration-tests/non-fatal-errors/test/test.js index 0f68e2a6004..84bd6efc0c5 100644 --- a/tests/integration-tests/non-fatal-errors/test/test.js +++ b/tests/integration-tests/non-fatal-errors/test/test.js @@ -97,6 +97,7 @@ contract("Contract", (accounts) => { features dataSources network + handlers } }`, variables: { deployment }, @@ -108,6 +109,7 @@ contract("Contract", (accounts) => { apiVersion: "0.0.6", features: ["nonFatalErrors"], dataSources: ["ethereum/contract"], + handlers: ["block"], network: "test", }, }); From cb7d78e40ff05b4dd3e70eec195696c07206dc80 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Thu, 24 Aug 2023 11:54:54 +0530 Subject: [PATCH 0414/2104] docs: docs for subgraph_features table --- docs/implementation/metadata.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/implementation/metadata.md b/docs/implementation/metadata.md index a999c064a2a..562bc371e71 100644 --- a/docs/implementation/metadata.md +++ b/docs/implementation/metadata.md @@ -141,3 +141,16 @@ correctly across index node restarts. The table `subgraphs.table_stats` stores which tables for a deployment should have the 'account-like' optimization turned on. + +### `subgraphs.subgraph_features` + +Details about features that a deployment uses, Maintained in the primary. 
+ +| Column | Type | Use | +|----------------|-----------|-------------| +| `id` | `text!` | primary key | +| `spec_version` | `text!` | | +| `api_version` | `text` | | +| `features` | `text[]!` | | +| `data_sources` | `text[]!` | | +| `handlers` | `text[]!` | | From 70a2c99eab1da27e970fe6fe22459fbd26417f28 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Fri, 15 Sep 2023 15:59:29 +0100 Subject: [PATCH 0415/2104] add trace_id to substreams logger (#4868) --- chain/substreams/src/mapper.rs | 7 ++++++- graph/src/blockchain/block_stream.rs | 2 +- graph/src/blockchain/substreams_block_stream.rs | 6 +++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 91707d3e138..9efa69bc5b0 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -3,6 +3,7 @@ use graph::blockchain::block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; use graph::prelude::{async_trait, BlockHash, BlockNumber, BlockPtr, Logger}; +use graph::slog::o; use graph::substreams::Clock; use graph::substreams_rpc::response::Message as SubstreamsMessage; use prost::Message; @@ -13,10 +14,14 @@ pub struct Mapper {} impl SubstreamsMapper for Mapper { async fn to_block_stream_event( &self, - logger: &Logger, + logger: &mut Logger, message: Option, ) -> Result>, SubstreamsError> { match message { + Some(SubstreamsMessage::Session(session_init)) => { + *logger = logger.new(o!("trace_id" => session_init.trace_id)); + return Ok(None); + } Some(SubstreamsMessage::BlockUndoSignal(undo)) => { let valid_block = match undo.last_valid_block { Some(clock) => clock, diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 301b85f610a..6dfec8abbd6 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -316,7 +316,7 @@ pub trait FirehoseMapper: Send + Sync { pub trait SubstreamsMapper: 
Send + Sync { async fn to_block_stream_event( &self, - logger: &Logger, + logger: &mut Logger, response: Option, // adapter: &Arc>, // filter: &C::TriggerFilter, diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index 19f767312b5..ebcb37a1328 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -180,7 +180,7 @@ fn stream_blocks>( try_stream! { let endpoint = client.firehose_endpoint()?; - let logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); + let mut logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); loop { info!( @@ -224,7 +224,7 @@ fn stream_blocks>( match process_substreams_response( response, mapper.as_ref(), - &logger, + &mut logger, ).await { Ok(block_response) => { match block_response { @@ -288,7 +288,7 @@ enum BlockResponse { async fn process_substreams_response>( result: Result, mapper: &F, - logger: &Logger, + logger: &mut Logger, ) -> Result>, Error> { let response = match result { Ok(v) => v, From 452bec516a6b76cd332dc238a1989af42d53f531 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 11 Sep 2023 16:15:29 -0700 Subject: [PATCH 0416/2104] Revert "graph, graphql: Add feature flag to disable child optimization" This reverts commit 9c65a8d8fa4daaff5150fb29c09f8289fe0e9757. --- graph/src/env/store.rs | 7 ------- graphql/src/store/prefetch.rs | 6 +----- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 161c1a81e86..48150df9f4c 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -109,10 +109,6 @@ pub struct EnvVarsStore { /// is 10_000 which corresponds to 10MB. Setting this to 0 disables /// write batching. pub write_batch_size: usize, - /// Disable the optimization that skips certain child queries for - /// entities. 
Only as a safety valve. Remove after 2023-09-30 if the - /// optimization has not caused any issues - pub disable_child_optimization: bool, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -154,7 +150,6 @@ impl From for EnvVarsStore { history_slack_factor: x.history_slack_factor.0, write_batch_duration: Duration::from_secs(x.write_batch_duration_in_secs), write_batch_size: x.write_batch_size * 1_000, - disable_child_optimization: x.disable_child_optimization.0, } } } @@ -208,8 +203,6 @@ pub struct InnerStore { write_batch_duration_in_secs: u64, #[envconfig(from = "GRAPH_STORE_WRITE_BATCH_SIZE", default = "10000")] write_batch_size: usize, - #[envconfig(from = "GRAPH_STORE_DISABLE_CHILD_OPTIMIZATION", default = "false")] - disable_child_optimization: EnvVarBoolean, } #[derive(Clone, Copy, Debug)] diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index ff6e1e5781e..c2531977e1e 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -730,11 +730,7 @@ fn fetch( // See if we can short-circuit query execution and just reuse what // we already have in memory. We could do this probably even with // multiple windows, but this covers the most common case. - if !ENV_VARS.store.disable_child_optimization - && windows.len() == 1 - && windows[0].link.has_child_ids() - && selects_id_only(field, &query) - { + if windows.len() == 1 && windows[0].link.has_child_ids() && selects_id_only(field, &query) { let mut windows = windows; // unwrap: we checked that len is 1 let window = windows.pop().unwrap(); From 215adc5ce379c46cfd5ff4008b853c45c1fe26d9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 11 Sep 2023 16:15:38 -0700 Subject: [PATCH 0417/2104] Revert "graph, graphql: Optimize away unneeded child queries" This reverts commit 2d6c531f6cdc96507e1b550af773eb3818d5f7a9. 
--- graph/src/components/store/mod.rs | 80 +------------------------------ graph/src/data/query/error.rs | 10 +--- graphql/src/store/prefetch.rs | 52 +++----------------- 3 files changed, 9 insertions(+), 133 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 136ea83d9bf..5f08bcce61c 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -30,7 +30,7 @@ use crate::blockchain::Block; use crate::components::store::write::EntityModification; use crate::data::store::scalar::Bytes; use crate::data::store::*; -use crate::data::value::{Object, Word}; +use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::schema::InputSchema; use crate::util::intern; @@ -483,84 +483,6 @@ pub enum EntityLink { Parent(EntityType, ParentLink), } -impl EntityLink { - /// Return a list of objects that have only the `id`, parent id, and - /// typename set using the child ids from `self` when `self` is - /// `Parent`. If `self` is `Direct`, return `None` - /// - /// The list that is returned is sorted and truncated to `first` many - /// entries. 
- /// - /// This makes it possible to avoid running a query when all that is - /// needed is the `id` of the children - pub fn to_basic_objects(self, parents: &Vec, first: usize) -> Option> { - use crate::data::value::Value as V; - - fn basic_object(entity_type: &EntityType, parent: &str, child: String) -> Object { - let mut obj = Vec::new(); - obj.push((ID.clone(), V::String(child))); - obj.push((Word::from("__typename"), V::String(entity_type.to_string()))); - obj.push((PARENT_ID.clone(), V::String(parent.to_string()))); - Object::from_iter(obj) - } - - fn basic_objects( - entity_type: &EntityType, - parent: &str, - children: Vec, - ) -> Vec { - children - .into_iter() - .map(|child| basic_object(entity_type, parent, child)) - .collect() - } - - fn obj_key<'a>(obj: &'a Object) -> Option<(&'a str, &'a str)> { - match (obj.get(&*PARENT_ID), obj.get(ID.as_str())) { - (Some(V::String(p)), Some(V::String(id))) => Some((p, id)), - _ => None, - } - } - - fn obj_cmp(a: &Object, b: &Object) -> std::cmp::Ordering { - obj_key(a).cmp(&obj_key(b)) - } - - match self { - EntityLink::Direct(_, _) => return None, - EntityLink::Parent(entity_type, link) => { - let mut objects = Vec::new(); - match link { - ParentLink::List(ids) => { - for (parent, children) in parents.iter().zip(ids) { - objects.extend(basic_objects(&entity_type, parent, children)); - } - } - ParentLink::Scalar(ids) => { - for (parent, child) in parents.iter().zip(ids) { - if let Some(child) = child { - objects.push(basic_object(&entity_type, parent, child)); - } - } - } - } - // Sort the objects by parent id and child id just as - // running a query would - objects.sort_by(obj_cmp); - objects.truncate(first); - Some(objects) - } - } - } - - pub fn has_child_ids(&self) -> bool { - match self { - EntityLink::Direct(_, _) => false, - EntityLink::Parent(_, _) => true, - } - } -} - /// Window results of an `EntityQuery` query along the parent's id: /// the `order_by`, `order_direction`, and `range` of the query apply 
to /// entities that belong to the same parent. Only entities that belong to diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index 4764b47c1af..23715f1d614 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -75,8 +75,6 @@ pub enum QueryExecutionError { InvalidSubgraphManifest, ResultTooBig(usize, usize), DeploymentNotFound(String), - IdMissing, - IdNotString, } impl QueryExecutionError { @@ -134,9 +132,7 @@ impl QueryExecutionError { | InvalidSubgraphManifest | ValidationError(_, _) | ResultTooBig(_, _) - | DeploymentNotFound(_) - | IdMissing - | IdNotString => false, + | DeploymentNotFound(_) => false, } } } @@ -283,9 +279,7 @@ impl fmt::Display for QueryExecutionError { SubgraphManifestResolveError(e) => write!(f, "failed to resolve subgraph manifest: {}", e), InvalidSubgraphManifest => write!(f, "invalid subgraph manifest file"), ResultTooBig(actual, limit) => write!(f, "the result size of {} is larger than the allowed limit of {}", actual, limit), - DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name), - IdMissing => write!(f, "Entity is missing an `id` attribute"), - IdNotString => write!(f, "Entity is missing an `id` attribute") + DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name) } } } diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index c2531977e1e..454a8817ff9 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -1,11 +1,12 @@ //! Run a GraphQL query and fetch all the entitied needed to build the //! 
final result +use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; -use graph::data::store::{ID, PARENT_ID}; +use graph::data::store::PARENT_ID; use graph::data::value::{Object, Word}; -use graph::prelude::{r, CacheWeight, CheapClone, EntityQuery, EntityRange}; +use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; use graph::util::cache_weight; use std::collections::BTreeMap; @@ -170,11 +171,11 @@ impl ValueExt for r::Value { } impl Node { - fn id(&self) -> Result { + fn id(&self) -> Result { match self.get("id") { - None => Err(QueryExecutionError::IdMissing), + None => Err(anyhow!("Entity is missing an `id` attribute")), Some(r::Value::String(s)) => Ok(s.clone()), - _ => Err(QueryExecutionError::IdNotString), + _ => Err(anyhow!("Entity has non-string `id` attribute")), } } @@ -657,30 +658,6 @@ fn execute_field( .map_err(|e| vec![e]) } -/// Check whether `field` only selects the `id` of its children and whether -/// it is safe to skip running `query` if we have all child ids in memory -/// already. -fn selects_id_only(field: &a::Field, query: &EntityQuery) -> bool { - if query.filter.is_some() || query.range.skip != 0 { - return false; - } - match &query.order { - EntityOrder::Ascending(attr, _) => { - if attr != ID.as_str() { - return false; - } - } - _ => { - return false; - } - } - field - .selection_set - .single_field() - .map(|field| field.name.as_str() == ID.as_str()) - .unwrap_or(false) -} - /// Query child entities for `parents` from the store. The `join` indicates /// in which child field to look for the parent's id/join field. When /// `is_single` is `true`, there is at most one child per parent. @@ -727,23 +704,6 @@ fn fetch( if windows.is_empty() { return Ok((vec![], Trace::None)); } - // See if we can short-circuit query execution and just reuse what - // we already have in memory. We could do this probably even with - // multiple windows, but this covers the most common case. 
- if windows.len() == 1 && windows[0].link.has_child_ids() && selects_id_only(field, &query) { - let mut windows = windows; - // unwrap: we checked that len is 1 - let window = windows.pop().unwrap(); - let parent_ids = parents - .iter() - .map(|parent| parent.id()) - .collect::>() - .map_err(QueryExecutionError::from)?; - // unwrap: we checked in the if condition that the window has child ids - let first = query.range.first.unwrap_or(EntityRange::FIRST) as usize; - let objs = window.link.to_basic_objects(&parent_ids, first).unwrap(); - return Ok((objs.into_iter().map(Node::from).collect(), Trace::None)); - } query.collection = EntityCollection::Window(windows); } resolver From 385fd3ff701027edd7e4f2ea3bf523fe3d297762 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 11 Sep 2023 16:15:51 -0700 Subject: [PATCH 0418/2104] Revert "graph, graphql, store: Fix ParentLink entries" This reverts commit 906f5e69881df1975115934a8715801d04f92e1e. --- graph/src/components/store/mod.rs | 2 +- graphql/src/store/prefetch.rs | 49 +++++++++---------- store/postgres/src/relational_queries.rs | 8 +-- .../tests/postgres/relational_bytes.rs | 2 +- 4 files changed, 25 insertions(+), 36 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 5f08bcce61c..1afed696c23 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -461,7 +461,7 @@ pub enum ParentLink { /// The parent stores the id of one child. 
The ith entry in the /// vector contains the id of the child of the parent with id /// `EntityWindow.ids[i]` - Scalar(Vec>), + Scalar(Vec), } /// How many children a parent can have when the child stores diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 454a8817ff9..4aafec966a5 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -296,12 +296,10 @@ impl<'a> JoinCond<'a> { // those and the parent ids let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() - .map(|(id, node)| { - ( - id, - node.get(child_field) - .and_then(|value| value.as_str().map(|s| s.to_string())), - ) + .filter_map(|(id, node)| { + node.get(child_field) + .and_then(|value| value.as_str()) + .map(|child_id| (id, child_id.to_owned())) }) .unzip(); @@ -313,28 +311,25 @@ impl<'a> JoinCond<'a> { // parent ids let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() - .map(|(id, node)| { - ( - id, - node.get(child_field) - .and_then(|value| match value { - r::Value::List(values) => { - let values: Vec<_> = values - .iter() - .filter_map(|value| { - value.as_str().map(|value| value.to_owned()) - }) - .collect(); - if values.is_empty() { - None - } else { - Some(values) - } + .filter_map(|(id, node)| { + node.get(child_field) + .and_then(|value| match value { + r::Value::List(values) => { + let values: Vec<_> = values + .iter() + .filter_map(|value| { + value.as_str().map(|value| value.to_owned()) + }) + .collect(); + if values.is_empty() { + None + } else { + Some(values) } - _ => None, - }) - .unwrap_or(Vec::new()), - ) + } + _ => None, + }) + .map(|child_ids| (id, child_ids)) }) .unzip(); (ids, ParentLink::List(child_ids)) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index ab28edb12df..57b558bc17e 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -2103,13 +2103,7 @@ enum ParentIds { impl ParentIds { fn new(link: 
ParentLink) -> Self { match link { - ParentLink::Scalar(child_ids) => { - // Remove `None` child ids; query generation doesn't require - // that parent and child ids are in strict 1:1 - // correspondence - let child_ids = child_ids.into_iter().filter_map(|c| c).collect(); - ParentIds::Scalar(child_ids) - } + ParentLink::Scalar(child_ids) => ParentIds::Scalar(child_ids), ParentLink::List(child_ids) => { // Postgres will only accept child_ids, which is a Vec> // if all Vec are the same length. We therefore pad diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index d6b41f08061..554abbd591b 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -555,7 +555,7 @@ fn query() { ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], link: EntityLink::Parent( THING.clone(), - ParentLink::Scalar(vec![Some(ROOT.to_owned()), Some(ROOT.to_owned())]), + ParentLink::Scalar(vec![ROOT.to_owned(), ROOT.to_owned()]), ), column_names: AttributeNames::All, }]); From 24bc7b0a232569ef11fba505efa06703fdeebaaa Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 20 Sep 2023 12:49:31 +0100 Subject: [PATCH 0419/2104] Test coverage for bundlr manifest + filename use case (#4865) --- graph/src/components/link_resolver.rs | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/graph/src/components/link_resolver.rs b/graph/src/components/link_resolver.rs index 0c51b030202..aff56bf3d40 100644 --- a/graph/src/components/link_resolver.rs +++ b/graph/src/components/link_resolver.rs @@ -141,3 +141,35 @@ pub enum ArweaveClientError { #[error("Unknown error")] Unknown(#[from] reqwest::Error), } + +#[cfg(test)] +mod test { + use serde_derive::Deserialize; + + use crate::{ + components::link_resolver::{ArweaveClient, ArweaveResolver}, + data_source::offchain::Base64, + }; + + // This test ensures that passing txid/filename works when the txid 
refers to manifest. + // the actual data seems to have some binary header and footer so these ranges were found + // by inspecting the data with hexdump. + #[tokio::test] + async fn fetch_bundler_url() { + let url = Base64::from("Rtdn3QWEzM88MPC2dpWyV5waO7Vuz3VwPl_usS2WoHM/DriveManifest.json"); + #[derive(Deserialize, Debug, PartialEq)] + struct Manifest { + pub manifest: String, + } + + let client = ArweaveClient::default(); + let no_header = &client.get(&url).await.unwrap()[1295..320078]; + let content: Manifest = serde_json::from_slice(no_header).unwrap(); + assert_eq!( + content, + Manifest { + manifest: "arweave/paths".to_string(), + } + ); + } +} From d0c721545d5be2e20f0836965951e46e3132b185 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 20 Sep 2023 13:09:06 +0100 Subject: [PATCH 0420/2104] Move the substreams block stream process to earlier in the pipeline when parallel processing is possible (#4851) --- Cargo.lock | 50 ++-- chain/ethereum/src/chain.rs | 13 + chain/near/src/chain.rs | 13 + chain/substreams/examples/substreams.rs | 2 +- chain/substreams/src/block_ingestor.rs | 2 +- chain/substreams/src/block_stream.rs | 25 +- chain/substreams/src/chain.rs | 34 ++- chain/substreams/src/mapper.rs | 271 +++++++++++++++++- chain/substreams/src/trigger.rs | 267 ++--------------- core/src/subgraph/runner.rs | 6 +- core/src/subgraph/stream.rs | 31 +- graph/Cargo.toml | 9 +- graph/src/blockchain/block_stream.rs | 32 ++- graph/src/blockchain/firehose_block_stream.rs | 10 +- graph/src/blockchain/mod.rs | 9 - graph/src/blockchain/polling_block_stream.rs | 8 +- .../src/blockchain/substreams_block_stream.rs | 8 +- graph/src/components/store/traits.rs | 6 + .../subgraph/proof_of_indexing/event.rs | 24 +- .../subgraph/proof_of_indexing/mod.rs | 62 ++-- graph/src/data/store/mod.rs | 12 +- graphql/Cargo.toml | 4 +- runtime/wasm/src/host_exports.rs | 44 +-- store/postgres/Cargo.toml | 2 +- store/postgres/src/writable.rs | 4 + 
store/test-store/tests/graph/entity_cache.rs | 4 + store/test-store/tests/postgres/store.rs | 6 +- tests/src/fixture/mod.rs | 31 +- 28 files changed, 599 insertions(+), 390 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f356795ae4..057c40dd1bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -605,12 +605,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "const_fn_assert" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27d614f23f34f7b5165a77dc1591f497e2518f9cec4b4f4b92bfc4dc6cf7a190" - [[package]] name = "constant_time_eq" version = "0.1.5" @@ -1239,7 +1233,7 @@ dependencies = [ "serde_json", "sha3", "thiserror", - "uint", + "uint 0.9.1", ] [[package]] @@ -1266,7 +1260,7 @@ dependencies = [ "impl-rlp", "impl-serde", "primitive-types", - "uint", + "uint 0.9.1", ] [[package]] @@ -1600,8 +1594,8 @@ dependencies = [ "slog-async", "slog-envlogger", "slog-term", - "stable-hash 0.3.3", - "stable-hash 0.4.2", + "stable-hash 0.3.4", + "stable-hash 0.4.4", "strum", "strum_macros", "thiserror", @@ -1773,8 +1767,8 @@ dependencies = [ "indexmap 2.0.0", "lazy_static", "parking_lot 0.12.1", - "stable-hash 0.3.3", - "stable-hash 0.4.2", + "stable-hash 0.3.4", + "stable-hash 0.4.4", ] [[package]] @@ -1962,7 +1956,7 @@ dependencies = [ "pretty_assertions", "rand", "serde", - "stable-hash 0.3.3", + "stable-hash 0.3.4", "uuid", ] @@ -2288,12 +2282,11 @@ dependencies = [ [[package]] name = "ibig" -version = "0.3.2" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5022ee7f7a2feb0bd2fdc4b8ec882cd14903cebf33e7c1847e3f3a282f8b7" +checksum = "d7d04a53d0ca4a37b47741ff98dd517a0e1e6a0ec22a72c748e50197052d595b" dependencies = [ "cfg-if 1.0.0", - "const_fn_assert", "num-traits", "rand", "static_assertions", @@ -3310,7 +3303,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", - "uint", + "uint 0.9.1", ] [[package]] @@ -4237,9 +4230,8 @@ checksum = 
"6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stable-hash" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10196e68950ed99c0d2db7a30ffaf4dfe0bbf2f9af2ae0457ee8ad396e0a2dd7" +version = "0.3.4" +source = "git+https://github.com/graphprotocol/stable-hash?branch=old#7af76261e8098c58bfadd5b7c31810e1c0fdeccb" dependencies = [ "blake3 0.3.8", "firestorm 0.4.6", @@ -4251,9 +4243,8 @@ dependencies = [ [[package]] name = "stable-hash" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af75bd21beb162eab69de76abbb803d4111735ead00d5086dcc6f4ddb3b53cc9" +version = "0.4.4" +source = "git+https://github.com/graphprotocol/stable-hash?branch=main#e50aabef55b8c4de581ca5c4ffa7ed8beed7e998" dependencies = [ "blake3 0.3.8", "firestorm 0.5.0", @@ -4261,6 +4252,7 @@ dependencies = [ "lazy_static", "leb128", "num-traits", + "uint 0.8.5", "xxhash-rust", ] @@ -4992,6 +4984,18 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "uint" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9db035e67dfaf7edd9aebfe8676afcd63eed53c8a4044fed514c8cccf1835177" +dependencies = [ + "byteorder", + "crunchy", + "rustc-hex", + "static_assertions", +] + [[package]] name = "uint" version = "0.9.1" diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index c3ad5ca6861..1a125ad48a9 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -10,6 +10,7 @@ use graph::prelude::{ BlockHash, ComponentLoggerConfig, ElasticComponentLoggerConfig, EthereumBlock, EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, MetricsRegistry, }; +use graph::schema::InputSchema; use graph::{ blockchain::{ block_stream::{ @@ -102,6 +103,18 @@ impl BlockStreamBuilder for 
EthereumStreamBuilder { ))) } + async fn build_substreams( + &self, + _chain: &Chain, + _schema: Arc, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _subgraph_current_block: Option, + _filter: Arc<::TriggerFilter>, + ) -> Result>> { + unimplemented!() + } + async fn build_polling( &self, chain: &Chain, diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index b689493bdd9..f25c36b71df 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -9,6 +9,7 @@ use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::FirehoseEndpoint; use graph::prelude::{MetricsRegistry, TryFutureExt}; +use graph::schema::InputSchema; use graph::{ anyhow::Result, blockchain::{ @@ -40,6 +41,18 @@ pub struct NearStreamBuilder {} #[async_trait] impl BlockStreamBuilder for NearStreamBuilder { + async fn build_substreams( + &self, + _chain: &Chain, + _schema: Arc, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _subgraph_current_block: Option, + _filter: Arc<::TriggerFilter>, + ) -> Result>> { + unimplemented!() + } + async fn build_firehose( &self, chain: &Chain, diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index 619aaf0398f..b7caaf08dce 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -68,7 +68,7 @@ async fn main() -> Result<(), Error> { client, None, None, - Arc::new(Mapper {}), + Arc::new(Mapper { schema: None }), package.modules.clone(), module_name.to_string(), vec![12369621], diff --git a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs index 98a0729fa11..c1a047d7714 100644 --- a/chain/substreams/src/block_ingestor.rs +++ b/chain/substreams/src/block_ingestor.rs @@ -125,7 +125,7 @@ impl SubstreamsBlockIngestor { #[async_trait] impl BlockIngestor for SubstreamsBlockIngestor { async fn run(self: Box) { - let mapper = 
Arc::new(Mapper {}); + let mapper = Arc::new(Mapper { schema: None }); let mut latest_cursor = self.fetch_head_cursor().await; let mut backoff = ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index 736db1fcd7e..c0e14c9b9a4 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -12,6 +12,7 @@ use graph::{ components::store::DeploymentLocator, data::subgraph::UnifiedMappingApiVersion, prelude::{async_trait, BlockNumber, BlockPtr}, + schema::InputSchema, slog::o, }; @@ -30,17 +31,18 @@ impl BlockStreamBuilder { /// is very similar, so we can re-use the configuration and the builder for it. /// This is probably something to improve but for now it works. impl BlockStreamBuilderTrait for BlockStreamBuilder { - async fn build_firehose( + async fn build_substreams( &self, chain: &Chain, + schema: Arc, deployment: DeploymentLocator, block_cursor: FirehoseCursor, - _start_blocks: Vec, subgraph_current_block: Option, - filter: Arc, - _unified_api_version: UnifiedMappingApiVersion, + filter: Arc<::TriggerFilter>, ) -> Result>> { - let mapper = Arc::new(Mapper {}); + let mapper = Arc::new(Mapper { + schema: Some(schema), + }); let logger = chain .logger_factory @@ -62,6 +64,19 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { ))) } + async fn build_firehose( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _start_blocks: Vec, + _subgraph_current_block: Option, + _filter: Arc, + _unified_api_version: UnifiedMappingApiVersion, + ) -> Result>> { + unimplemented!() + } + async fn build_polling( &self, _chain: &Chain, diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index c1c3b15fbc8..0c05d7fea54 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -3,12 +3,11 @@ use crate::{data_source::*, EntityChanges, TriggerData, 
TriggerFilter, TriggersA use anyhow::Error; use graph::blockchain::client::ChainClient; use graph::blockchain::{ - BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, EmptyNodeCapabilities, - NoopRuntimeAdapter, + BasicBlockchainBuilder, BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter, }; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, EntityKey}; use graph::firehose::FirehoseEndpoints; -use graph::prelude::{BlockHash, CheapClone, LoggerFactory, MetricsRegistry}; +use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; use graph::{ blockchain::{ self, @@ -20,13 +19,29 @@ use graph::{ prelude::{async_trait, BlockNumber, ChainStore}, slog::Logger, }; + use std::sync::Arc; +// ParsedChanges are an internal representation of the equivalent operations defined on the +// graph-out format used by substreams. +// Unset serves as a sentinel value, if for some reason an unknown value is sent or the value +// was empty then it's probably an unintended behaviour. This code was moved here for performance +// reasons, but the validation is still performed during trigger processing so while Unset will +// very likely just indicate an error somewhere, as far as the stream is concerned we just pass +// that along and let the downstream components deal with it. 
+#[derive(Debug, Clone)] +pub enum ParsedChanges { + Unset, + Delete(EntityKey), + Upsert { key: EntityKey, entity: Entity }, +} + #[derive(Default, Debug, Clone)] pub struct Block { pub hash: BlockHash, pub number: BlockNumber, pub changes: EntityChanges, + pub parsed_changes: Vec, } impl blockchain::Block for Block { @@ -112,19 +127,18 @@ impl Blockchain for Chain { &self, deployment: DeploymentLocator, store: impl DeploymentCursorTracker, - start_blocks: Vec, + _start_blocks: Vec, filter: Arc, - unified_api_version: UnifiedMappingApiVersion, + _unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { self.block_stream_builder - .build_firehose( + .build_substreams( self, + store.input_schema(), deployment, store.firehose_cursor(), - start_blocks, store.block_ptr(), filter, - unified_api_version, ) .await } @@ -177,7 +191,7 @@ impl Blockchain for Chain { } } -impl BlockchainBuilder for BasicBlockchainBuilder { +impl blockchain::BlockchainBuilder for BasicBlockchainBuilder { fn build(self) -> super::Chain { let BasicBlockchainBuilder { logger_factory, diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 9efa69bc5b0..49323c43469 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -1,14 +1,33 @@ -use crate::{Block, Chain, EntityChanges, TriggerData}; +use std::collections::HashMap; +use std::str::FromStr; +use std::sync::Arc; + +use crate::codec::entity_change; +use crate::{codec, Block, Chain, EntityChanges, ParsedChanges, TriggerData}; use graph::blockchain::block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; -use graph::prelude::{async_trait, BlockHash, BlockNumber, BlockPtr, Logger}; +use graph::components::store::{EntityKey, EntityType}; +use graph::data::store::scalar::Bytes; +use graph::data::store::IdType; +use graph::data::value::Word; +use graph::data_source::CausalityRegion; +use graph::prelude::BigDecimal; +use 
graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, BlockPtr, Logger, Value}; +use graph::schema::InputSchema; use graph::slog::o; use graph::substreams::Clock; use graph::substreams_rpc::response::Message as SubstreamsMessage; use prost::Message; -pub struct Mapper {} +// Mapper will transform the proto content coming from substreams in the graph-out format +// into the internal Block representation. If schema is passed then additional transformation +// from the substreams block representation is performed into the Entity model used by +// the store. If schema is None then only the original block is passed. This None should only +// be used for block ingestion where entity content is empty and gets discarded. +pub struct Mapper { + pub schema: Option>, +} #[async_trait] impl SubstreamsMapper for Mapper { @@ -67,6 +86,15 @@ impl SubstreamsMapper for Mapper { }, }; + let parsed_changes = match self.schema.as_ref() { + Some(schema) => parse_changes(&changes, schema)?, + None => vec![], + }; + let mut triggers = vec![]; + if changes.entity_changes.len() >= 1 { + triggers.push(TriggerData {}); + } + // Even though the trigger processor for substreams doesn't care about TriggerData // there are a bunch of places in the runner that check if trigger data // empty and skip processing if so. 
This will prolly breakdown @@ -80,8 +108,9 @@ impl SubstreamsMapper for Mapper { hash, number, changes, + parsed_changes, }, - vec![TriggerData {}], + triggers, logger, ), FirehoseCursor::from(cursor.clone()), @@ -94,3 +123,237 @@ impl SubstreamsMapper for Mapper { } } } + +fn parse_changes( + changes: &EntityChanges, + schema: &Arc, +) -> anyhow::Result> { + let mut parsed_changes = vec![]; + for entity_change in changes.entity_changes.iter() { + let mut parsed_data: HashMap = HashMap::default(); + let entity_type = EntityType::new(entity_change.entity.to_string()); + + // Make sure that the `entity_id` gets set to a value + // that is safe for roundtrips through the database. In + // particular, if the type of the id is `Bytes`, we have + // to make sure that the `entity_id` starts with `0x` as + // that will be what the key for such an entity have + // when it is read from the database. + // + // Needless to say, this is a very ugly hack, and the + // real fix is what's described in [this + // issue](https://github.com/graphprotocol/graph-node/issues/4663) + let entity_id: String = match schema.id_type(&entity_type)? 
{ + IdType::String => entity_change.id.clone(), + IdType::Bytes => { + if entity_change.id.starts_with("0x") { + entity_change.id.clone() + } else { + format!("0x{}", entity_change.id) + } + } + }; + let key = EntityKey { + entity_type, + entity_id: Word::from(entity_id), + causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data + }; + let id = schema.id_value(&key)?; + parsed_data.insert(Word::from("id"), id); + + let changes = match entity_change.operation() { + entity_change::Operation::Create | entity_change::Operation::Update => { + for field in entity_change.fields.iter() { + let new_value: &codec::value::Typed = match &field.new_value { + Some(codec::Value { + typed: Some(new_value), + }) => &new_value, + _ => continue, + }; + + let value: Value = decode_value(new_value)?; + *parsed_data + .entry(Word::from(field.name.as_str())) + .or_insert(Value::Null) = value; + } + let entity = schema + .make_entity(parsed_data) + .map_err(anyhow::Error::from)?; + + ParsedChanges::Upsert { key, entity } + } + entity_change::Operation::Delete => ParsedChanges::Delete(key), + entity_change::Operation::Unset => ParsedChanges::Unset, + }; + parsed_changes.push(changes); + } + + Ok(parsed_changes) +} + +fn decode_value(value: &crate::codec::value::Typed) -> anyhow::Result { + use codec::value::Typed; + + match value { + Typed::Int32(new_value) => Ok(Value::Int(*new_value)), + + Typed::Bigdecimal(new_value) => BigDecimal::from_str(new_value) + .map(Value::BigDecimal) + .map_err(|err| anyhow::Error::from(err)), + + Typed::Bigint(new_value) => BigInt::from_str(new_value) + .map(Value::BigInt) + .map_err(|err| anyhow::Error::from(err)), + + Typed::String(new_value) => { + let mut string = new_value.clone(); + + // Strip null characters since they are not accepted by Postgres. 
+ if string.contains('\u{0000}') { + string = string.replace('\u{0000}', ""); + } + Ok(Value::String(string)) + } + + Typed::Bytes(new_value) => base64::decode(new_value) + .map(|bs| Value::Bytes(Bytes::from(bs))) + .map_err(|err| anyhow::Error::from(err)), + + Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), + + Typed::Array(arr) => arr + .value + .iter() + .filter_map(|item| item.typed.as_ref().map(decode_value)) + .collect::>>() + .map(Value::List), + } +} + +#[cfg(test)] +mod test { + use std::{ops::Add, str::FromStr}; + + use super::decode_value; + use crate::codec::value::Typed; + use crate::codec::{Array, Value}; + use graph::{ + data::store::scalar::Bytes, + prelude::{BigDecimal, BigInt, Value as GraphValue}, + }; + + #[test] + fn validate_substreams_field_types() { + struct Case { + name: String, + value: Value, + expected_value: GraphValue, + } + + let cases = vec![ + Case { + name: "string value".to_string(), + value: Value { + typed: Some(Typed::String( + "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d" + .to_string(), + )), + }, + expected_value: GraphValue::String( + "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d".to_string(), + ), + }, + Case { + name: "bytes value".to_string(), + value: Value { + typed: Some(Typed::Bytes( + base64::encode( + hex::decode( + "445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", + ) + .unwrap(), + ) + .into_bytes(), + )), + }, + expected_value: GraphValue::Bytes( + Bytes::from_str( + "0x445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", + ) + .unwrap(), + ), + }, + Case { + name: "int value for block".to_string(), + value: Value { + typed: Some(Typed::Int32(12369760)), + }, + expected_value: GraphValue::Int(12369760), + }, + Case { + name: "negative int value".to_string(), + value: Value { + typed: Some(Typed::Int32(-12369760)), + }, + expected_value: GraphValue::Int(-12369760), + }, + Case { + name: "big int".to_string(), + value: Value { + 
typed: Some(Typed::Bigint("123".to_string())), + }, + expected_value: GraphValue::BigInt(BigInt::from(123u64)), + }, + Case { + name: "big int > u64".to_string(), + value: Value { + typed: Some(Typed::Bigint( + BigInt::from(u64::MAX).add(BigInt::from(1)).to_string(), + )), + }, + expected_value: GraphValue::BigInt(BigInt::from(u64::MAX).add(BigInt::from(1))), + }, + Case { + name: "big decimal value".to_string(), + value: Value { + typed: Some(Typed::Bigdecimal("3133363633312e35".to_string())), + }, + expected_value: GraphValue::BigDecimal(BigDecimal::new( + BigInt::from(3133363633312u64), + 35, + )), + }, + Case { + name: "bool value".to_string(), + value: Value { + typed: Some(Typed::Bool(true)), + }, + expected_value: GraphValue::Bool(true), + }, + Case { + name: "string array".to_string(), + value: Value { + typed: Some(Typed::Array(Array { + value: vec![ + Value { + typed: Some(Typed::String("1".to_string())), + }, + Value { + typed: Some(Typed::String("2".to_string())), + }, + Value { + typed: Some(Typed::String("3".to_string())), + }, + ], + })), + }, + expected_value: GraphValue::List(vec!["1".into(), "2".into(), "3".into()]), + }, + ]; + + for case in cases.into_iter() { + let value: GraphValue = decode_value(&case.value.typed.unwrap()).unwrap(); + assert_eq!(case.expected_value, value, "failed case: {}", case.name) + } + } +} diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index d3aeb91c020..b74bc0046eb 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, str::FromStr, sync::Arc}; +use std::sync::Arc; use anyhow::Error; use graph::{ @@ -6,17 +6,12 @@ use graph::{ self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities, MappingTriggerTrait, }, components::{ - store::{DeploymentLocator, EntityKey, EntityType, SubgraphFork}, + store::{DeploymentLocator, SubgraphFork}, subgraph::{MappingError, ProofOfIndexingEvent, 
SharedProofOfIndexing}, }, - data::{ - store::{scalar::Bytes, IdType}, - value::Word, - }, - data_source::{self, CausalityRegion}, + data_source::{self}, prelude::{ - anyhow, async_trait, BigDecimal, BigInt, BlockHash, BlockNumber, BlockState, - RuntimeHostBuilder, Value, + anyhow, async_trait, BlockHash, BlockNumber, BlockState, CheapClone, RuntimeHostBuilder, }, slog::Logger, substreams::Modules, @@ -24,8 +19,7 @@ use graph::{ use graph_runtime_wasm::module::ToAscPtr; use lazy_static::__Deref; -use crate::codec; -use crate::{codec::entity_change::Operation, Block, Chain, NoopDataSourceTemplate}; +use crate::{Block, Chain, NoopDataSourceTemplate, ParsedChanges}; #[derive(Eq, PartialEq, PartialOrd, Ord, Debug)] pub struct TriggerData {} @@ -189,95 +183,37 @@ where _subgraph_metrics: &Arc, _instrument: bool, ) -> Result, MappingError> { - for entity_change in block.changes.entity_changes.iter() { - match entity_change.operation() { - Operation::Unset => { + for parsed_change in block.parsed_changes.clone().into_iter() { + match parsed_change { + ParsedChanges::Unset => { // Potentially an issue with the server side or // we are running an outdated version. In either case we should abort. return Err(MappingError::Unknown(anyhow!("Detected UNSET entity operation, either a server error or there's a new type of operation and we're running an outdated protobuf"))); } - Operation::Create | Operation::Update => { - let schema = state.entity_cache.schema.as_ref(); - let entity_type = EntityType::new(entity_change.entity.to_string()); - // Make sure that the `entity_id` gets set to a value - // that is safe for roundtrips through the database. In - // particular, if the type of the id is `Bytes`, we have - // to make sure that the `entity_id` starts with `0x` as - // that will be what the key for such an entity have - // when it is read from the database. 
- // - // Needless to say, this is a very ugly hack, and the - // real fix is what's described in [this - // issue](https://github.com/graphprotocol/graph-node/issues/4663) - let entity_id: String = match schema.id_type(&entity_type)? { - IdType::String => entity_change.id.clone(), - IdType::Bytes => { - if entity_change.id.starts_with("0x") { - entity_change.id.clone() - } else { - format!("0x{}", entity_change.id) - } - } - }; - - let mut data: HashMap = HashMap::from_iter(vec![]); - for field in entity_change.fields.iter() { - let new_value: &codec::value::Typed = match &field.new_value { - Some(codec::Value { - typed: Some(new_value), - }) => new_value, - _ => continue, - }; - - let value: Value = decode_value(new_value)?; - *data - .entry(Word::from(field.name.as_str())) - .or_insert(Value::Null) = value; - } - + ParsedChanges::Upsert { key, entity } => { write_poi_event( proof_of_indexing, &ProofOfIndexingEvent::SetEntity { - entity_type: entity_type.as_str(), - id: &entity_id, - // TODO: This should be an entity so we do not have to build the intermediate HashMap - data: &data, + entity_type: key.entity_type.as_str(), + id: &key.entity_id, + data: &entity, }, causality_region, logger, ); - let key = EntityKey { - entity_type: entity_type, - entity_id: Word::from(entity_id), - causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data - }; - - let id = state.entity_cache.schema.id_value(&key)?; - data.insert(Word::from("id"), id); - - let entity = state.entity_cache.make_entity(data).map_err(|err| { - MappingError::Unknown(anyhow!("Failed to make entity: {}", err)) - })?; - state.entity_cache.set(key, entity)?; } - Operation::Delete => { - let entity_type: &str = &entity_change.entity; - let entity_id: String = entity_change.id.clone(); - let key = EntityKey { - entity_type: EntityType::new(entity_type.to_string()), - entity_id: entity_id.clone().into(), - causality_region: CausalityRegion::ONCHAIN, // Substreams don't 
currently support offchain data - }; - - state.entity_cache.remove(key); + ParsedChanges::Delete(entity_key) => { + let entity_type = entity_key.entity_type.cheap_clone(); + let id = entity_key.entity_id.clone(); + state.entity_cache.remove(entity_key); write_poi_event( proof_of_indexing, &ProofOfIndexingEvent::RemoveEntity { - entity_type, - id: &entity_id, + entity_type: entity_type.as_str(), + id: id.as_str(), }, causality_region, logger, @@ -289,170 +225,3 @@ where Ok(state) } } - -fn decode_value(value: &crate::codec::value::Typed) -> Result { - use codec::value::Typed; - - match value { - Typed::Int32(new_value) => Ok(Value::Int(*new_value)), - - Typed::Bigdecimal(new_value) => BigDecimal::from_str(new_value) - .map(Value::BigDecimal) - .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), - - Typed::Bigint(new_value) => BigInt::from_str(new_value) - .map(Value::BigInt) - .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), - - Typed::String(new_value) => { - let mut string = new_value.clone(); - - // Strip null characters since they are not accepted by Postgres. 
- if string.contains('\u{0000}') { - string = string.replace('\u{0000}', ""); - } - Ok(Value::String(string)) - } - - Typed::Bytes(new_value) => base64::decode(new_value) - .map(|bs| Value::Bytes(Bytes::from(bs))) - .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), - - Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), - - Typed::Array(arr) => arr - .value - .iter() - .filter_map(|item| item.typed.as_ref().map(decode_value)) - .collect::, MappingError>>() - .map(Value::List), - } -} - -#[cfg(test)] -mod test { - use std::{ops::Add, str::FromStr}; - - use crate::codec::value::Typed; - use crate::codec::{Array, Value}; - use crate::trigger::decode_value; - use graph::{ - data::store::scalar::Bytes, - prelude::{BigDecimal, BigInt, Value as GraphValue}, - }; - - #[test] - fn validate_substreams_field_types() { - struct Case { - name: String, - value: Value, - expected_value: GraphValue, - } - - let cases = vec![ - Case { - name: "string value".to_string(), - value: Value { - typed: Some(Typed::String( - "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d" - .to_string(), - )), - }, - expected_value: GraphValue::String( - "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d".to_string(), - ), - }, - Case { - name: "bytes value".to_string(), - value: Value { - typed: Some(Typed::Bytes( - base64::encode( - hex::decode( - "445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", - ) - .unwrap(), - ) - .into_bytes(), - )), - }, - expected_value: GraphValue::Bytes( - Bytes::from_str( - "0x445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", - ) - .unwrap(), - ), - }, - Case { - name: "int value for block".to_string(), - value: Value { - typed: Some(Typed::Int32(12369760)), - }, - expected_value: GraphValue::Int(12369760), - }, - Case { - name: "negative int value".to_string(), - value: Value { - typed: Some(Typed::Int32(-12369760)), - }, - expected_value: GraphValue::Int(-12369760), - }, - Case { - name: 
"big int".to_string(), - value: Value { - typed: Some(Typed::Bigint("123".to_string())), - }, - expected_value: GraphValue::BigInt(BigInt::from(123u64)), - }, - Case { - name: "big int > u64".to_string(), - value: Value { - typed: Some(Typed::Bigint( - BigInt::from(u64::MAX).add(BigInt::from(1)).to_string(), - )), - }, - expected_value: GraphValue::BigInt(BigInt::from(u64::MAX).add(BigInt::from(1))), - }, - Case { - name: "big decimal value".to_string(), - value: Value { - typed: Some(Typed::Bigdecimal("3133363633312e35".to_string())), - }, - expected_value: GraphValue::BigDecimal(BigDecimal::new( - BigInt::from(3133363633312u64), - 35, - )), - }, - Case { - name: "bool value".to_string(), - value: Value { - typed: Some(Typed::Bool(true)), - }, - expected_value: GraphValue::Bool(true), - }, - Case { - name: "string array".to_string(), - value: Value { - typed: Some(Typed::Array(Array { - value: vec![ - Value { - typed: Some(Typed::String("1".to_string())), - }, - Value { - typed: Some(Typed::String("2".to_string())), - }, - Value { - typed: Some(Typed::String("3".to_string())), - }, - ], - })), - }, - expected_value: GraphValue::List(vec!["1".into(), "2".into(), "3".into()]), - }, - ]; - - for case in cases.into_iter() { - let value: GraphValue = decode_value(&case.value.typed.unwrap()).unwrap(); - assert_eq!(case.expected_value, value, "failed case: {}", case.name) - } - } -} diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index ce01d79490d..00baf22ecd4 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -6,7 +6,9 @@ use crate::subgraph::stream::new_block_stream; use atomic_refcell::AtomicRefCell; use graph::blockchain::block_stream::{BlockStreamEvent, BlockWithTriggers, FirehoseCursor}; use graph::blockchain::{Block, Blockchain, DataSource as _, TriggerFilter as _}; -use graph::components::store::{EmptyStore, EntityKey, GetScope, StoredDynamicDataSource}; +use graph::components::store::{ + EmptyStore, EntityKey, 
GetScope, ReadStore, StoredDynamicDataSource, +}; use graph::components::{ store::ModificationsAndCache, subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, @@ -723,7 +725,7 @@ where for trigger in triggers { // Using an `EmptyStore` and clearing the cache for each trigger is a makeshift way to // get causality region isolation. - let schema = self.inputs.store.input_schema(); + let schema = ReadStore::input_schema(&self.inputs.store); let mut block_state = BlockState::::new(EmptyStore::new(schema), LfuCache::new()); // PoI ignores offchain events. diff --git a/core/src/subgraph/stream.rs b/core/src/subgraph/stream.rs index 2f6253acfbd..c1d767e3fcf 100644 --- a/core/src/subgraph/stream.rs +++ b/core/src/subgraph/stream.rs @@ -1,12 +1,10 @@ use crate::subgraph::inputs::IndexingInputs; +use anyhow::bail; use graph::blockchain::block_stream::{BlockStream, BufferedBlockStream}; use graph::blockchain::Blockchain; use graph::prelude::{CheapClone, Error, SubgraphInstanceMetrics}; use std::sync::Arc; -const BUFFERED_BLOCK_STREAM_SIZE: usize = 100; -const BUFFERED_FIREHOSE_STREAM_SIZE: usize = 1; - pub async fn new_block_stream( inputs: &IndexingInputs, filter: &C::TriggerFilter, @@ -14,12 +12,7 @@ pub async fn new_block_stream( ) -> Result>, Error> { let is_firehose = inputs.chain.chain_client().is_firehose(); - let buffer_size = match is_firehose { - true => BUFFERED_FIREHOSE_STREAM_SIZE, - false => BUFFERED_BLOCK_STREAM_SIZE, - }; - - let block_stream = inputs + match inputs .chain .new_block_stream( inputs.deployment.clone(), @@ -28,13 +21,17 @@ pub async fn new_block_stream( Arc::new(filter.clone()), inputs.unified_api_version.clone(), ) - .await; - if is_firehose && block_stream.is_err() { - metrics.firehose_connection_errors.inc(); + .await + { + Ok(block_stream) => Ok(BufferedBlockStream::spawn_from_stream( + block_stream.buffer_size_hint(), + block_stream, + )), + Err(e) => { + if is_firehose { + 
metrics.firehose_connection_errors.inc(); + } + bail!(e); + } } - - Ok(BufferedBlockStream::spawn_from_stream( - block_stream?, - buffer_size, - )) } diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 6726dc881a5..4c535afb43c 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -26,7 +26,7 @@ graphql-parser = "0.4.0" lazy_static = "1.4.0" num-bigint = { version = "^0.2.6", features = ["serde"] } num_cpus = "1.16.0" -num-traits = "0.2.16" +num-traits = "=0.2.16" rand = "0.8.4" regex = "1.5.4" semver = { version = "1.0.18", features = ["serde"] } @@ -36,8 +36,11 @@ serde_json = { version = "1.0", features = ["arbitrary_precision"] } serde_regex = "1.1.0" serde_yaml = "0.9.21" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } -stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } -stable-hash = { version = "0.4.2" } +# TODO: This should be reverted to the latest version once it's published +# stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } +# stable-hash = { version = "0.4.2" } +stable-hash = { git = "https://github.com/graphprotocol/stable-hash", branch = "main"} +stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", branch = "old", package = "stable-hash" } strum = "0.21.0" strum_macros = "0.25.2" slog-async = "2.5.0" diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 6dfec8abbd6..e5fbacb7d1c 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -11,17 +11,22 @@ use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; +use crate::schema::InputSchema; use crate::substreams_rpc::response::Message; use crate::{prelude::*, prometheus::labels}; +pub const BUFFERED_BLOCK_STREAM_SIZE: usize = 100; +pub const FIREHOSE_BUFFER_STREAM_SIZE: usize = 1; +pub 
const SUBSTREAMS_BUFFER_STREAM_SIZE: usize = 100; + pub struct BufferedBlockStream { inner: Pin, Error>> + Send>>, } impl BufferedBlockStream { pub fn spawn_from_stream( - stream: Box>, size_hint: usize, + stream: Box>, ) -> Box> { let (sender, receiver) = mpsc::channel::, Error>>(size_hint); crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await }); @@ -66,7 +71,11 @@ impl BufferedBlockStream { } } -impl BlockStream for BufferedBlockStream {} +impl BlockStream for BufferedBlockStream { + fn buffer_size_hint(&self) -> usize { + unreachable!() + } +} impl Stream for BufferedBlockStream { type Item = Result, Error>; @@ -82,6 +91,7 @@ impl Stream for BufferedBlockStream { pub trait BlockStream: Stream, Error>> + Unpin + Send { + fn buffer_size_hint(&self) -> usize; } /// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added @@ -111,6 +121,16 @@ pub trait BlockStreamBuilder: Send + Sync { unified_api_version: UnifiedMappingApiVersion, ) -> Result>>; + async fn build_substreams( + &self, + chain: &C, + schema: Arc, + deployment: DeploymentLocator, + block_cursor: FirehoseCursor, + subgraph_current_block: Option, + filter: Arc, + ) -> Result>>; + async fn build_polling( &self, chain: &C, @@ -463,7 +483,11 @@ mod test { number: u64, } - impl BlockStream for TestStream {} + impl BlockStream for TestStream { + fn buffer_size_hint(&self) -> usize { + 1 + } + } impl Stream for TestStream { type Item = Result, Error>; @@ -495,7 +519,7 @@ mod test { }); let guard = SharedCancelGuard::new(); - let mut stream = BufferedBlockStream::spawn_from_stream(stream, buffer_size) + let mut stream = BufferedBlockStream::spawn_from_stream(buffer_size, stream) .map_err(CancelableError::Error) .cancelable(&guard, || Err(CancelableError::Cancel)); diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index 93e44f31336..a25f268a358 100644 --- 
a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -1,4 +1,6 @@ -use super::block_stream::{BlockStream, BlockStreamEvent, FirehoseMapper}; +use super::block_stream::{ + BlockStream, BlockStreamEvent, FirehoseMapper, FIREHOSE_BUFFER_STREAM_SIZE, +}; use super::client::ChainClient; use super::{Blockchain, TriggersAdapter}; use crate::blockchain::block_stream::FirehoseCursor; @@ -421,7 +423,11 @@ impl Stream for FirehoseBlockStream { } } -impl BlockStream for FirehoseBlockStream {} +impl BlockStream for FirehoseBlockStream { + fn buffer_size_hint(&self) -> usize { + FIREHOSE_BUFFER_STREAM_SIZE + } +} fn must_check_subgraph_continuity( logger: &Logger, diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 9d74b4dabb7..a0352de8aaf 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -136,15 +136,6 @@ impl ChainStoreBlock { } } -// // ChainClient represents the type of client used to ingest data from the chain. For most chains -// // this will be either firehose or some sort of rpc client. -// // If a specific chain requires more than one adapter this should be handled by the chain specifically -// // as it's not common behavior across chains. 
-// pub enum ChainClient { -// Firehose(FirehoseEndpoints), -// Rpc(C::Client), -// } - #[async_trait] // This is only `Debug` because some tests require that pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { diff --git a/graph/src/blockchain/polling_block_stream.rs b/graph/src/blockchain/polling_block_stream.rs index daebeef2bd4..a9c52294c18 100644 --- a/graph/src/blockchain/polling_block_stream.rs +++ b/graph/src/blockchain/polling_block_stream.rs @@ -9,7 +9,7 @@ use std::time::Duration; use super::block_stream::{ BlockStream, BlockStreamEvent, BlockWithTriggers, ChainHeadUpdateStream, FirehoseCursor, - TriggersAdapter, + TriggersAdapter, BUFFERED_BLOCK_STREAM_SIZE, }; use super::{Block, BlockPtr, Blockchain}; @@ -463,7 +463,11 @@ where } } -impl BlockStream for PollingBlockStream {} +impl BlockStream for PollingBlockStream { + fn buffer_size_hint(&self) -> usize { + BUFFERED_BLOCK_STREAM_SIZE + } +} impl Stream for PollingBlockStream { type Item = Result, Error>; diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index ebcb37a1328..df94e58a76e 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -1,4 +1,4 @@ -use super::block_stream::SubstreamsMapper; +use super::block_stream::{SubstreamsMapper, SUBSTREAMS_BUFFER_STREAM_SIZE}; use super::client::ChainClient; use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; use crate::blockchain::Blockchain; @@ -321,4 +321,8 @@ impl Stream for SubstreamsBlockStream { } } -impl BlockStream for SubstreamsBlockStream {} +impl BlockStream for SubstreamsBlockStream { + fn buffer_size_hint(&self) -> usize { + SUBSTREAMS_BUFFER_STREAM_SIZE + } +} diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 636561d5173..eed14e4266d 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -249,6 +249,8 
@@ impl ReadStore for Arc { } pub trait DeploymentCursorTracker: Sync + Send + 'static { + fn input_schema(&self) -> Arc; + /// Get a pointer to the most recently processed block in the subgraph. fn block_ptr(&self) -> Option; @@ -266,6 +268,10 @@ impl DeploymentCursorTracker for Arc { fn firehose_cursor(&self) -> FirehoseCursor { (**self).firehose_cursor() } + + fn input_schema(&self) -> Arc { + (**self).input_schema() + } } /// A view of the store for indexing. All indexing-related operations need diff --git a/graph/src/components/subgraph/proof_of_indexing/event.rs b/graph/src/components/subgraph/proof_of_indexing/event.rs index 6a7ae64a70b..4fc2e90c171 100644 --- a/graph/src/components/subgraph/proof_of_indexing/event.rs +++ b/graph/src/components/subgraph/proof_of_indexing/event.rs @@ -1,7 +1,7 @@ -use crate::data::value::Word; -use crate::prelude::{impl_slog_value, Value}; +use crate::components::subgraph::Entity; +use crate::prelude::impl_slog_value; use stable_hash_legacy::StableHasher; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::fmt; use strum_macros::IntoStaticStr; @@ -13,7 +13,7 @@ pub enum ProofOfIndexingEvent<'a> { SetEntity { entity_type: &'a str, id: &'a str, - data: &'a HashMap, + data: &'a Entity, }, /// For when a deterministic error has happened. 
/// @@ -77,7 +77,9 @@ impl stable_hash_legacy::StableHash for ProofOfIndexingEvent<'_> { } => { entity_type.stable_hash(sequence_number.next_child(), state); id.stable_hash(sequence_number.next_child(), state); - data.stable_hash(sequence_number.next_child(), state); + + stable_hash_legacy::utils::AsUnorderedSet(*data) + .stable_hash(sequence_number.next_child(), state); } DeterministicError { redacted_events } => { redacted_events.stable_hash(sequence_number.next_child(), state) @@ -103,7 +105,8 @@ impl stable_hash::StableHash for ProofOfIndexingEvent<'_> { } => { entity_type.stable_hash(field_address.child(0), state); id.stable_hash(field_address.child(1), state); - data.stable_hash(field_address.child(2), state); + stable_hash::utils::AsUnorderedSet(*data) + .stable_hash::<_>(field_address.child(2), state); 2 } Self::DeterministicError { redacted_events } => { @@ -135,7 +138,14 @@ impl fmt::Debug for ProofOfIndexingEvent<'_> { } => { builder.field("entity_type", entity_type); builder.field("id", id); - builder.field("data", &data.iter().collect::>()); + builder.field( + "data", + &data + .sorted_ref() + .iter() + .cloned() + .collect::>(), + ); } Self::DeterministicError { redacted_events } => { builder.field("redacted_events", redacted_events); diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 457f39c7514..2569f7dac93 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -35,7 +35,10 @@ pub type SharedProofOfIndexing = Option>>; #[cfg(test)] mod tests { use super::*; - use crate::prelude::{BlockPtr, DeploymentHash, Value}; + use crate::{ + prelude::{BlockPtr, DeploymentHash, Value}, + schema::InputSchema, + }; use maplit::hashmap; use online::ProofOfIndexingFinisher; use reference::*; @@ -110,8 +113,8 @@ mod tests { let online = hex::encode(finisher.finish()); let offline = hex::encode(offline); - 
assert_eq!(&online, &offline); - assert_eq!(&online, hardcoded); + assert_eq!(&online, &offline, "case: {}", case.name); + assert_eq!(&online, hardcoded, "case: {}", case.name); if let Some(prev) = cache.insert(offline, case.name) { panic!("Found conflict for case: {} == {}", case.name, prev); @@ -130,14 +133,35 @@ mod tests { /// in each case the reference and online versions match #[test] fn online_vs_reference() { - let data = hashmap! { - "val".into() => Value::Int(1) - }; - let data_empty = hashmap! {}; - let data2 = hashmap! { - "key".into() => Value::String("s".to_owned()), - "null".into() => Value::Null, - }; + let id = DeploymentHash::new("Qm123").unwrap(); + + let data_schema = + InputSchema::parse("type User @entity { id: String!, val: Int }", id.clone()).unwrap(); + let data = data_schema + .make_entity(hashmap! { + "id".into() => Value::String("id".to_owned()), + "val".into() => Value::Int(1) + }) + .unwrap(); + + let empty_schema = + InputSchema::parse("type User @entity { id: String! }", id.clone()).unwrap(); + let data_empty = empty_schema + .make_entity(hashmap! { "id".into() => Value::String("id".into())}) + .unwrap(); + + let data2_schema = InputSchema::parse( + "type User @entity { id: String!, key: String!, null: String }", + id, + ) + .unwrap(); + let data2 = data2_schema + .make_entity(hashmap! 
{ + "id".into() => Value::String("id".to_owned()), + "key".into() => Value::String("s".to_owned()), + "null".into() => Value::Null, + }) + .unwrap(); let mut cases = vec![ // Simple case of basically nothing @@ -155,8 +179,8 @@ mod tests { // Add an event Case { name: "one_event", - legacy: "9241634bfc8a9a12c796a0de6f326326a74967cd477ee7ce78fbac20a9e9c303", - fast: "bb3c37659d4bc799b9dcf3d17b1b1e93847f5fc0b2c50ee6a27f13b5c07f7e97", + legacy: "96640d7a35405524bb21da8d86f7a51140634f44568cf9f7df439d0b2b01a435", + fast: "8bb3373fb55e02bde3202bac0eeecf1bd9a676856a4dd6667bd809aceda41885", data: PoI { subgraph_id: DeploymentHash::new("test").unwrap(), block_hash: H256::repeat_byte(1), @@ -182,8 +206,8 @@ mod tests { // Try adding a couple more blocks, including an empty block on the end Case { name: "multiple_blocks", - legacy: "775fa30bbaef2a8659456a317923a36f46e3715e6c9cf43203dd3486af4e361f", - fast: "3bb882049e8f4a11cd4a7a005c6ce3b3c779a0e90057a9556c595660e626268d", + legacy: "a0346ee0d7e0518f73098b6f9dc020f1cf564fb88e09779abfdf5da736de5e82", + fast: "8b0097ad96b21f7e4bd8dcc41985e6e5506b808f1185016ab1073dd8745238ce", data: PoI { subgraph_id: DeploymentHash::new("b").unwrap(), block_hash: H256::repeat_byte(3), @@ -220,8 +244,8 @@ mod tests { // Try adding another causality region Case { name: "causality_regions", - legacy: "13e6fd2b581911c80d935d4f098b40ef3d87cbc564b5a635c81b06091a381e54", - fast: "b2cb70acd4a1337a67df810fe4c5c2fb3d3a3b2b8eb137dbb592bd6014869362", + legacy: "cc9449860e5b19b76aa39d6e05c5a560d1cb37a93d4bf64669feb47cfeb452fa", + fast: "2041af28678e68406247a5cfb5fe336947da75256c79b35c2f61fc7985091c0e", data: PoI { subgraph_id: DeploymentHash::new("b").unwrap(), block_hash: H256::repeat_byte(3), @@ -282,8 +306,8 @@ mod tests { // Back to the one event case, but try adding some data. 
Case { name: "data", - legacy: "cd3020511cf4c88dd2be542aca4f95bb2a67b06e29f444bcdf44009933b8ff31", - fast: "a992ba24702615a3f591014f7351acf85a35b75e1f8646fc8d77509c4b5d31ed", + legacy: "d304672a249293ee928d99d9cb0576403bdc4b6dbadeb49b98f527277297cdcc", + fast: "421ef30a03be64014b9eef2b999795dcabfc601368040df855635e7886eb3822", data: PoI { subgraph_id: DeploymentHash::new("test").unwrap(), block_hash: H256::repeat_byte(1), diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 0551d4d6bce..f77d59daed8 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -4,7 +4,7 @@ use crate::{ prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, schema::InputSchema, - util::intern::AtomPool, + util::intern::{self, AtomPool}, util::intern::{Error as InternError, NullValue, Object}, }; use crate::{data::subgraph::DeploymentHash, prelude::EntityChange}; @@ -643,6 +643,16 @@ lazy_static! { #[derive(Clone, PartialEq, Eq, Serialize)] pub struct Entity(Object); +impl<'a> IntoIterator for &'a Entity { + type Item = (Word, Value); + + type IntoIter = intern::ObjectOwningIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.clone().into_iter() + } +} + pub trait IntoEntityIterator: IntoIterator {} impl> IntoEntityIterator for T {} diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 6f88882fd3e..f73658b8de4 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -11,8 +11,8 @@ graphql-tools = "0.2.1" indexmap = "2.0" Inflector = "0.11.3" lazy_static = "1.2.0" -stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } -stable-hash = { version = "0.4.2" } +stable-hash = { git = "https://github.com/graphprotocol/stable-hash", branch = "main"} +stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", branch = "old", package = "stable-hash" } defer = "0.1" parking_lot = "0.12" anyhow = "1.0" diff --git a/runtime/wasm/src/host_exports.rs 
b/runtime/wasm/src/host_exports.rs index d6820298c74..a22d0a1376d 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -162,28 +162,6 @@ impl HostExports { stopwatch: &StopwatchMetrics, gas: &GasCounter, ) -> Result<(), HostExportError> { - let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); - write_poi_event( - proof_of_indexing, - &ProofOfIndexingEvent::SetEntity { - entity_type: &entity_type, - id: &entity_id, - data: &data, - }, - &self.poi_causality_region, - logger, - ); - poi_section.end(); - - let key = EntityKey { - entity_type: EntityType::new(entity_type), - entity_id: entity_id.into(), - causality_region: self.data_source_causality_region, - }; - self.check_entity_type_access(&key.entity_type)?; - - gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; - fn check_id(key: &EntityKey, prev_id: &str) -> Result<(), anyhow::Error> { if prev_id != key.entity_id.as_str() { Err(anyhow!( @@ -198,6 +176,15 @@ impl HostExports { } } + let key = EntityKey { + entity_type: EntityType::new(entity_type), + entity_id: entity_id.into(), + causality_region: self.data_source_causality_region, + }; + self.check_entity_type_access(&key.entity_type)?; + + gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; + // Set the id if there isn't one yet, and make sure that a // previously set id agrees with the one in the `key` match data.get(&store::ID) { @@ -217,6 +204,19 @@ impl HostExports { .make_entity(data.into_iter().map(|(key, value)| (key, value))) .map_err(|e| HostExportError::Deterministic(anyhow!(e)))?; + let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); + write_poi_event( + proof_of_indexing, + &ProofOfIndexingEvent::SetEntity { + entity_type: &key.entity_type.as_str(), + id: &key.entity_id.as_str(), + data: &entity, + }, + &self.poi_causality_region, + logger, + ); + poi_section.end(); + 
state.entity_cache.set(key, entity)?; Ok(()) diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 2fcf2f4485c..eaf463dd985 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -30,7 +30,7 @@ postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" uuid = { version = "1.4.1", features = ["v4"] } -stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } +stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", branch = "old", package = "stable-hash" } diesel_derives = "1.4.1" anyhow = "1.0.75" git-testament = "0.2.4" diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index d99a2f46fcc..04a73c82eec 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1457,6 +1457,10 @@ impl DeploymentCursorTracker for WritableStore { fn firehose_cursor(&self) -> FirehoseCursor { self.block_cursor.lock().unwrap().clone() } + + fn input_schema(&self) -> Arc { + self.store.input_schema() + } } #[async_trait::async_trait] diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index b03c5d7b6ee..32fcf499fe5 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -86,6 +86,10 @@ impl DeploymentCursorTracker for MockStore { fn firehose_cursor(&self) -> FirehoseCursor { unimplemented!() } + + fn input_schema(&self) -> Arc { + todo!() + } } #[async_trait] diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 28b4fd89984..b6a5fe3bf5a 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -11,7 +11,7 @@ use std::{collections::HashSet, sync::Mutex}; use std::{marker::PhantomData, str::FromStr}; use test_store::*; -use graph::components::store::{DeploymentLocator, EntityKey, WritableStore}; +use graph::components::store::{DeploymentLocator, EntityKey, ReadStore, 
WritableStore}; use graph::data::subgraph::*; use graph::{ blockchain::DataSource, @@ -330,7 +330,7 @@ fn delete_entity() { #[test] fn get_entity_1() { run_test(|_, writable, _| async move { - let schema = writable.input_schema(); + let schema = ReadStore::input_schema(&writable); let key = EntityKey::data(USER.to_owned(), "1".to_owned()); let result = writable.get(&key).unwrap(); @@ -357,7 +357,7 @@ fn get_entity_1() { #[test] fn get_entity_3() { run_test(|_, writable, _| async move { - let schema = writable.input_schema(); + let schema = ReadStore::input_schema(&writable); let key = EntityKey::data(USER.to_owned(), "3".to_owned()); let result = writable.get(&key).unwrap(); diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 829f0f8ea6f..dae161d6cad 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -33,6 +33,7 @@ use graph::prelude::{ SubgraphName, SubgraphRegistrar, SubgraphStore as _, SubgraphVersionSwitchingMode, TriggerProcessor, }; +use graph::schema::InputSchema; use graph::slog::crit; use graph_core::polling_monitor::{arweave_service, ipfs_service}; use graph_core::{ @@ -580,6 +581,18 @@ impl BlockStreamBuilder for MutexBlockStreamBuilder { .await } + async fn build_substreams( + &self, + _chain: &C, + _schema: Arc, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _subgraph_current_block: Option, + _filter: Arc, + ) -> anyhow::Result>> { + unimplemented!(); + } + async fn build_polling( &self, _chain: &C, @@ -607,6 +620,18 @@ impl BlockStreamBuilder for StaticStreamBuilder where C::TriggerData: Clone, { + async fn build_substreams( + &self, + _chain: &C, + _schema: Arc, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _subgraph_current_block: Option, + _filter: Arc, + ) -> anyhow::Result>> { + unimplemented!() + } + async fn build_firehose( &self, _chain: &C, @@ -647,7 +672,11 @@ struct StaticStream { stream: Pin, Error>> + Send>>, } -impl BlockStream for StaticStream {} +impl 
BlockStream for StaticStream { + fn buffer_size_hint(&self) -> usize { + 1 + } +} impl Stream for StaticStream { type Item = Result, Error>; From e253ee14cda2d8456a86ae8f4e3f74a1a7979953 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Mon, 18 Sep 2023 23:15:28 +0800 Subject: [PATCH 0421/2104] chain: pass polling triggers only after start_block --- chain/ethereum/src/ethereum_adapter.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 4f075b53219..e583c9fee1a 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -760,8 +760,9 @@ impl EthereumAdapter { .iter() .find_map(|(start_block, interval)| { let has_once_trigger = (*interval == 0) && (block_number == *start_block); - let has_polling_trigger = - *interval > 0 && ((block_number - start_block) % *interval) == 0; + let has_polling_trigger = block_number >= *start_block + && *interval > 0 + && ((block_number - start_block) % *interval) == 0; if has_once_trigger || has_polling_trigger { let mut triggers = Vec::new(); @@ -1712,7 +1713,10 @@ pub(crate) fn parse_block_triggers( .iter() .any(|(start_block, interval)| match interval { 0 => false, - _ => (block_number - *start_block) % *interval == 0, + _ => { + block_number >= *start_block + && (block_number - *start_block) % *interval == 0 + } }); let has_once_trigger = From 02c4328d2d7b538ef31d1e0ea57163a535d6f058 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 15:12:10 +0100 Subject: [PATCH 0422/2104] build(deps): bump chrono from 0.4.26 to 0.4.31 (#4876) Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.26 to 0.4.31. 
- [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.26...v0.4.31) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 20 ++++---------------- graph/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 057c40dd1bb..046e0a00f3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -494,18 +494,17 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", - "time 0.1.44", "wasm-bindgen", - "winapi", + "windows-targets 0.48.0", ] [[package]] @@ -1527,7 +1526,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.107", - "time 0.3.17", + "time", ] [[package]] @@ -4470,17 +4469,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - [[package]] name = "time" version = "0.3.17" diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 4c535afb43c..60d7a31f251 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -13,7 +13,7 @@ bytes = "1.0.1" cid = "0.10.1" diesel = { version = "1.4.8", features = ["postgres", "serde_json", 
"numeric", "r2d2", "chrono"] } diesel_derives = "1.4" -chrono = "0.4.25" +chrono = "0.4.31" envconfig = "0.10.0" Inflector = "0.11.3" isatty = "0.1.9" From ce27a4d88658dd5b4d18d493c4362f837038fbe3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 15:27:57 +0100 Subject: [PATCH 0423/2104] build(deps): bump webpki from 0.22.0 to 0.22.1 (#4857) Bumps [webpki](https://github.com/briansmith/webpki) from 0.22.0 to 0.22.1. - [Commits](https://github.com/briansmith/webpki/commits) --- updated-dependencies: - dependency-name: webpki dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 046e0a00f3d..b6d0e0acc43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5518,9 +5518,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", From 2f17c1770844bbd0eab218d767b8e93cdd2e85cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 13:30:20 +0100 Subject: [PATCH 0424/2104] build(deps): bump bollard from 0.10.1 to 0.15.0 (#4875) * build(deps): bump bollard from 0.10.1 to 0.15.0 Bumps [bollard](https://github.com/fussybeaver/bollard) from 0.10.1 to 0.15.0. - [Release notes](https://github.com/fussybeaver/bollard/releases) - [Commits](https://github.com/fussybeaver/bollard/compare/v0.10.1...v0.15.0) --- updated-dependencies: - dependency-name: bollard dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * fix api changes --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Filipe Azevedo --- Cargo.lock | 151 ++++++++++++-------------------------- tests/Cargo.toml | 4 +- tests/src/docker_utils.rs | 14 ++-- 3 files changed, 58 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6d0e0acc43..7efddd1239b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -114,7 +114,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.12", + "syn 2.0.32", ] [[package]] @@ -136,7 +136,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.12", + "syn 2.0.32", ] [[package]] @@ -392,43 +392,41 @@ dependencies = [ [[package]] name = "bollard" -version = "0.10.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "699194c00f3a2effd3358d47f880646818e3d483190b17ebcdf598c654fb77e9" +checksum = "f03db470b3c0213c47e978da93200259a1eb4dae2e5512cba9955e2b540a6fc6" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "bollard-stubs", "bytes", - "chrono", - "ct-logs", - "dirs-next", "futures-core", "futures-util", "hex", "http", "hyper", - "hyper-unix-connector", + "hyperlocal", "log", - "pin-project", + "pin-project-lite", "serde", "serde_derive", "serde_json", + "serde_repr", "serde_urlencoded", "thiserror", "tokio", - "tokio-util 0.6.7", + "tokio-util 0.7.1", "url", "winapi", ] [[package]] name = "bollard-stubs" -version = "1.41.0" +version = "1.43.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +checksum = "b58071e8fd9ec1e930efd28e3a90c1251015872a2ce49f81f36421b86466932e" dependencies = [ - "chrono", "serde", + "serde_repr", "serde_with", ] @@ -881,50 
+879,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ct-logs" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" -dependencies = [ - "sct 0.6.1", -] - -[[package]] -name = "darling" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 1.0.107", -] - -[[package]] -name = "darling_macro" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.107", -] - [[package]] name = "data-encoding" version = "2.3.2" @@ -2253,12 +2207,12 @@ dependencies = [ ] [[package]] -name = "hyper-unix-connector" -version = "0.2.2" +name = "hyperlocal" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ef1fd95d34b4ff007d3f0590727b5cf33572cace09b42032fc817dc8b16557" +checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c" dependencies = [ - "anyhow", + "futures-util", "hex", "hyper", "pin-project", @@ -2291,12 +2245,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -2375,6 +2323,7 @@ checksum = 
"d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", + "serde", ] [[package]] @@ -3756,7 +3705,7 @@ checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ "log", "ring", - "sct 0.7.0", + "sct", "webpki", ] @@ -3847,16 +3796,6 @@ dependencies = [ "syn 1.0.107", ] -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -3919,22 +3858,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.32", ] [[package]] @@ -3967,6 +3906,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "serde_spanned" version = "0.6.3" @@ -3990,25 +3940,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "1.9.4" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ad9fdbb69badc8916db738c25efd04f0a65297d26c2f8de4b62e57b8c12bc72" +checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "rustversion", + "base64 0.21.0", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.0.0", "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1569374bd54623ec8bd592cf22ba6e03c0f177ff55fbc8c29a49e296e7adecf" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.107", + "serde_json", + "time 0.3.17", ] [[package]] @@ -4299,7 +4242,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.12", + "syn 2.0.32", ] [[package]] @@ -4325,9 +4268,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.12" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -4566,7 +4509,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.12", + "syn 2.0.32", ] [[package]] diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 8a60a0e1c34..3eab613b448 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true anyhow = "1.0" assert-json-diff = "2.0.2" async-stream = "0.3.5" -bollard = "0.10" +bollard = "0.15" futures = { version = "0.3", features = ["compat"] } graph = { path = "../graph" } graph-chain-ethereum = { path = "../chain/ethereum" } @@ -27,7 +27,7 @@ uuid = { version = "1.4.1", features = ["v4"] } [dev-dependencies] anyhow = "1.0.75" -bollard = "0.10" +bollard = "0.15" lazy_static = "1.4.0" tokio-stream = "0.1" cid = "0.10.1" diff --git a/tests/src/docker_utils.rs b/tests/src/docker_utils.rs index 
f48d94795c8..b88a5cda662 100644 --- a/tests/src/docker_utils.rs +++ b/tests/src/docker_utils.rs @@ -131,6 +131,7 @@ impl ServiceContainer { .create_container( Some(container::CreateContainerOptions { name: container_name.clone(), + platform: None, }), service.config(), ) @@ -153,7 +154,7 @@ impl ServiceContainer { } pub async fn exposed_ports(&self) -> Result { - use bollard::models::ContainerSummaryInner; + use bollard::models::ContainerSummary; let results = { let mut filters = HashMap::new(); @@ -167,7 +168,7 @@ impl ServiceContainer { }; let ports = match &results.as_slice() { - &[ContainerSummaryInner { + &[ContainerSummary { ports: Some(ports), .. }] => ports, unexpected_response => panic!( @@ -239,18 +240,21 @@ impl ServiceContainer { .await?; // 2. Start Exec - let mut stream = docker.client.start_exec(&message.id, None); + let mut stream = match docker.client.start_exec(&message.id, None).await.unwrap() { + StartExecResults::Attached { output, input: _ } => output, + StartExecResults::Detached => unreachable!(), + }; let mut std_err = vec![]; loop { match stream.next().await { - Some(Ok(StartExecResults::Attached { log })) => match log { + Some(Ok(log)) => match log { container::LogOutput::StdErr { message } => { std_err.push(String::from_utf8(message.to_vec()).unwrap()); } _ => {} }, None => break, - _ => { /* consume stream */ } + _ => {} } } From fd21d936ebeb977bd58fb04a82f026747437345b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 13:48:32 +0100 Subject: [PATCH 0425/2104] build(deps): bump blake3 from 1.4.1 to 1.5.0 (#4889) Bumps [blake3](https://github.com/BLAKE3-team/BLAKE3) from 1.4.1 to 1.5.0. - [Release notes](https://github.com/BLAKE3-team/BLAKE3/releases) - [Commits](https://github.com/BLAKE3-team/BLAKE3/compare/1.4.1...1.5.0) --- updated-dependencies: - dependency-name: blake3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 28 +++++++++++++--------------- server/index-node/Cargo.toml | 2 +- store/postgres/Cargo.toml | 2 +- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7efddd1239b..671db9a331c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -86,9 +86,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "ascii" @@ -328,7 +328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.4", "constant_time_eq 0.1.5", ] @@ -339,7 +339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.4", "constant_time_eq 0.1.5", ] @@ -360,16 +360,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec 0.7.4", "cc", "cfg-if 1.0.0", "constant_time_eq 0.3.0", - "digest 0.10.7", ] [[package]] @@ -1013,7 +1012,6 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.2", "crypto-common", - "subtle", ] [[package]] @@ -1828,7 
+1826,7 @@ dependencies = [ name = "graph-server-index-node" version = "0.32.0" dependencies = [ - "blake3 1.4.1", + "blake3 1.5.0", "either", "futures 0.3.16", "git-testament", @@ -1885,7 +1883,7 @@ dependencies = [ "Inflector", "anyhow", "async-trait", - "blake3 1.4.1", + "blake3 1.5.0", "clap", "derive_more", "diesel", @@ -2522,7 +2520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca" dependencies = [ "anyhow", - "arrayvec 0.7.2", + "arrayvec 0.7.4", "async-trait", "beef", "futures-channel", @@ -2811,7 +2809,7 @@ checksum = "15e5d911412e631e1de11eb313e4dd71f73fd964401102aab23d6c8327c431ba" dependencies = [ "blake2b_simd", "blake2s_simd", - "blake3 1.4.1", + "blake3 1.5.0", "core2", "digest 0.10.7", "multihash-derive", @@ -2998,7 +2996,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a7f3fcf5e45fc28b84dcdab6b983e77f197ec01f325a33f404ba6855afd1070" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.4", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -5417,7 +5415,7 @@ name = "web3" version = "0.19.0-graph" source = "git+https://github.com/graphprotocol/rust-web3?branch=graph-patches-onto-0.18#7f8eb6dfcc13a4186f9b42f91de950646bc4a833" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.4", "base64 0.13.1", "bytes", "derive_more", diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index 38d290c5fb8..eb6ae794852 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -4,7 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] -blake3 = "1.4" +blake3 = "1.5" either = "1.9.0" futures = "0.3.4" graph = { path = "../../graph" } diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index eaf463dd985..48084a53d46 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true [dependencies] 
async-trait = "0.1.50" -blake3 = "1.4" +blake3 = "1.5" derive_more = { version = "0.99.17" } diesel = { version = "1.4.8", features = [ "postgres", From 13284649d80b31efa6c9986180b26c8119b0ca36 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 13 Sep 2023 13:28:15 -0700 Subject: [PATCH 0426/2104] all: Turn POI_OBJECT and POI_DIGEST into fns on InputSchema --- core/src/subgraph/runner.rs | 11 ++++++----- graph/src/data/subgraph/schema.rs | 7 ------- graph/src/schema/input_schema.rs | 25 +++++++++++++++++++++++-- store/postgres/src/deployment_store.rs | 10 +++++----- store/postgres/src/relational.rs | 17 ++++++++++------- 5 files changed, 44 insertions(+), 26 deletions(-) diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 00baf22ecd4..7a5146934e8 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -14,9 +14,8 @@ use graph::components::{ subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, }; use graph::data::store::scalar::Bytes; -use graph::data::subgraph::schema::POI_DIGEST; use graph::data::subgraph::{ - schema::{SubgraphError, SubgraphHealth, POI_OBJECT}, + schema::{SubgraphError, SubgraphHealth}, SubgraphFeature, }; use graph::data_source::{ @@ -1079,12 +1078,13 @@ async fn update_proof_of_indexing( key: EntityKey, digest: Bytes, ) -> Result<(), Error> { + let digest_name = entity_cache.schema.poi_digest(); let data = vec![ ( graph::data::store::ID.clone(), Value::from(key.entity_id.to_string()), ), - (POI_DIGEST.clone(), Value::from(digest)), + (digest_name, Value::from(digest)), ]; let poi = entity_cache.make_entity(data)?; entity_cache.set(key, poi) @@ -1097,7 +1097,7 @@ async fn update_proof_of_indexing( for (causality_region, stream) in proof_of_indexing.drain() { // Create the special POI entity key specific to this causality_region let entity_key = EntityKey { - entity_type: POI_OBJECT.to_owned(), + entity_type: entity_cache.schema.poi_type().clone(), // There 
are two things called causality regions here, one is the causality region for // the poi which is a string and the PoI entity id. The other is the data source @@ -1109,10 +1109,11 @@ async fn update_proof_of_indexing( }; // Grab the current digest attribute on this entity + let poi_digest = entity_cache.schema.poi_digest().clone(); let prev_poi = entity_cache .get(&entity_key, GetScope::Store) .map_err(Error::from)? - .map(|entity| match entity.get(POI_DIGEST.as_str()) { + .map(|entity| match entity.get(poi_digest.as_str()) { Some(Value::Bytes(b)) => b.clone(), _ => panic!("Expected POI entity to have a digest and for it to be bytes"), }); diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index 6baae86ec31..ba7c049916d 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -2,7 +2,6 @@ use anyhow::{anyhow, bail, Error}; use hex; -use lazy_static::lazy_static; use rand::rngs::OsRng; use rand::Rng; use std::collections::BTreeSet; @@ -13,17 +12,11 @@ use super::DeploymentHash; use crate::data::graphql::TryFromValue; use crate::data::store::Value; use crate::data::subgraph::SubgraphManifest; -use crate::data::value::Word; use crate::prelude::*; use crate::util::stable_hash_glue::impl_stable_hash; use crate::{blockchain::Blockchain, components::store::EntityType}; pub const POI_TABLE: &str = "poi2$"; -lazy_static! 
{ - pub static ref POI_OBJECT: EntityType = EntityType::new("Poi$".to_string()); - /// The name of the digest attribute of POI entities - pub static ref POI_DIGEST: Word = Word::from("digest"); -} #[derive(Copy, Clone, PartialEq, Eq, Debug, Deserialize)] #[serde(rename_all = "lowercase")] diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index de26dd30149..db53b0079b4 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -12,7 +12,7 @@ use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, Va use crate::data::store::{ self, scalar, EntityValidationError, IntoEntityIterator, TryIntoEntityIterator, }; -use crate::data::subgraph::schema::POI_DIGEST; +use crate::data::value::Word; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; use crate::schema::api_schema; @@ -21,6 +21,11 @@ use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; use super::{ApiSchema, Schema, SchemaValidationError}; +/// The name of the PoI entity type +const POI_OBJECT: &str = "Poi$"; +/// The name of the digest attribute of POI entities +const POI_DIGEST: &str = "digest"; + /// The internal representation of a subgraph schema, i.e., the /// `schema.graphql` file that is part of a subgraph. Any code that deals /// with writing a subgraph should use this struct. 
Code that deals with @@ -38,6 +43,8 @@ pub struct Inner { // Maps each entity type to its field names field_names: HashMap>, pool: Arc, + + poi_type: EntityType, } impl CheapClone for InputSchema { @@ -76,12 +83,15 @@ impl InputSchema { }), ); + let poi_type = EntityType::new(POI_OBJECT.to_string()); + Self { inner: Arc::new(Inner { schema, immutable_types, field_names, pool, + poi_type, }), } } @@ -379,13 +389,24 @@ impl InputSchema { .map(|fields| fields.contains(&field)) .unwrap_or(false) } + + pub fn poi_type(&self) -> &EntityType { + &self.inner.poi_type + } + + pub fn poi_digest(&self) -> Word { + Word::from(POI_DIGEST) + } } /// Create a new pool that contains the names of all the types defined /// in the document and the names of all their fields fn atom_pool(document: &s::Document) -> AtomPool { let mut pool = AtomPool::new(); - pool.intern(POI_DIGEST.as_str()); // Attribute of PoI object + + pool.intern(POI_OBJECT); // Name of PoI entity type + pool.intern(POI_DIGEST); // Attribute of PoI object + for definition in &document.definitions { match definition { s::Definition::TypeDefinition(typedef) => match typedef { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index a09efc03e8a..648b208fbf5 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -37,7 +37,7 @@ use std::time::{Duration, Instant}; use graph::components::store::EntityCollection; use graph::components::subgraph::{ProofOfIndexingFinisher, ProofOfIndexingVersion}; use graph::constraint_violation; -use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, POI_DIGEST, POI_OBJECT}; +use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError}; use graph::prelude::{ anyhow, debug, info, o, warn, web3, AttributeNames, BlockNumber, BlockPtr, CheapClone, DeploymentHash, DeploymentState, Entity, EntityQuery, Error, Logger, QueryExecutionError, @@ -955,6 +955,8 @@ impl DeploymentStore { let 
indexer = *indexer; let site2 = site.cheap_clone(); let store = self.cheap_clone(); + let info = self.subgraph_info(&site)?; + let poi_digest = info.input.poi_digest(); let entities: Option<(Vec, BlockPtr)> = self .with_conn(move |conn, cancel| { @@ -1000,7 +1002,7 @@ impl DeploymentStore { site.deployment.cheap_clone(), block_ptr.number, EntityCollection::All(vec![( - POI_OBJECT.cheap_clone(), + info.input.poi_type().clone(), AttributeNames::All, )]), ); @@ -1025,7 +1027,7 @@ impl DeploymentStore { .into_iter() .map(|e| { let causality_region = e.id(); - let digest = match e.get(POI_DIGEST.as_str()) { + let digest = match e.get(poi_digest.as_str()) { Some(Value::Bytes(b)) => Ok(b.clone()), other => Err(anyhow::anyhow!( "Entity has non-bytes digest attribute: {:?}", @@ -1037,8 +1039,6 @@ impl DeploymentStore { }) .collect::, anyhow::Error>>()?; - let info = self.subgraph_info(&site2).map_err(anyhow::Error::from)?; - let mut finisher = ProofOfIndexingFinisher::new(&block_ptr, &site2.deployment, &indexer, info.poi_version); for (name, region) in by_causality_region.drain() { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 8a01080c17e..01e9bf81c13 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -55,7 +55,7 @@ use crate::{ use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; use graph::data::store::BYTES_SCALAR; -use graph::data::subgraph::schema::{POI_DIGEST, POI_OBJECT, POI_TABLE}; +use graph::data::subgraph::schema::POI_TABLE; use graph::prelude::{ anyhow, info, BlockNumber, DeploymentHash, Entity, EntityChange, EntityOperation, Logger, QueryExecutionError, StoreError, StoreEvent, ValueType, BLOCK_NUMBER_MAX, @@ -346,7 +346,7 @@ impl Layout { }) .collect::, _>>()?; if catalog.use_poi { - tables.push(Self::make_poi_table(&catalog, tables.len())) + tables.push(Self::make_poi_table(&schema, &catalog, 
tables.len())) } let tables: Vec<_> = tables.into_iter().map(Arc::new).collect(); @@ -388,16 +388,19 @@ impl Layout { }) } - fn make_poi_table(catalog: &Catalog, position: usize) -> Table { + fn make_poi_table(schema: &InputSchema, catalog: &Catalog, position: usize) -> Table { + let poi_type = schema.poi_type(); + let poi_digest = schema.poi_digest(); + let table_name = SqlName::verbatim(POI_TABLE.to_owned()); Table { - object: POI_OBJECT.to_owned(), + object: poi_type.to_owned(), qualified_name: SqlName::qualified_name(&catalog.site.namespace, &table_name), name: table_name, columns: vec![ Column { - name: SqlName::from(POI_DIGEST.as_str()), - field: POI_DIGEST.to_string(), + name: SqlName::from(poi_digest.as_str()), + field: poi_digest.to_string(), field_type: q::Type::NonNullType(Box::new(q::Type::NamedType( BYTES_SCALAR.to_owned(), ))), @@ -429,7 +432,7 @@ impl Layout { } pub fn supports_proof_of_indexing(&self) -> bool { - self.tables.contains_key(&*POI_OBJECT) + self.tables.contains_key(self.input_schema.poi_type()) } pub fn create_relational_schema( From 25a7a8ce14641920b696a6d9d9423a0af5223dbe Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 13 Sep 2023 16:44:20 -0700 Subject: [PATCH 0427/2104] graph: Remove some * imports in components::store --- graph/src/components/store/err.rs | 4 ++-- graph/src/components/store/mod.rs | 13 ++++++++++--- graph/src/components/store/traits.rs | 5 +++++ 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index f5052a3a179..b9ef0a59f98 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -1,7 +1,7 @@ -use super::{BlockNumber, DeploymentHash, DeploymentSchemaVersion}; -use crate::data::store::EntityValidationError; +use super::{BlockNumber, DeploymentSchemaVersion}; use crate::prelude::QueryExecutionError; use crate::util::intern::Error as InternError; +use 
crate::{data::store::EntityValidationError, prelude::DeploymentHash}; use anyhow::{anyhow, Error}; use diesel::result::Error as DieselError; diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 1afed696c23..40626b4f850 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -4,7 +4,10 @@ mod traits; pub mod write; pub use entity_cache::{EntityCache, GetScope, ModificationsAndCache}; +use futures03::future::{FutureExt, TryFutureExt}; +use slog::{trace, Logger}; +pub use super::subgraph::Entity; use diesel::types::{FromSql, ToSql}; pub use err::StoreError; use itertools::Itertools; @@ -26,15 +29,19 @@ use std::sync::{Arc, RwLock}; use std::time::Duration; use std::{fmt, io}; -use crate::blockchain::Block; +use crate::blockchain::{Block, BlockHash, BlockPtr}; +use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; +use crate::constraint_violation; use crate::data::store::scalar::Bytes; -use crate::data::store::*; +use crate::data::store::Value; use crate::data::value::Word; use crate::data_source::CausalityRegion; +use crate::env::ENV_VARS; +use crate::prelude::{Attribute, DeploymentHash, SubscriptionFilter, ValueType}; use crate::schema::InputSchema; use crate::util::intern; -use crate::{constraint_violation, prelude::*}; +use crate::util::stats::MovingStats; /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. 
}` diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index eed14e4266d..077c29008ec 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -1,14 +1,19 @@ +use anyhow::Error; +use async_trait::async_trait; use web3::types::{Address, H256}; use super::*; use crate::blockchain::block_stream::FirehoseCursor; +use crate::components::metrics::stopwatch::StopwatchMetrics; use crate::components::server::index_node::VersionInfo; +use crate::components::subgraph::SubgraphVersionSwitchingMode; use crate::components::transaction_receipt; use crate::components::versions::ApiVersion; use crate::data::query::Trace; use crate::data::subgraph::{status, DeploymentFeatures}; use crate::data::value::Object; use crate::data::{query::QueryTarget, subgraph::schema::*}; +use crate::prelude::{DeploymentState, NodeId, QueryExecutionError, SubgraphName}; use crate::schema::{ApiSchema, InputSchema}; pub trait SubscriptionManager: Send + Sync + 'static { From 238f3d8b62fda64030adeb687b7a17d73861ded2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 13 Sep 2023 16:45:31 -0700 Subject: [PATCH 0428/2104] graph: Use the GraphQL types from graph::prelude --- graph/src/components/store/mod.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 40626b4f850..fe608c61671 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -17,7 +17,6 @@ pub use write::Batch; use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; -use graphql_parser::schema as s; use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::collections::btree_map::Entry; @@ -38,7 +37,7 @@ use crate::data::store::Value; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::env::ENV_VARS; -use crate::prelude::{Attribute, DeploymentHash, SubscriptionFilter, 
ValueType}; +use crate::prelude::{s, Attribute, DeploymentHash, SubscriptionFilter, ValueType}; use crate::schema::InputSchema; use crate::util::intern; use crate::util::stats::MovingStats; @@ -75,14 +74,14 @@ impl fmt::Display for EntityType { } } -impl<'a> From<&s::ObjectType<'a, String>> for EntityType { - fn from(object_type: &s::ObjectType<'a, String>) -> Self { +impl<'a> From<&s::ObjectType> for EntityType { + fn from(object_type: &s::ObjectType) -> Self { EntityType::new(object_type.name.clone()) } } -impl<'a> From<&s::InterfaceType<'a, String>> for EntityType { - fn from(interface_type: &s::InterfaceType<'a, String>) -> Self { +impl<'a> From<&s::InterfaceType> for EntityType { + fn from(interface_type: &s::InterfaceType) -> Self { EntityType::new(interface_type.name.clone()) } } From e31d1641f3d5c5b5e7aa01a3bf3ba075344f572f Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 14 Sep 2023 15:10:55 -0700 Subject: [PATCH 0429/2104] graph, graphql, store: Remove EntityType from base GraphQL schema EntityType is a concept that only makes sense for subgraph input and API schemas but not in the generic GraphQL machinery. 
There, we identify object and interface types by their string name --- graph/src/schema/api.rs | 5 ++--- graph/src/schema/input_schema.rs | 7 ++----- graph/src/schema/mod.rs | 23 +++++++++++------------ graphql/src/execution/ast.rs | 3 +-- graphql/src/introspection/resolver.rs | 2 +- graphql/src/store/query.rs | 16 +++++++--------- store/postgres/src/deployment_store.rs | 4 ++-- store/postgres/src/relational.rs | 2 +- 8 files changed, 27 insertions(+), 35 deletions(-) diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 6d120ade266..7bfec8a455a 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -5,7 +5,6 @@ use graphql_parser::{schema::TypeDefinition, Pos}; use inflector::Inflector; use lazy_static::lazy_static; -use crate::components::store::EntityType; use crate::data::graphql::{ObjectOrInterface, ObjectTypeExt}; use crate::schema::{ast, META_FIELD_NAME, META_FIELD_TYPE}; @@ -136,12 +135,12 @@ impl ApiSchema { &self.schema } - pub fn types_for_interface(&self) -> &BTreeMap> { + pub fn types_for_interface(&self) -> &BTreeMap> { &self.schema.types_for_interface } /// Returns `None` if the type implements no interfaces. 
- pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { + pub fn interfaces_for_type(&self, type_name: &str) -> Option<&Vec> { self.schema.interfaces_for_type(type_name) } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index db53b0079b4..6a13c6d2866 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -286,10 +286,7 @@ impl InputSchema { } pub fn types_for_interface(&self, intf: &s::InterfaceType) -> Option<&Vec> { - self.inner - .schema - .types_for_interface - .get(&EntityType::new(intf.name.clone())) + self.inner.schema.types_for_interface.get(&intf.name) } pub fn find_object_type(&self, entity_type: &EntityType) -> Option<&s::ObjectType> { @@ -313,7 +310,7 @@ impl InputSchema { self.inner.schema.document.get_object_type_definitions() } - pub fn interface_types(&self) -> &BTreeMap> { + pub fn interface_types(&self) -> &BTreeMap> { &self.inner.schema.types_for_interface } diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 4361255debf..3c4247308a2 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -1,4 +1,3 @@ -use crate::components::store::EntityType; use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; use crate::data::store::ValueType; @@ -120,10 +119,10 @@ pub struct Schema { pub document: s::Document, // Maps type name to implemented interfaces. - pub interfaces_for_type: BTreeMap>, + pub interfaces_for_type: BTreeMap>, // Maps an interface name to the list of entities that implement it. 
- pub types_for_interface: BTreeMap>, + pub types_for_interface: BTreeMap>, } impl Schema { @@ -152,8 +151,8 @@ impl Schema { document: &s::Document, ) -> Result< ( - BTreeMap>, - BTreeMap>, + BTreeMap>, + BTreeMap>, ), SchemaValidationError, > { @@ -162,20 +161,20 @@ impl Schema { let mut types_for_interface = BTreeMap::from_iter(document.definitions.iter().filter_map(|d| match d { Definition::TypeDefinition(TypeDefinition::Interface(t)) => { - Some((EntityType::from(t), vec![])) + Some((t.name.to_string(), vec![])) } _ => None, })); let mut interfaces_for_type = BTreeMap::<_, Vec<_>>::new(); for object_type in document.get_object_type_definitions() { - for implemented_interface in object_type.implements_interfaces.clone() { + for implemented_interface in &object_type.implements_interfaces { let interface_type = document .definitions .iter() .find_map(|def| match def { Definition::TypeDefinition(TypeDefinition::Interface(i)) - if i.name.eq(&implemented_interface) => + if i.name.eq(implemented_interface) => { Some(i.clone()) } @@ -188,11 +187,11 @@ impl Schema { Self::validate_interface_implementation(object_type, &interface_type)?; interfaces_for_type - .entry(EntityType::from(object_type)) + .entry(object_type.name.to_owned()) .or_default() .push(interface_type); types_for_interface - .get_mut(&EntityType::new(implemented_interface)) + .get_mut(implemented_interface) .unwrap() .push(object_type.clone()); } @@ -208,12 +207,12 @@ impl Schema { } /// Returned map has one an entry for each interface in the schema. - pub fn types_for_interface(&self) -> &BTreeMap> { + pub fn types_for_interface(&self) -> &BTreeMap> { &self.types_for_interface } /// Returns `None` if the type implements no interfaces. 
- pub fn interfaces_for_type(&self, type_name: &EntityType) -> Option<&Vec> { + pub fn interfaces_for_type(&self, type_name: &str) -> Option<&Vec> { self.interfaces_for_type.get(type_name) } diff --git a/graphql/src/execution/ast.rs b/graphql/src/execution/ast.rs index 20b08ec0e24..b4d318acb02 100644 --- a/graphql/src/execution/ast.rs +++ b/graphql/src/execution/ast.rs @@ -1,7 +1,6 @@ use std::collections::HashSet; use graph::{ - components::store::EntityType, data::graphql::ObjectOrInterface, prelude::{anyhow, q, r, s, QueryExecutionError, ValueMap}, schema::{ast::ObjectType, ApiSchema}, @@ -351,7 +350,7 @@ pub(crate) fn resolve_object_types( .ok_or_else(|| QueryExecutionError::AbstractTypeError(name.to_string()))? { s::TypeDefinition::Interface(intf) => { - for obj_ty in &schema.types_for_interface()[&EntityType::new(intf.name.to_string())] { + for obj_ty in &schema.types_for_interface()[&intf.name] { let obj_ty = schema.object_type(obj_ty); set.insert(obj_ty.into()); } diff --git a/graphql/src/introspection/resolver.rs b/graphql/src/introspection/resolver.rs index c1a0e0560dd..ef775c93afe 100644 --- a/graphql/src/introspection/resolver.rs +++ b/graphql/src/introspection/resolver.rs @@ -196,7 +196,7 @@ fn object_interfaces( ) -> r::Value { r::Value::List( schema - .interfaces_for_type(&object_type.into()) + .interfaces_for_type(&object_type.name) .unwrap_or(&vec![]) .iter() .map(|typedef| interface_type_object(schema, type_objects, typedef)) diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 18faeda9b53..ca1fa113044 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -27,7 +27,7 @@ pub(crate) fn build_query<'a>( entity: impl Into>, block: BlockNumber, field: &a::Field, - types_for_interface: &'a BTreeMap>, + types_for_interface: &'a BTreeMap>, max_first: u32, max_skip: u32, mut column_names: SelectedAttributes, @@ -39,8 +39,7 @@ pub(crate) fn build_query<'a>( let selected_columns = column_names.get(object); 
vec![((*object).into(), selected_columns)] } - ObjectOrInterface::Interface(interface) => types_for_interface - [&EntityType::from(*interface)] + ObjectOrInterface::Interface(interface) => types_for_interface[&interface.name] .iter() .map(|o| { let selected_columns = column_names.get(o); @@ -575,13 +574,12 @@ fn build_order_by( })? } ObjectOrInterface::Interface(_) => { - let object_types = schema - .types_for_interface() - .get(&EntityType::new(entity.name().to_string())) - .ok_or(QueryExecutionError::EntityFieldError( + let object_types = schema.types_for_interface().get(entity.name()).ok_or( + QueryExecutionError::EntityFieldError( entity.name().to_owned(), parent_field_name.clone(), - ))?; + ), + )?; if let Some(first_entity) = object_types.first() { sast::get_field(first_entity, parent_field_name.as_str()).ok_or_else( @@ -636,7 +634,7 @@ fn build_order_by( ObjectOrInterface::Interface(interface) => { let entity_types = schema .types_for_interface() - .get(&EntityType::new(interface.name.to_string())) + .get(&interface.name) .map(|object_types| { object_types .iter() diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 648b208fbf5..4d68987d4de 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -298,10 +298,10 @@ impl DeploymentStore { let entity_type_str = entity_type.to_string(); let types_with_shared_interface = Vec::from_iter( schema - .interfaces_for_type(entity_type) + .interfaces_for_type(entity_type.as_str()) .into_iter() .flatten() - .flat_map(|interface| &types_for_interface[&EntityType::from(interface)]) + .flat_map(|interface| &types_for_interface[&interface.name]) .map(EntityType::from) .filter(|type_name| type_name != entity_type), ); diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 01e9bf81c13..0e6ad2fc2da 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -312,7 +312,7 @@ impl 
Layout { // they have a String `id` field // see also: id-type-for-unimplemented-interfaces let id_type = types.iter().next().cloned().unwrap_or(IdType::String); - Ok((interface.clone(), id_type)) + Ok((EntityType::from(interface.as_str()), id_type)) } }) }); From 392cea8bf8e23ebb7aaeb5c660f104741562613e Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 09:57:16 -0700 Subject: [PATCH 0430/2104] graphql: Rename Join::perform to add_children The `perform` function didn't really have anything to do with the `Join` struct --- graphql/src/store/prefetch.rs | 97 ++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 47 deletions(-) diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 4aafec966a5..d734207bcbf 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -373,52 +373,6 @@ impl<'a> Join<'a> { Join { child_type, conds } } - /// Perform the join. The child nodes are distributed into the parent nodes - /// according to the `parent_id` returned by the database in each child as - /// attribute `g$parent_id`, and are stored in the `response_key` entry - /// in each parent's `children` map. - /// - /// The `children` must contain the nodes in the correct order for each - /// parent; we simply pick out matching children for each parent but - /// otherwise maintain the order in `children` - fn perform(parents: &mut [&mut Node], children: Vec, response_key: &str) { - let children: Vec<_> = children.into_iter().map(Rc::new).collect(); - - if parents.len() == 1 { - let parent = parents.first_mut().expect("we just checked"); - parent.set_children(response_key.to_owned(), children); - return; - } - - // Build a map parent_id -> Vec that we will use to add - // children to their parent. This relies on the fact that interfaces - // make sure that id's are distinct across all implementations of the - // interface. 
- let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); - for child in children.iter() { - match child - .get(&*PARENT_ID) - .expect("the query that produces 'child' ensures there is always a g$parent_id") - { - r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), - _ => unreachable!("the parent_id returned by the query is always a string"), - } - } - - // Add appropriate children using grouped map - for parent in parents { - // Set the `response_key` field in `parent`. Make sure that even if `parent` has no - // matching `children`, the field gets set (to an empty `Vec`). - // - // This `insert` will overwrite in the case where the response key occurs both at the - // interface level and in nested object type conditions. The values for the interface - // query are always joined first, and may then be overwritten by the merged selection - // set under the object type condition. See also: e0d6da3e-60cf-41a5-b83c-b60a7a766d4a - let values = parent.id().ok().and_then(|id| grouped.get(&*id).cloned()); - parent.set_children(response_key.to_owned(), values.unwrap_or_default()); - } - } - fn windows( &self, parents: &[&mut Node], @@ -456,6 +410,55 @@ impl<'a> Join<'a> { } } +/// Link children to their parents. The child nodes are distributed into the +/// parent nodes according to the `parent_id` returned by the database in +/// each child as attribute `g$parent_id`, and are stored in the +/// `response_key` entry in each parent's `children` map. +/// +/// The `children` must contain the nodes in the correct order for each +/// parent; we simply pick out matching children for each parent but +/// otherwise maintain the order in `children` +/// +/// If `parents` only has one entry, add all children to that one parent. In +/// particular, this is what happens for toplevel queries. 
+fn add_children(parents: &mut [&mut Node], children: Vec, response_key: &str) { + let children: Vec<_> = children.into_iter().map(Rc::new).collect(); + + if parents.len() == 1 { + let parent = parents.first_mut().expect("we just checked"); + parent.set_children(response_key.to_owned(), children); + return; + } + + // Build a map parent_id -> Vec that we will use to add + // children to their parent. This relies on the fact that interfaces + // make sure that id's are distinct across all implementations of the + // interface. + let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); + for child in children.iter() { + match child + .get(&*PARENT_ID) + .expect("the query that produces 'child' ensures there is always a g$parent_id") + { + r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), + _ => unreachable!("the parent_id returned by the query is always a string"), + } + } + + // Add appropriate children using grouped map + for parent in parents { + // Set the `response_key` field in `parent`. Make sure that even if `parent` has no + // matching `children`, the field gets set (to an empty `Vec`). + // + // This `insert` will overwrite in the case where the response key occurs both at the + // interface level and in nested object type conditions. The values for the interface + // query are always joined first, and may then be overwritten by the merged selection + // set under the object type condition. See also: e0d6da3e-60cf-41a5-b83c-b60a7a766d4a + let values = parent.id().ok().and_then(|id| grouped.get(&*id).cloned()); + parent.set_children(response_key.to_owned(), values.unwrap_or_default()); + } +} + /// Run the query in `ctx` in such a manner that we only perform one query /// per 'level' in the query. 
A query like `musicians { id bands { id } }` /// will perform two queries: one for musicians, and one for bands, regardless @@ -602,7 +605,7 @@ fn execute_selection_set<'a>( &field.selection_set, ) { Ok((children, trace)) => { - Join::perform(&mut parents, children, field.response_key()); + add_children(&mut parents, children, field.response_key()); let weight = parents.iter().map(|parent| parent.weight()).sum::(); check_result_size(ctx, weight)?; From 781d99b01ccf630e41862a844f65062e3d482697 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 10:29:21 -0700 Subject: [PATCH 0431/2104] gaphql: Handle queries at the root special in prefetch --- graphql/src/store/prefetch.rs | 50 +++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index d734207bcbf..938c6cf3239 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -410,6 +410,29 @@ impl<'a> Join<'a> { } } +/// Distinguish between a root GraphQL query and nested queries. For root +/// queries, there is no parent type, and it doesn't really make sense to +/// worry about join conditions since there is only one parent (the root). +/// In particular, the parent type for root queries is `Query` which is not +/// an entity type, and we would create a `Join` with a fake entity type for +/// the parent type +enum MaybeJoin<'a> { + Root { child_type: ObjectOrInterface<'a> }, + Nested(Join<'a>), +} + +impl<'a> MaybeJoin<'a> { + fn child_type(&self) -> ObjectOrInterface<'_> { + match self { + MaybeJoin::Root { child_type } => child_type.clone(), + MaybeJoin::Nested(Join { + child_type, + conds: _, + }) => child_type.clone(), + } + } +} + /// Link children to their parents. 
The child nodes are distributed into the /// parent nodes according to the `parent_id` returned by the database in /// each child as attribute `g$parent_id`, and are stored in the @@ -538,6 +561,7 @@ fn execute_selection_set<'a>( ) -> Result<(Vec, Trace), Vec> { let schema = &ctx.query.schema; let mut errors: Vec = Vec::new(); + let at_root = is_root_node(parents.iter()); // Process all field groups in order for (object_type, fields) in selection_set.interior_fields() { @@ -549,7 +573,7 @@ fn execute_selection_set<'a>( } // Filter out parents that do not match the type condition. - let mut parents: Vec<&mut Node> = if is_root_node(parents.iter()) { + let mut parents: Vec<&mut Node> = if at_root { parents.iter_mut().collect() } else { parents @@ -570,12 +594,16 @@ fn execute_selection_set<'a>( .object_or_interface(field_type.field_type.get_base_type()) .expect("we only collect fields that are objects or interfaces"); - let join = Join::new( - ctx.query.schema.as_ref(), - object_type, - child_type, - &field.name, - ); + let join = if at_root { + MaybeJoin::Root { child_type } + } else { + MaybeJoin::Nested(Join::new( + ctx.query.schema.as_ref(), + object_type, + child_type, + &field.name, + )) + }; // "Select by Specific Attribute Names" is an experimental feature and can be disabled completely. 
// If this environment variable is set, the program will use an empty collection that, @@ -633,7 +661,7 @@ fn execute_field( resolver: &StoreResolver, ctx: &ExecutionContext, parents: &[&mut Node], - join: &Join<'_>, + join: &MaybeJoin<'_>, field: &a::Field, field_definition: &s::Field, selected_attrs: SelectedAttributes, @@ -663,13 +691,13 @@ fn fetch( resolver: &StoreResolver, ctx: &ExecutionContext, parents: &[&mut Node], - join: &Join<'_>, + join: &MaybeJoin<'_>, field: &a::Field, multiplicity: ChildMultiplicity, selected_attrs: SelectedAttributes, ) -> Result<(Vec, Trace), QueryExecutionError> { let mut query = build_query( - join.child_type, + join.child_type(), resolver.block_number(), field, ctx.query.schema.types_for_interface(), @@ -695,7 +723,7 @@ fn fetch( ); } - if !is_root_node(parents.iter().map(|p| &**p)) { + if let MaybeJoin::Nested(join) = join { // For anything but the root node, restrict the children we select // by the parent list let windows = join.windows(parents, multiplicity, &query.collection); From 663a5b50c47a2d9cfbe1d808ba385066bf987387 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Thu, 14 Sep 2023 13:34:38 -0700 Subject: [PATCH 0432/2104] all: Make InputSchema a factory for EntityType This makes it impossible to construct an EntityType that does not exist in the InputSchema --- chain/substreams/src/mapper.rs | 4 +- graph/src/components/store/entity_cache.rs | 4 +- graph/src/components/store/mod.rs | 79 +++---- graph/src/components/store/traits.rs | 2 + graph/src/components/store/write.rs | 25 ++- graph/src/data/graphql/object_or_interface.rs | 9 - graph/src/data/query/error.rs | 6 + graph/src/data/store/mod.rs | 3 +- graph/src/schema/api.rs | 10 +- graph/src/schema/ast.rs | 4 +- graph/src/schema/input_schema.rs | 66 +++++- graph/src/util/intern.rs | 10 + graphql/src/store/prefetch.rs | 20 +- graphql/src/store/query.rs | 165 +++++++++------ graphql/src/store/resolver.rs | 4 +- node/src/manager/commands/listen.rs | 29 ++- 
runtime/test/src/test.rs | 7 +- runtime/wasm/src/host_exports.rs | 12 +- server/index-node/src/schema.rs | 2 +- store/postgres/src/copy.rs | 2 +- store/postgres/src/deployment.rs | 14 +- store/postgres/src/deployment_store.rs | 26 ++- store/postgres/src/detail.rs | 45 ++-- store/postgres/src/fork.rs | 4 +- store/postgres/src/query_store.rs | 7 +- store/postgres/src/relational.rs | 34 ++- store/postgres/src/relational/ddl_tests.rs | 11 +- store/postgres/src/relational/query_tests.rs | 3 +- store/postgres/src/relational_queries.rs | 10 +- store/postgres/src/subgraph_store.rs | 8 +- store/postgres/src/writable.rs | 4 +- .../tests/chain/ethereum/manifest.rs | 23 ++- store/test-store/tests/core/interfaces.rs | 6 +- store/test-store/tests/graph/entity_cache.rs | 38 ++-- .../test-store/tests/graphql/introspection.rs | 2 +- store/test-store/tests/graphql/query.rs | 14 +- store/test-store/tests/postgres/graft.rs | 11 +- store/test-store/tests/postgres/relational.rs | 195 ++++++++---------- .../tests/postgres/relational_bytes.rs | 91 ++++---- store/test-store/tests/postgres/store.rs | 114 +++++----- store/test-store/tests/postgres/writable.rs | 7 +- 41 files changed, 657 insertions(+), 473 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 49323c43469..1aad546e9bd 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -7,7 +7,7 @@ use crate::{codec, Block, Chain, EntityChanges, ParsedChanges, TriggerData}; use graph::blockchain::block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; -use graph::components::store::{EntityKey, EntityType}; +use graph::components::store::EntityKey; use graph::data::store::scalar::Bytes; use graph::data::store::IdType; use graph::data::value::Word; @@ -131,7 +131,7 @@ fn parse_changes( let mut parsed_changes = vec![]; for entity_change in changes.entity_changes.iter() { let mut parsed_data: HashMap = HashMap::default(); - 
let entity_type = EntityType::new(entity_change.entity.to_string()); + let entity_type = schema.entity_type(&entity_change.entity)?; // Make sure that the `entity_id` gets set to a value // that is safe for roundtrips through the database. In diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index cf20c3d4b41..fb95a131ff8 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -13,7 +13,7 @@ use crate::schema::InputSchema; use crate::util::intern::Error as InternError; use crate::util::lfu_cache::{EvictStats, LfuCache}; -use super::{BlockNumber, DerivedEntityQuery, EntityType, LoadRelatedRequest, StoreError}; +use super::{BlockNumber, DerivedEntityQuery, LoadRelatedRequest, StoreError}; /// The scope in which the `EntityCache` should perform a `get` operation pub enum GetScope { @@ -204,7 +204,7 @@ impl EntityCache { let (base_type, field, id_is_bytes) = self.schema.get_field_related(eref)?; let query = DerivedEntityQuery { - entity_type: EntityType::new(base_type.to_string()), + entity_type: self.schema.entity_type(base_type)?, entity_field: field.name.clone().into(), value: eref.entity_id.clone(), causality_region: eref.causality_region, diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index fe608c61671..79fe86d05a3 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -3,12 +3,12 @@ mod err; mod traits; pub mod write; +use anyhow::{bail, Error}; pub use entity_cache::{EntityCache, GetScope, ModificationsAndCache}; use futures03::future::{FutureExt, TryFutureExt}; use slog::{trace, Logger}; pub use super::subgraph::Entity; -use diesel::types::{FromSql, ToSql}; pub use err::StoreError; use itertools::Itertools; use strum_macros::Display; @@ -21,17 +21,18 @@ use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, 
BTreeSet, HashSet}; +use std::fmt; use std::fmt::Display; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; -use std::{fmt, io}; use crate::blockchain::{Block, BlockHash, BlockPtr}; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; use crate::constraint_violation; +use crate::data::graphql::ObjectOrInterface; use crate::data::store::scalar::Bytes; use crate::data::store::Value; use crate::data::value::Word; @@ -39,7 +40,7 @@ use crate::data_source::CausalityRegion; use crate::env::ENV_VARS; use crate::prelude::{s, Attribute, DeploymentHash, SubscriptionFilter, ValueType}; use crate::schema::InputSchema; -use crate::util::intern; +use crate::util::intern::{self, AtomPool}; use crate::util::stats::MovingStats; /// The type name of an entity. This is the string that is used in the @@ -51,8 +52,11 @@ impl EntityType { /// Construct a new entity type. Ideally, this is only called when /// `entity_type` either comes from the GraphQL schema, or from /// the database from fields that are known to contain a valid entity type - pub fn new(entity_type: String) -> Self { - Self(entity_type.into()) + pub fn new(pool: &Arc, entity_type: &str) -> Result { + match pool.lookup(entity_type) { + Some(_) => Ok(EntityType(Word::from(entity_type))), + None => bail!("entity type `{}` is not interned", entity_type), + } } pub fn as_str(&self) -> &str { @@ -74,56 +78,57 @@ impl fmt::Display for EntityType { } } -impl<'a> From<&s::ObjectType> for EntityType { - fn from(object_type: &s::ObjectType) -> Self { - EntityType::new(object_type.name.clone()) +impl Borrow for EntityType { + fn borrow(&self) -> &str { + &self.0 } } -impl<'a> From<&s::InterfaceType> for EntityType { - fn from(interface_type: &s::InterfaceType) -> Self { - EntityType::new(interface_type.name.clone()) +impl CheapClone for EntityType {} + +impl std::fmt::Debug for EntityType { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + write!(f, "EntityType({})", self.0) } } -impl Borrow for EntityType { - fn borrow(&self) -> &str { - &self.0 - } +pub trait AsEntityTypeName { + fn name(&self) -> &str; } -// This conversion should only be used in tests since it makes it too -// easy to convert random strings into entity types -#[cfg(debug_assertions)] -impl From<&str> for EntityType { - fn from(s: &str) -> Self { - EntityType::new(s.to_owned()) +impl AsEntityTypeName for &str { + fn name(&self) -> &str { + self } } -impl CheapClone for EntityType {} +impl AsEntityTypeName for &String { + fn name(&self) -> &str { + self.as_str() + } +} -impl FromSql for EntityType { - fn from_sql(bytes: Option<&[u8]>) -> diesel::deserialize::Result { - let s = >::from_sql(bytes)?; - Ok(EntityType::new(s)) +impl AsEntityTypeName for &s::ObjectType { + fn name(&self) -> &str { + &self.name } } -impl ToSql for EntityType { - fn to_sql( - &self, - out: &mut diesel::serialize::Output, - ) -> diesel::serialize::Result { - >::to_sql(self.0.as_str(), out) +impl AsEntityTypeName for &s::InterfaceType { + fn name(&self) -> &str { + &self.name } } -impl std::fmt::Debug for EntityType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "EntityType({})", self.0) +impl AsEntityTypeName for ObjectOrInterface<'_> { + fn name(&self) -> &str { + match self { + ObjectOrInterface::Object(object) => &object.name, + ObjectOrInterface::Interface(interface) => &interface.name, + } } } + #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityFilterDerivative(bool); @@ -225,9 +230,9 @@ impl DerivedEntityQuery { impl EntityKey { // For use in tests only #[cfg(debug_assertions)] - pub fn data(entity_type: impl Into, entity_id: impl Into) -> Self { + pub fn onchain(entity_type: &EntityType, entity_id: impl Into) -> Self { Self { - entity_type: EntityType::new(entity_type.into()), + entity_type: entity_type.clone(), entity_id: 
entity_id.into().into(), causality_region: CausalityRegion::ONCHAIN, } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 077c29008ec..ce28e492b94 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -564,6 +564,8 @@ pub trait QueryStore: Send + Sync { fn api_schema(&self) -> Result, QueryExecutionError>; + fn input_schema(&self) -> Result, QueryExecutionError>; + fn network_name(&self) -> &str; /// A permit should be acquired before starting query execution. diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 8a1f36e2364..0323345434a 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -922,12 +922,12 @@ mod test { .iter() .zip(blocks.iter()) .map(|(value, block)| EntityModification::Remove { - key: EntityKey::data("RowGroup".to_string(), value.to_string()), + key: EntityKey::onchain(&*ROW_GROUP_TYPE, value.to_string()), block: *block, }) .collect(); let group = RowGroup { - entity_type: EntityType::new("Entry".to_string()), + entity_type: ENTRY_TYPE.clone(), rows, immutable: false, }; @@ -964,11 +964,18 @@ mod test { check_runs(&[10, 20, 11], &[1, 2, 1], exp); } - const GQL: &str = "type Thing @entity { id: ID!, count: Int! }"; + // A very fake schema that allows us to get the entity types we need + const GQL: &str = r#" + type Thing @entity { id: ID!, count: Int! } + type RowGroup @entity { id: ID! } + type Entry @entity { id: ID! } + "#; lazy_static! 
{ static ref DEPLOYMENT: DeploymentHash = DeploymentHash::new("batchAppend").unwrap(); static ref SCHEMA: InputSchema = InputSchema::parse(GQL, DEPLOYMENT.clone()).unwrap(); - static ref ENTITY_TYPE: EntityType = EntityType::new("Thing".to_string()); + static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); + static ref ROW_GROUP_TYPE: EntityType = SCHEMA.entity_type("RowGroup").unwrap(); + static ref ENTRY_TYPE: EntityType = SCHEMA.entity_type("Entry").unwrap(); } /// Convenient notation for changes to a fixed entity @@ -988,7 +995,7 @@ mod test { use Mod::*; let value = value.clone(); - let key = EntityKey::data("Thing", "one"); + let key = EntityKey::onchain(&*THING_TYPE, "one"); match value { Ins(block) => EntityModification::Insert { key, @@ -1028,7 +1035,7 @@ mod test { impl Group { fn new() -> Self { Self { - group: RowGroup::new(ENTITY_TYPE.clone(), false), + group: RowGroup::new(THING_TYPE.clone(), false), } } @@ -1092,7 +1099,7 @@ mod test { fn last_op() { #[track_caller] fn is_remove(group: &RowGroup, at: BlockNumber) { - let key = EntityKey::data("Thing", "one"); + let key = EntityKey::onchain(&*THING_TYPE, "one"); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1104,7 +1111,7 @@ mod test { } #[track_caller] fn is_write(group: &RowGroup, at: BlockNumber) { - let key = EntityKey::data("Thing", "one"); + let key = EntityKey::onchain(&*THING_TYPE, "one"); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1117,7 +1124,7 @@ mod test { use Mod::*; - let key = EntityKey::data("Thing", "one"); + let key = EntityKey::onchain(&*THING_TYPE, "one"); // This will result in two mods int the group: // [ InsC(1,2), InsC(2,3) ] diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index dfefdfad2c6..af690192151 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs @@ -63,15 +63,6 @@ impl<'a> From<&'a s::InterfaceType> for 
ObjectOrInterface<'a> { } } -impl<'a> From> for EntityType { - fn from(ooi: ObjectOrInterface) -> Self { - match ooi { - ObjectOrInterface::Object(ty) => EntityType::from(ty), - ObjectOrInterface::Interface(ty) => EntityType::from(ty), - } - } -} - impl<'a> ObjectOrInterface<'a> { pub fn is_object(self) -> bool { match self { diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index 23715f1d614..ec0d314ba11 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -322,6 +322,12 @@ impl From for QueryExecutionError { } } +impl From for QueryExecutionError { + fn from(e: anyhow::Error) -> Self { + QueryExecutionError::Panic(e.to_string()) + } +} + /// Error caused while processing a [Query](struct.Query.html) request. #[derive(Clone, Debug)] pub enum QueryError { diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index f77d59daed8..171357e682d 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1099,6 +1099,7 @@ fn entity_validation() { static ref SUBGRAPH: DeploymentHash = DeploymentHash::new("doesntmatter").unwrap(); static ref SCHEMA: InputSchema = InputSchema::parse(DOCUMENT, SUBGRAPH.clone()).expect("Failed to parse test schema"); + static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } fn make_thing(name: &str) -> Entity { @@ -1107,7 +1108,7 @@ fn entity_validation() { fn check(thing: Entity, errmsg: &str) { let id = thing.id(); - let key = EntityKey::data("Thing".to_owned(), id.clone()); + let key = EntityKey::onchain(&*THING_TYPE, id.clone()); let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { diff --git a/graph/src/schema/api.rs b/graph/src/schema/api.rs index 7bfec8a455a..2bb29a4cbcf 100644 --- a/graph/src/schema/api.rs +++ b/graph/src/schema/api.rs @@ -93,7 +93,7 @@ impl ApiSchema { /// In addition, the API schema has an introspection schema mixed into /// `api_schema`. 
In particular, the `Query` type has fields called /// `__schema` and `__type` - pub fn from_api_schema(mut api_schema: Schema) -> Result { + pub(crate) fn from_api_schema(mut api_schema: Schema) -> Result { add_introspection_schema(&mut api_schema.document); let query_type = api_schema @@ -123,6 +123,14 @@ impl ApiSchema { }) } + /// Create an API Schema that can be used to execute GraphQL queries. + /// This method is only meant for schemas that are not derived from a + /// subgraph schema, like the schema for the index-node server. Use + /// `InputSchema::api_schema` to get an API schema for a subgraph + pub fn from_graphql_schema(schema: Schema) -> Result { + Self::from_api_schema(schema) + } + pub fn document(&self) -> &s::Document { &self.schema.document } diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 5b82b793edb..822df504f9e 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -406,6 +406,7 @@ pub fn is_list(field_type: &s::Type) -> bool { #[test] fn entity_validation() { use crate::components::store::EntityKey; + use crate::components::store::EntityType; use crate::data::store; use crate::entity; use crate::prelude::{DeploymentHash, Entity}; @@ -432,6 +433,7 @@ fn entity_validation() { lazy_static! 
{ static ref SUBGRAPH: DeploymentHash = DeploymentHash::new("doesntmatter").unwrap(); static ref SCHEMA: InputSchema = InputSchema::raw(DOCUMENT, "doesntmatter"); + static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } fn make_thing(name: &str) -> Entity { @@ -440,7 +442,7 @@ fn entity_validation() { fn check(thing: Entity, errmsg: &str) { let id = thing.id(); - let key = EntityKey::data("Thing".to_owned(), id.clone()); + let key = EntityKey::onchain(&*THING_TYPE, id.clone()); let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 6a13c6d2866..df57cefa3f3 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Error}; use store::Entity; use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, EntityType, LoadRelatedRequest}; +use crate::components::store::{AsEntityTypeName, EntityKey, EntityType, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; use crate::data::store::{ @@ -57,17 +57,19 @@ impl CheapClone for InputSchema { impl InputSchema { fn create(schema: Schema) -> Self { + let pool = Arc::new(atom_pool(&schema.document)); + let immutable_types = HashSet::from_iter( schema .document .get_object_type_definitions() .into_iter() .filter(|obj_type| obj_type.is_immutable()) - .map(Into::into), + .map(|obj_type| EntityType::new(&pool, &obj_type.name)) + .collect::, _>>() + .unwrap(), ); - let pool = Arc::new(atom_pool(&schema.document)); - let field_names = HashMap::from_iter( schema .document @@ -79,11 +81,13 @@ impl InputSchema { .iter() .map(|field| pool.lookup(&field.name).unwrap()) .collect(); - (EntityType::from(obj_type), fields) - }), + EntityType::new(&pool, &obj_type.name).map(|t| (t, fields)) + }) + .collect::, _>>() + .unwrap(), ); - 
let poi_type = EntityType::new(POI_OBJECT.to_string()); + let poi_type = EntityType::new(&pool, POI_OBJECT).unwrap(); Self { inner: Arc::new(Inner { @@ -126,6 +130,10 @@ impl InputSchema { Self::parse(document, hash).unwrap() } + pub fn schema(&self) -> &Schema { + &self.inner.schema + } + /// Generate the `ApiSchema` for use with GraphQL queries for this /// `InputSchema` pub fn api_schema(&self) -> Result { @@ -289,6 +297,11 @@ impl InputSchema { self.inner.schema.types_for_interface.get(&intf.name) } + /// Returns `None` if the type implements no interfaces. + pub fn interfaces_for_type(&self, type_name: &str) -> Option<&Vec> { + self.inner.schema.interfaces_for_type(type_name) + } + pub fn find_object_type(&self, entity_type: &EntityType) -> Option<&s::ObjectType> { self.inner .schema @@ -394,6 +407,22 @@ impl InputSchema { pub fn poi_digest(&self) -> Word { Word::from(POI_DIGEST) } + + pub fn atom(&self, s: &str) -> Option { + self.inner.pool.lookup(s) + } + + pub fn pool(&self) -> &Arc { + &self.inner.pool + } + + /// Return the entity type for `named`. If the entity type does not + /// exist, return an error. Generally, an error should only be possible + /// of `named` is based on user input. If `named` is an internal object, + /// like a `ObjectType`, it is safe to unwrap the result + pub fn entity_type(&self, named: N) -> Result { + EntityType::new(&self.inner.pool, named.name()) + } } /// Create a new pool that contains the names of all the types defined @@ -453,3 +482,26 @@ fn atom_pool(document: &s::Document) -> AtomPool { pool } + +#[cfg(test)] +mod tests { + use crate::prelude::DeploymentHash; + + use super::InputSchema; + + const SCHEMA: &str = r#" + type Thing @entity { + id: ID! + name: String! 
+ } + "#; + + #[test] + fn entity_type() { + let id = DeploymentHash::new("test").unwrap(); + let schema = InputSchema::parse(SCHEMA, id).unwrap(); + + assert_eq!("Thing", schema.entity_type("Thing").unwrap().as_str()); + assert!(schema.entity_type("NonExistent").is_err()); + } +} diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 31da953efc2..1c93c3a389f 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -137,6 +137,16 @@ impl AtomPool { } } +impl> FromIterator for AtomPool { + fn from_iter>(iter: I) -> Self { + let mut pool = AtomPool::new(); + for s in iter { + pool.intern(s.as_ref()); + } + pool + } +} + /// A marker for an empty entry in an `Object` const TOMBSTONE_KEY: Atom = Atom(AtomInt::MAX); diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 938c6cf3239..38f48e2896a 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -13,7 +13,7 @@ use std::collections::BTreeMap; use std::rc::Rc; use std::time::Instant; -use graph::schema::{ast as sast, ApiSchema}; +use graph::schema::{ast as sast, InputSchema}; use graph::{components::store::EntityType, data::graphql::*}; use graph::{ data::graphql::ext::DirectiveFinder, @@ -255,6 +255,7 @@ struct JoinCond<'a> { impl<'a> JoinCond<'a> { fn new( + schema: &InputSchema, parent_type: &'a s::ObjectType, child_type: &'a s::ObjectType, field_name: &str, @@ -269,8 +270,8 @@ impl<'a> JoinCond<'a> { JoinRelation::Derived(JoinField::new(field)) }; JoinCond { - parent_type: parent_type.into(), - child_type: child_type.into(), + parent_type: schema.entity_type(parent_type).unwrap(), + child_type: schema.entity_type(child_type).unwrap(), relation, } } @@ -356,7 +357,7 @@ struct Join<'a> { impl<'a> Join<'a> { /// Construct a `Join` based on the parent field pointing to the child fn new( - schema: &'a ApiSchema, + schema: &'a InputSchema, parent_type: &'a s::ObjectType, child_type: ObjectOrInterface<'a>, field_name: &str, @@ -367,7 
+368,7 @@ impl<'a> Join<'a> { let conds = child_types .iter() - .map(|child_type| JoinCond::new(parent_type, child_type, field_name)) + .map(|child_type| JoinCond::new(schema, parent_type, child_type, field_name)) .collect(); Join { child_type, conds } @@ -560,6 +561,7 @@ fn execute_selection_set<'a>( selection_set: &a::SelectionSet, ) -> Result<(Vec, Trace), Vec> { let schema = &ctx.query.schema; + let input_schema = resolver.store.input_schema()?; let mut errors: Vec = Vec::new(); let at_root = is_root_node(parents.iter()); @@ -598,7 +600,7 @@ fn execute_selection_set<'a>( MaybeJoin::Root { child_type } } else { MaybeJoin::Nested(Join::new( - ctx.query.schema.as_ref(), + &input_schema, object_type, child_type, &field.name, @@ -696,6 +698,7 @@ fn fetch( multiplicity: ChildMultiplicity, selected_attrs: SelectedAttributes, ) -> Result<(Vec, Trace), QueryExecutionError> { + let input_schema = resolver.store.input_schema()?; let mut query = build_query( join.child_type(), resolver.block_number(), @@ -704,7 +707,10 @@ fn fetch( ctx.max_first, ctx.max_skip, selected_attrs, - &ctx.query.schema, + &super::query::SchemaPair { + api: ctx.query.schema.clone(), + input: input_schema, + }, )?; query.trace = ctx.trace; query.query_id = Some(ctx.query.query_id.clone()); diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index ca1fa113044..01dc49cbc48 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -7,7 +7,7 @@ use graph::data::value::Object; use graph::data::value::Value as DataValue; use graph::prelude::*; use graph::schema::ast::{self as sast, FilterOp}; -use graph::schema::ApiSchema; +use graph::schema::{ApiSchema, InputSchema}; use graph::{components::store::EntityType, data::graphql::ObjectOrInterface}; use crate::execution::ast as a; @@ -20,6 +20,11 @@ enum OrderDirection { Descending, } +pub(crate) struct SchemaPair { + pub api: Arc, + pub input: Arc, +} + /// Builds a EntityQuery from GraphQL arguments. 
/// /// Panics if `entity` is not present in `schema`. @@ -31,19 +36,21 @@ pub(crate) fn build_query<'a>( max_first: u32, max_skip: u32, mut column_names: SelectedAttributes, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result { let entity = entity.into(); let entity_types = EntityCollection::All(match &entity { ObjectOrInterface::Object(object) => { let selected_columns = column_names.get(object); - vec![((*object).into(), selected_columns)] + let entity_type = schema.input.entity_type(*object).unwrap(); + vec![(entity_type, selected_columns)] } ObjectOrInterface::Interface(interface) => types_for_interface[&interface.name] .iter() .map(|o| { let selected_columns = column_names.get(o); - (o.into(), selected_columns) + let entity_type = schema.input.entity_type(o).unwrap(); + (entity_type, selected_columns) }) .collect(), }); @@ -172,7 +179,7 @@ fn build_range( fn build_filter( entity: ObjectOrInterface, field: &a::Field, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result, QueryExecutionError> { let where_filter = match field.argument_value("where") { Some(r::Value::Object(object)) => match build_filter_from_object(entity, object, schema) { @@ -268,7 +275,7 @@ fn build_entity_filter( /// Iterate over the list and generate an EntityFilter from it fn build_list_filter_from_value( entity: ObjectOrInterface, - schema: &ApiSchema, + schema: &SchemaPair, value: &r::Value, ) -> Result, QueryExecutionError> { // We have object like this @@ -293,10 +300,10 @@ fn build_list_filter_from_value( } /// build a filter which has list of nested filters -fn build_list_filter_from_object( +fn build_list_filter_from_object<'a>( entity: ObjectOrInterface, object: &Object, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result, QueryExecutionError> { Ok(object .iter() @@ -309,10 +316,10 @@ fn build_list_filter_from_object( } /// Parses a GraphQL input object into an EntityFilter, if present. 
-fn build_filter_from_object( +fn build_filter_from_object<'a>( entity: ObjectOrInterface, object: &Object, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result, QueryExecutionError> { object .iter() @@ -389,15 +396,17 @@ fn build_child_filter_from_object( entity: ObjectOrInterface, field_name: String, object: &Object, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result { let field = entity .field(&field_name) .ok_or(QueryExecutionError::InvalidFilterError)?; let type_name = &field.field_type.get_base_type(); let child_entity = schema + .api .object_or_interface(type_name) .ok_or(QueryExecutionError::InvalidFilterError)?; + let child_entity_type = schema.input.entity_type(child_entity)?; let filter = Box::new(EntityFilter::And(build_filter_from_object( child_entity, object, @@ -415,25 +424,27 @@ fn build_child_filter_from_object( if child_entity.is_interface() { Ok(EntityFilter::Or( child_entity - .object_types(schema.schema()) + .object_types(schema.api.schema()) .ok_or(QueryExecutionError::AbstractTypeError( "Interface is not implemented by any types".to_string(), ))? .iter() .map(|object_type| { - EntityFilter::Child(Child { - attr: attr.clone(), - entity_type: EntityType::new(object_type.name.to_string()), - filter: filter.clone(), - derived, + schema.input.entity_type(*object_type).map(|entity_type| { + EntityFilter::Child(Child { + attr: attr.clone(), + entity_type, + filter: filter.clone(), + derived, + }) }) }) - .collect(), + .collect::>()?, )) } else if entity.is_interface() { Ok(EntityFilter::Or( entity - .object_types(schema.schema()) + .object_types(schema.api.schema()) .ok_or(QueryExecutionError::AbstractTypeError( "Interface is not implemented by any types".to_string(), ))? 
@@ -456,7 +467,7 @@ fn build_child_filter_from_object( Ok(EntityFilter::Child(Child { attr, - entity_type: EntityType::new(child_entity.name().to_string()), + entity_type: child_entity_type.clone(), filter: filter.clone(), derived, })) @@ -466,7 +477,7 @@ fn build_child_filter_from_object( } else { Ok(EntityFilter::Child(Child { attr, - entity_type: EntityType::new(type_name.to_string()), + entity_type: schema.input.entity_type(*type_name)?, filter, derived, })) @@ -543,7 +554,7 @@ enum OrderByChild { fn build_order_by( entity: ObjectOrInterface, field: &a::Field, - schema: &ApiSchema, + schema: &SchemaPair, ) -> Result)>, QueryExecutionError> { match field.argument_value("orderBy") { Some(r::Value::Enum(name)) => match parse_order_by(name)? { @@ -574,12 +585,13 @@ fn build_order_by( })? } ObjectOrInterface::Interface(_) => { - let object_types = schema.types_for_interface().get(entity.name()).ok_or( - QueryExecutionError::EntityFieldError( - entity.name().to_owned(), - parent_field_name.clone(), - ), - )?; + let object_types = + schema.api.types_for_interface().get(entity.name()).ok_or( + QueryExecutionError::EntityFieldError( + entity.name().to_owned(), + parent_field_name.clone(), + ), + )?; if let Some(first_entity) = object_types.first() { sast::get_field(first_entity, parent_field_name.as_str()).ok_or_else( @@ -602,6 +614,7 @@ fn build_order_by( let base_type = field.field_type.get_base_type(); let child_entity = schema + .api .object_or_interface(base_type) .ok_or_else(|| QueryExecutionError::NamedTypeError(base_type.into()))?; let child_field = sast::get_field(child_entity, child_field_name.as_str()) @@ -627,23 +640,24 @@ fn build_order_by( let child = match child_entity { ObjectOrInterface::Object(_) => OrderByChild::Object(ObjectOrderDetails { - entity_type: EntityType::new(base_type.into()), + entity_type: schema.input.entity_type(base_type)?, join_attribute, derived, }), ObjectOrInterface::Interface(interface) => { let entity_types = schema + .api 
.types_for_interface() .get(&interface.name) .map(|object_types| { object_types .iter() - .map(|object_type| EntityType::new(object_type.name.clone())) - .collect::>() + .map(|object_type| schema.input.entity_type(object_type)) + .collect::, _>>() }) .ok_or(QueryExecutionError::AbstractTypeError( "Interface not implemented by any object type".to_string(), - ))?; + ))??; OrderByChild::Interface(InterfaceOrderDetails { entity_types, join_attribute, @@ -720,6 +734,7 @@ pub fn parse_subgraph_id<'a>( /// Recursively collects entities involved in a query field as `(subgraph ID, name)` tuples. pub(crate) fn collect_entities_from_query_field( + input_schema: &InputSchema, schema: &ApiSchema, object_type: sast::ObjectType, field: &a::Field, @@ -761,16 +776,20 @@ pub(crate) fn collect_entities_from_query_field( } } - Ok(entities + entities .into_iter() - .map(|(id, entity_type)| SubscriptionFilter::Entities(id, EntityType::new(entity_type))) - .collect()) + .map(|(id, entity_type)| { + input_schema + .entity_type(&entity_type) + .map(|entity_type| SubscriptionFilter::Entities(id, entity_type)) + }) + .collect::>() + .map_err(Into::into) } #[cfg(test)] mod tests { use graph::{ - components::store::EntityType, data::value::Object, prelude::{ r, AttributeNames, DeploymentHash, EntityCollection, EntityFilter, EntityRange, Value, @@ -780,12 +799,12 @@ mod tests { s::{self, Directive, Field, InputValue, ObjectType, Type, Value as SchemaValue}, EntityOrder, }, - schema::{ApiSchema, Schema}, + schema::{ApiSchema, InputSchema, Schema}, }; use graphql_parser::Pos; use std::{collections::BTreeMap, iter::FromIterator, sync::Arc}; - use super::{a, build_query}; + use super::{a, build_query, SchemaPair}; fn default_object() -> ObjectType { let subgraph_id_argument = ( @@ -825,7 +844,7 @@ mod tests { ObjectType { position: Default::default(), description: None, - name: String::new(), + name: "DefaultObject".to_string(), implements_interfaces: vec![], directives: 
vec![subgraph_id_directive], fields: vec![name_field, email_field], @@ -880,28 +899,42 @@ mod tests { field } - fn build_schema(raw_schema: &str) -> ApiSchema { - let document = graphql_parser::parse_schema(raw_schema) - .expect("Failed to parse raw schema") - .into_static(); + fn build_default_schema() -> SchemaPair { + // These schemas are somewhat nonsensical and just good enough to + // run the tests. The `API_SCHEMA` does not look like anything that + // would be generated from the `INPUT_SCHEMA` + const API_SCHEMA: &str = r#" + type Query { + aField(first: Int, skip: Int): [SomeType] + } - let schema = Schema::new(DeploymentHash::new("id").unwrap(), document).unwrap(); - ApiSchema::from_api_schema(schema).expect("Failed to build schema") - } + type SomeType @entity { + id: ID! + name: String! + } + "#; + const INPUT_SCHEMA: &str = r#" + type Entity1 @entity { id: ID! } + type Entity2 @entity { id: ID! } + type DefaultObject @entity { + id: ID! + name: String + email: String + } + "#; - fn build_default_schema() -> ApiSchema { - build_schema( - r#" - type Query { - aField(first: Int, skip: Int): [SomeType] - } + let id = DeploymentHash::new("id").unwrap(); + let input_schema = InputSchema::parse(INPUT_SCHEMA, id.clone()).unwrap(); + let api_schema = graphql_parser::parse_schema(API_SCHEMA) + .expect("Failed to parse raw schema") + .into_static(); + let api_schema = Schema::new(id, api_schema).unwrap(); + let api_schema = ApiSchema::from_graphql_schema(api_schema).unwrap(); - type SomeType @entity { - id: ID! - name: String! 
- } - "#, - ) + SchemaPair { + input: Arc::new(input_schema), + api: Arc::new(api_schema), + } } #[test] @@ -916,11 +949,14 @@ mod tests { std::u32::MAX, std::u32::MAX, Default::default(), - &schema + &schema, ) .unwrap() .collection, - EntityCollection::All(vec![(EntityType::from("Entity1"), AttributeNames::All)]) + EntityCollection::All(vec![( + schema.input.entity_type("Entity1").unwrap(), + AttributeNames::All + )]) ); assert_eq!( build_query( @@ -935,7 +971,10 @@ mod tests { ) .unwrap() .collection, - EntityCollection::All(vec![(EntityType::from("Entity2"), AttributeNames::All)]) + EntityCollection::All(vec![( + schema.input.entity_type("Entity2").unwrap(), + AttributeNames::All + )]) ); } @@ -1010,7 +1049,7 @@ mod tests { std::u32::MAX, std::u32::MAX, Default::default(), - &schema + &schema, ) .unwrap() .order, @@ -1094,7 +1133,7 @@ mod tests { std::u32::MAX, std::u32::MAX, Default::default(), - &schema, + &schema ) .unwrap() .order, diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index d59978b35e5..bfbd81b81c8 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -359,7 +359,9 @@ impl Resolver for StoreResolver { ) -> result::Result { // Collect all entities involved in the query field let object_type = schema.object_type(object_type).into(); - let entities = collect_entities_from_query_field(schema, object_type, field)?; + let input_schema = self.store.input_schema()?; + let entities = + collect_entities_from_query_field(&input_schema, schema, object_type, field)?; // Subscribe to the store and return the entity change stream Ok(self.subscription_manager.subscribe_no_payload(entities)) diff --git a/node/src/manager/commands/listen.rs b/node/src/manager/commands/listen.rs index 8fb9c7ab4c3..e033600d101 100644 --- a/node/src/manager/commands/listen.rs +++ b/node/src/manager/commands/listen.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use std::{collections::BTreeSet, io::Write}; use 
futures::compat::Future01CompatExt; -//use futures::future; +use graph::prelude::DeploymentHash; +use graph::schema::InputSchema; use graph::{ components::store::{EntityType, SubscriptionManager as _}, prelude::{serde_json, Error, Stream, SubscriptionFilter}, @@ -58,10 +59,32 @@ pub async fn entities( search: &DeploymentSearch, entity_types: Vec, ) -> Result<(), Error> { + // We convert the entity type names into entity types in this very + // awkward way to avoid needing to have a SubgraphStore from which we + // load the input schema + fn as_entity_types( + entity_types: Vec, + id: &DeploymentHash, + ) -> Result, Error> { + use std::fmt::Write; + + let schema = entity_types + .iter() + .fold(String::new(), |mut buf, entity_type| { + writeln!(buf, "type {entity_type} @entity {{ id: ID! }}").unwrap(); + buf + }); + let schema = InputSchema::parse(&schema, id.clone()).unwrap(); + entity_types + .iter() + .map(|et| schema.entity_type(et)) + .collect::>() + } + let locator = search.locate_unique(&primary_pool)?; - let filter = entity_types + let filter = as_entity_types(entity_types, &locator.hash)? .into_iter() - .map(|et| SubscriptionFilter::Entities(locator.hash.clone(), EntityType::new(et))) + .map(|et| SubscriptionFilter::Entities(locator.hash.clone(), et)) .collect(); println!("waiting for store events from {}", locator); diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 7b68bd41dfc..458b6d4a50f 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -436,9 +436,10 @@ fn make_thing(id: &str, value: &str) -> (String, EntityModification) { const DOCUMENT: &str = " type Thing @entity { id: String!, value: String!, extra: String }"; lazy_static! { static ref SCHEMA: InputSchema = InputSchema::raw(DOCUMENT, "doesntmatter"); + static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } let data = entity! 
{ SCHEMA => id: id, value: value, extra: USER_DATA }; - let key = EntityKey::data("Thing".to_string(), id); + let key = EntityKey::onchain(&*THING_TYPE, id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), EntityModification::insert(key, data, 0), @@ -963,7 +964,7 @@ async fn test_entity_store(api_version: Version) { let alex = entity! { schema => id: "alex", name: "Alex" }; let steve = entity! { schema => id: "steve", name: "Steve" }; - let user_type = EntityType::from("User"); + let user_type = schema.entity_type("User").unwrap(); test_store::insert_entities( &deployment, vec![(user_type.clone(), alex), (user_type, steve)], @@ -1407,7 +1408,7 @@ async fn test_store_set_invalid_fields() { id: ID!, name: String } - + type Binary @entity { id: Bytes!, test: String, diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index a22d0a1376d..6806f9a41ab 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -176,8 +176,9 @@ impl HostExports { } } + let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; let key = EntityKey { - entity_type: EntityType::new(entity_type), + entity_type, entity_id: entity_id.into(), causality_region: self.data_source_causality_region, }; @@ -240,8 +241,9 @@ impl HostExports { &self.poi_causality_region, logger, ); + let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; let key = EntityKey { - entity_type: EntityType::new(entity_type), + entity_type, entity_id: entity_id.into(), causality_region: self.data_source_causality_region, }; @@ -262,8 +264,9 @@ impl HostExports { gas: &GasCounter, scope: GetScope, ) -> Result>, anyhow::Error> { + let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; let store_key = EntityKey { - entity_type: EntityType::new(entity_type), + entity_type, entity_id: entity_id.into(), causality_region: self.data_source_causality_region, }; @@ -287,8 +290,9 @@ impl HostExports { entity_field: 
String, gas: &GasCounter, ) -> Result, anyhow::Error> { + let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; let store_key = LoadRelatedRequest { - entity_type: EntityType::new(entity_type), + entity_type, entity_id: entity_id.into(), entity_field: entity_field.into(), causality_region: self.data_source_causality_region, diff --git a/server/index-node/src/schema.rs b/server/index-node/src/schema.rs index 85df359467b..90b64c82152 100644 --- a/server/index-node/src/schema.rs +++ b/server/index-node/src/schema.rs @@ -8,7 +8,7 @@ lazy_static! { let raw_schema = include_str!("./schema.graphql"); let document = graphql_parser::parse_schema(raw_schema).unwrap(); Arc::new( - ApiSchema::from_api_schema( + ApiSchema::from_graphql_schema( Schema::new(DeploymentHash::new("indexnode").unwrap(), document).unwrap(), ) .unwrap(), diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 7399aeeedb8..eaa11916738 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -485,7 +485,7 @@ impl TableState { .into_iter() .map( |(id, entity_type, current_vid, target_vid, size, duration_ms)| { - let entity_type = EntityType::new(entity_type); + let entity_type = src_layout.input_schema.entity_type(&entity_type)?; let src = resolve_entity(src_layout, "source", &entity_type, dst_layout.site.id, id); let dst = resolve_entity( diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 523060f3395..cc54c89adff 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -907,14 +907,20 @@ pub(crate) fn health(conn: &PgConnection, id: DeploymentId) -> Result Result, StoreError> { use subgraph_manifest as sm; sm::table .filter(sm::id.eq(id)) .select(sm::entities_with_causality_region) - .get_result(conn) + .get_result::>(conn) .map_err(|e| e.into()) + .map(|ents| { + ents.into_iter() + .map(|ent| schema.entity_type(&ent).unwrap()) + .collect() + }) } /// Reverts the errors and updates 
the subgraph health if necessary. @@ -1080,7 +1086,11 @@ pub fn create_deployment( history_blocks: history_blocks_override, } = deployment; let earliest_block_number = start_block.as_ref().map(|ptr| ptr.number).unwrap_or(0); - let entities_with_causality_region = Vec::from_iter(entities_with_causality_region.into_iter()); + let entities_with_causality_region = Vec::from_iter( + entities_with_causality_region + .into_iter() + .map(|et| et.as_str().to_owned()), + ); let deployment_values = ( d::id.eq(site.id), diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 4d68987d4de..36de5e22919 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -236,11 +236,14 @@ impl DeploymentStore { pub(crate) fn load_deployment( &self, - site: &Site, + site: Arc, ) -> Result { let conn = self.get_conn()?; - Ok(detail::deployment_entity(&conn, site) - .with_context(|| format!("Deployment details not found for {}", site.deployment))?) + let layout = self.layout(&conn, site.clone())?; + Ok( + detail::deployment_entity(&conn, &site, &layout.input_schema) + .with_context(|| format!("Deployment details not found for {}", site.deployment))?, + ) } // Remove the data and metadata for the deployment `site`. This operation @@ -288,21 +291,16 @@ impl DeploymentStore { // if that's Fred the Dog, Fred the Cat or both. // // This assumes that there are no concurrent writes to a subgraph. - let schema = self - .subgraph_info_with_conn(conn, &layout.site)? 
- .api - .get(&Default::default()) - .expect("API schema should be present") - .clone(); - let types_for_interface = schema.types_for_interface(); + let schema = self.subgraph_info_with_conn(conn, &layout.site)?.input; let entity_type_str = entity_type.to_string(); let types_with_shared_interface = Vec::from_iter( schema .interfaces_for_type(entity_type.as_str()) .into_iter() .flatten() - .flat_map(|interface| &types_for_interface[&interface.name]) - .map(EntityType::from) + .flat_map(|interface| schema.types_for_interface(interface)) + .flatten() + .map(|object_type| schema.entity_type(object_type).unwrap()) .filter(|type_name| type_name != entity_type), ); @@ -1520,7 +1518,7 @@ impl DeploymentStore { let src_manifest_idx_and_name = src_deployment.manifest.template_idx_and_name()?; let dst_manifest_idx_and_name = self - .load_deployment(&dst.site)? + .load_deployment(dst.site.clone())? .manifest .template_idx_and_name()?; @@ -1861,7 +1859,7 @@ impl DeploymentStore { /// search using the latter if the search for the former fails. fn resolve_table_name<'a>(layout: &'a Layout, name: &'_ str) -> Result<&'a Table, StoreError> { layout - .table_for_entity(&EntityType::new(name.to_owned())) + .table_for_entity(&layout.input_schema.entity_type(name)?) 
.map(Deref::deref) .or_else(|_error| { let sql_name = SqlName::from(name); diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index db42563aab5..f35b6dfcbff 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -10,12 +10,12 @@ use diesel::prelude::{ use diesel_derives::Associations; use git_testament::{git_testament, git_testament_macros}; use graph::blockchain::BlockHash; -use graph::components::store::EntityType; use graph::data::subgraph::schema::{SubgraphError, SubgraphManifestEntity}; use graph::prelude::{ bigdecimal::ToPrimitive, BigDecimal, BlockPtr, DeploymentHash, StoreError, SubgraphDeploymentEntity, }; +use graph::schema::InputSchema; use graph::{constraint_violation, data::subgraph::status, prelude::web3::types::H256}; use itertools::Itertools; use std::collections::HashMap; @@ -360,33 +360,39 @@ struct StoredSubgraphManifest { start_block_number: Option, start_block_hash: Option, raw_yaml: Option, - entities_with_causality_region: Vec, + entities_with_causality_region: Vec, on_sync: Option, history_blocks: i32, } -impl From for SubgraphManifestEntity { - fn from(value: StoredSubgraphManifest) -> Self { +impl StoredSubgraphManifest { + fn as_manifest(self, schema: &InputSchema) -> SubgraphManifestEntity { + let e: Vec<_> = self + .entities_with_causality_region + .into_iter() + .map(|s| schema.entity_type(&s).unwrap()) + .collect(); SubgraphManifestEntity { - spec_version: value.spec_version, - description: value.description, - repository: value.repository, - features: value.features, - schema: value.schema, - raw_yaml: value.raw_yaml, - entities_with_causality_region: value.entities_with_causality_region, - history_blocks: value.history_blocks, + spec_version: self.spec_version, + description: self.description, + repository: self.repository, + features: self.features, + schema: self.schema, + raw_yaml: self.raw_yaml, + entities_with_causality_region: e, + history_blocks: self.history_blocks, } } } struct 
StoredDeploymentEntity(crate::detail::DeploymentDetail, StoredSubgraphManifest); -impl TryFrom for SubgraphDeploymentEntity { - type Error = StoreError; - - fn try_from(ent: StoredDeploymentEntity) -> Result { - let (detail, manifest) = (ent.0, ent.1); +impl StoredDeploymentEntity { + fn as_subgraph_deployment( + self, + schema: &InputSchema, + ) -> Result { + let (detail, manifest) = (self.0, self.1); let start_block = block( &detail.deployment, @@ -425,7 +431,7 @@ impl TryFrom for SubgraphDeploymentEntity { .map_err(|b| constraint_violation!("invalid debug fork `{}`", b))?; Ok(SubgraphDeploymentEntity { - manifest: manifest.into(), + manifest: manifest.as_manifest(schema), failed: detail.failed, health: detail.health.into(), synced: detail.synced, @@ -447,6 +453,7 @@ impl TryFrom for SubgraphDeploymentEntity { pub fn deployment_entity( conn: &PgConnection, site: &Site, + schema: &InputSchema, ) -> Result { use subgraph_deployment as d; use subgraph_manifest as m; @@ -459,7 +466,7 @@ pub fn deployment_entity( .find(site.id) .first::(conn)?; - SubgraphDeploymentEntity::try_from(StoredDeploymentEntity(detail, manifest)) + StoredDeploymentEntity(detail, manifest).as_subgraph_deployment(schema) } #[derive(Queryable, Identifiable, Insertable)] diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 58c29aebd67..1940ba1f881 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -6,7 +6,7 @@ use std::{ use graph::{ block_on, - components::store::{EntityType, SubgraphFork as SubgraphForkTrait}, + components::store::SubgraphFork as SubgraphForkTrait, data::graphql::ext::DirectiveFinder, prelude::{ info, @@ -130,7 +130,7 @@ impl SubgraphFork { } fn get_fields_of(&self, entity_type: &str) -> Result<&Vec, StoreError> { - let entity_type = EntityType::new(entity_type.to_string()); + let entity_type = self.schema.entity_type(entity_type)?; let entity: Option<&ObjectType> = self.schema.find_object_type(&entity_type); if entity.is_none() { 
diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index ab339a54f0f..19e35c3cf99 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -3,7 +3,7 @@ use graph::components::store::{DeploymentId, QueryStore as QueryStoreTrait}; use graph::data::query::Trace; use graph::data::value::Object; use graph::prelude::*; -use graph::schema::ApiSchema; +use graph::schema::{ApiSchema, InputSchema}; use crate::primary::Site; @@ -120,6 +120,11 @@ impl QueryStoreTrait for QueryStore { Ok(info.api.get(&self.api_version).unwrap().clone()) } + fn input_schema(&self) -> Result, QueryExecutionError> { + let info = self.store.subgraph_info(&self.site)?; + Ok(info.input) + } + fn network_name(&self) -> &str { &self.site.network } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 0e6ad2fc2da..5ddc8f837c7 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -312,7 +312,7 @@ impl Layout { // they have a String `id` field // see also: id-type-for-unimplemented-interfaces let id_type = types.iter().next().cloned().unwrap_or(IdType::String); - Ok((EntityType::from(interface.as_str()), id_type)) + Ok((schema.entity_type(interface).unwrap(), id_type)) } }) }); @@ -321,7 +321,9 @@ impl Layout { // and interfaces in the schema let id_types = object_types .iter() - .map(|obj_type| IdType::try_from(*obj_type).map(|t| (EntityType::from(*obj_type), t))) + .map(|obj_type| { + IdType::try_from(*obj_type).map(|t| (schema.entity_type(*obj_type).unwrap(), t)) + }) .chain(id_types_for_interface) .collect::>()?; @@ -331,6 +333,7 @@ impl Layout { .enumerate() .map(|(i, obj_type)| { Table::new( + schema, obj_type, &catalog, schema @@ -341,7 +344,7 @@ impl Layout { i as u32, catalog .entities_with_causality_region - .contains(&EntityType::from(*obj_type)), + .contains(&schema.entity_type(*obj_type).unwrap()), ) }) .collect::, _>>()?; @@ -554,7 +557,7 @@ impl Layout { }; let 
mut entities: BTreeMap = BTreeMap::new(); for data in query.load::(conn)? { - let entity_type = data.entity_type(); + let entity_type = data.entity_type(&self.input_schema); let entity_data: Entity = data.deserialize_with_layout(self, None)?; let key = EntityKey { @@ -589,7 +592,7 @@ impl Layout { let mut entities = BTreeMap::new(); for data in query.load::(conn)? { - let entity_type = data.entity_type(); + let entity_type = data.entity_type(&self.input_schema); let entity_data: Entity = data.deserialize_with_layout(self, None)?; let key = EntityKey { entity_type, @@ -625,7 +628,7 @@ impl Layout { let mut changes = Vec::new(); for entity_data in inserts_or_updates.into_iter() { - let entity_type = entity_data.entity_type(); + let entity_type = entity_data.entity_type(&self.input_schema); let data: Entity = entity_data.deserialize_with_layout(self, None)?; let entity_id = data.id(); processed_entities.insert((entity_type.clone(), entity_id.clone())); @@ -641,7 +644,7 @@ impl Layout { } for del in &deletions { - let entity_type = del.entity_type(); + let entity_type = del.entity_type(&self.input_schema); let entity_id = Word::from(del.id()); // See the doc comment of `FindPossibleDeletionsQuery` for details @@ -1050,6 +1053,7 @@ impl From for ColumnType { impl ColumnType { fn from_field_type( + schema: &InputSchema, field_type: &q::Type, catalog: &Catalog, enums: &EnumMap, @@ -1059,7 +1063,11 @@ impl ColumnType { let name = named_type(field_type); // See if its an object type defined in the schema - if let Some(id_type) = id_types.get(&EntityType::new(name.to_string())) { + if let Some(id_type) = schema + .entity_type(name) + .ok() + .and_then(|entity_type| id_types.get(&entity_type)) + { return Ok((*id_type).into()); } @@ -1139,6 +1147,7 @@ pub struct Column { impl Column { fn new( + schema: &InputSchema, table_name: &SqlName, field: &s::Field, catalog: &Catalog, @@ -1156,6 +1165,7 @@ impl Column { } else { let is_existing_text_column = 
catalog.is_existing_text_column(table_name, &sql_name); ColumnType::from_field_type( + schema, &field.field_type, catalog, enums, @@ -1313,6 +1323,7 @@ pub struct Table { impl Table { fn new( + schema: &InputSchema, defn: &s::ObjectType, catalog: &Catalog, fulltexts: Vec, @@ -1328,14 +1339,14 @@ impl Table { .fields .iter() .filter(|field| !field.is_derived()) - .map(|field| Column::new(&table_name, field, catalog, enums, id_types)) + .map(|field| Column::new(schema, &table_name, field, catalog, enums, id_types)) .chain(fulltexts.iter().map(Column::new_fulltext)) .collect::, StoreError>>()?; let qualified_name = SqlName::qualified_name(&catalog.site.namespace, &table_name); let immutable = defn.is_immutable(); let table = Table { - object: EntityType::from(defn), + object: schema.entity_type(defn)?, name: table_name, qualified_name, // Default `is_account_like` to `false`; the caller should call @@ -1476,7 +1487,8 @@ impl LayoutCache { fn load(conn: &PgConnection, site: Arc) -> Result, StoreError> { let (subgraph_schema, use_bytea_prefix) = deployment::schema(conn, site.as_ref())?; - let has_causality_region = deployment::entities_with_causality_region(conn, site.id)?; + let has_causality_region = + deployment::entities_with_causality_region(conn, site.id, &subgraph_schema)?; let catalog = Catalog::load(conn, site.clone(), use_bytea_prefix, has_causality_region)?; let layout = Arc::new(Layout::new(site.clone(), &subgraph_schema, catalog)?); layout.refresh(conn, site) diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index ea4bdf57390..b3e08c42468 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -12,8 +12,13 @@ fn test_layout(gql: &str) -> Layout { let schema = InputSchema::parse(gql, subgraph.clone()).expect("Test schema invalid"); let namespace = Namespace::new("sgd0815".to_owned()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, 
"anet".to_string())); - let catalog = Catalog::for_tests(site.clone(), BTreeSet::from_iter(["FileThing".into()])) - .expect("Can not create catalog"); + let ents = { + match schema.entity_type("FileThing") { + Ok(entity_type) => BTreeSet::from_iter(vec![entity_type]), + Err(_) => BTreeSet::new(), + } + }; + let catalog = Catalog::for_tests(site.clone(), ents).expect("Can not create catalog"); Layout::new(site, &schema, catalog).expect("Failed to construct Layout") } @@ -82,7 +87,7 @@ fn generate_ddl() { fn exlusion_ddl() { let layout = test_layout(THING_GQL); let table = layout - .table_for_entity(&EntityType::new("Thing".to_string())) + .table_for_entity(&layout.input_schema.entity_type("Thing").unwrap()) .unwrap(); // When `as_constraint` is false, just create an index diff --git a/store/postgres/src/relational/query_tests.rs b/store/postgres/src/relational/query_tests.rs index 34f179fb538..a530a221d44 100644 --- a/store/postgres/src/relational/query_tests.rs +++ b/store/postgres/src/relational/query_tests.rs @@ -2,7 +2,6 @@ use std::{collections::BTreeSet, sync::Arc}; use diesel::{debug_query, pg::Pg}; use graph::{ - components::store::EntityType, prelude::{r, serde_json as json, DeploymentHash, EntityFilter}, schema::InputSchema, }; @@ -49,7 +48,7 @@ fn filter_contains(filter: EntityFilter, sql: &str) { }"; let layout = test_layout(SCHEMA); let table = layout - .table_for_entity(&EntityType::new("Thing".to_string())) + .table_for_entity(&layout.input_schema.entity_type("Thing").unwrap()) .unwrap(); let filter = QueryFilter::new(&filter, table.as_ref(), &layout, Default::default()).unwrap(); let query = debug_query::(&filter); diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 57b558bc17e..5a5ca12ea10 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -486,8 +486,8 @@ pub struct EntityDeletion { } impl EntityDeletion { - pub fn entity_type(&self) -> 
EntityType { - EntityType::new(self.entity.clone()) + pub fn entity_type(&self, schema: &InputSchema) -> EntityType { + schema.entity_type(&self.entity).unwrap() } pub fn id(&self) -> &str { @@ -513,8 +513,8 @@ pub struct EntityData { } impl EntityData { - pub fn entity_type(&self) -> EntityType { - EntityType::new(self.entity.clone()) + pub fn entity_type(&self, schema: &InputSchema) -> EntityType { + schema.entity_type(&self.entity).unwrap() } /// Map the `EntityData` using the schema information in `Layout` @@ -523,7 +523,7 @@ impl EntityData { layout: &Layout, parent_type: Option<&ColumnType>, ) -> Result { - let entity_type = EntityType::new(self.entity.clone()); + let entity_type = layout.input_schema.entity_type(&self.entity)?; let table = layout.table_for_entity(&entity_type)?; use serde_json::Value as j; diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 6f14af8ea3f..3745fb8cdc4 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -622,7 +622,7 @@ impl SubgraphStoreInner { node ))); } - let deployment = src_store.load_deployment(src.as_ref())?; + let deployment = src_store.load_deployment(src.clone())?; if deployment.failed { return Err(StoreError::Unknown(anyhow!( "can not copy deployment {} because it has failed", @@ -1186,8 +1186,8 @@ impl SubgraphStoreInner { store.set_history_blocks(&site, history_blocks, reorg_threshold) } - pub fn load_deployment(&self, site: &Site) -> Result { - let src_store = self.for_site(site)?; + pub fn load_deployment(&self, site: Arc) -> Result { + let src_store = self.for_site(&site)?; src_store.load_deployment(site) } @@ -1197,7 +1197,7 @@ impl SubgraphStoreInner { ) -> Result { let site = self.find_site(id)?; let src_store = self.for_site(&site)?; - src_store.load_deployment(&site) + src_store.load_deployment(site) } } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 04a73c82eec..bd6b571d19c 100644 --- 
a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -62,7 +62,7 @@ impl WritableSubgraphStore { self.0.layout(id) } - fn load_deployment(&self, site: &Site) -> Result { + fn load_deployment(&self, site: Arc) -> Result { self.0.load_deployment(site) } @@ -139,7 +139,7 @@ impl SyncStore { let graft_base = match self.writable.graft_pending(&self.site.deployment)? { Some((base_id, base_ptr)) => { let src = self.store.layout(&base_id)?; - let deployment_entity = self.store.load_deployment(&src.site)?; + let deployment_entity = self.store.load_deployment(src.site.clone())?; Some((src, base_ptr, deployment_entity)) } None => None, diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index 08cb83852e7..11aad8ad2fc 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -16,10 +16,7 @@ use graph::prelude::{ }; use graph::{ blockchain::NodeCapabilities as _, - components::{ - link_resolver::{JsonValueStream, LinkResolver as LinkResolverTrait}, - store::EntityType, - }, + components::link_resolver::{JsonValueStream, LinkResolver as LinkResolverTrait}, data::subgraph::SubgraphFeature, }; @@ -211,9 +208,12 @@ specVersion: 0.0.2 // Adds an example entity. let thing = entity! { schema => id: "datthing" }; - test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) - .await - .unwrap(); + test_store::insert_entities( + &deployment, + vec![(schema.entity_type("Thing").unwrap(), thing)], + ) + .await + .unwrap(); let error = SubgraphError { subgraph_id: deployment.hash.clone(), @@ -308,9 +308,12 @@ specVersion: 0.0.2 ); let thing = entity! 
{ schema => id: "datthing" }; - test_store::insert_entities(&deployment, vec![(EntityType::from("Thing"), thing)]) - .await - .unwrap(); + test_store::insert_entities( + &deployment, + vec![(schema.entity_type("Thing").unwrap(), thing)], + ) + .await + .unwrap(); // Validation against subgraph that has not reached the graft point fails let unvalidated = resolve_unvalidated(YAML).await; diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index 43c4317d660..53f6854e525 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -4,7 +4,7 @@ use graph::entity; use graph::schema::InputSchema; use pretty_assertions::assert_eq; -use graph::{components::store::EntityType, data::graphql::object}; +use graph::data::graphql::object; use graph::{data::query::QueryTarget, prelude::*}; use test_store::*; @@ -17,10 +17,10 @@ async fn insert_and_query( ) -> Result { let subgraph_id = DeploymentHash::new(subgraph_id).unwrap(); let deployment = create_test_subgraph(&subgraph_id, schema).await; - + let schema = InputSchema::parse(schema, subgraph_id.clone()).unwrap(); let entities = entities .into_iter() - .map(|(entity_type, data)| (EntityType::new(entity_type.to_owned()), data)) + .map(|(entity_type, data)| (schema.entity_type(entity_type).unwrap(), data)) .collect(); insert_entities(&deployment, entities).await?; diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 32fcf499fe5..6dff24ab589 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -182,7 +182,7 @@ impl WritableStore for MockStore { fn make_band_key(id: &'static str) -> EntityKey { EntityKey { - entity_type: EntityType::new("Band".to_string()), + entity_type: SCHEMA.entity_type("Band").unwrap(), entity_id: id.into(), causality_region: CausalityRegion::ONCHAIN, } @@ -234,7 +234,7 @@ fn 
entity_version_map(entity_type: &str, entities: Vec) -> BTreeMap) { @@ -458,7 +460,7 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age }; EntityOperation::Set { - key: EntityKey::data(ACCOUNT.to_owned(), id.to_owned()), + key: EntityKey::onchain(&*ACCOUNT_TYPE, id), data: test_entity, } } @@ -469,7 +471,7 @@ fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { let test_wallet = create_wallet_entity(id, account_id, balance); EntityOperation::Set { - key: EntityKey::data(WALLET.to_owned(), id.to_owned()), + key: EntityKey::onchain(&*WALLET_TYPE, id), data: test_wallet, } } @@ -479,7 +481,7 @@ fn check_for_account_with_multiple_wallets() { run_store_test(|mut cache, _store, _deployment, _writable| async move { let account_id = "1"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -499,7 +501,7 @@ fn check_for_account_with_single_wallet() { run_store_test(|mut cache, _store, _deployment, _writable| async move { let account_id = "2"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -517,7 +519,7 @@ fn check_for_account_with_no_wallet() { run_store_test(|mut cache, _store, _deployment, _writable| async move { let account_id = "3"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -534,7 +536,7 @@ fn 
check_for_account_that_doesnt_exist() { run_store_test(|mut cache, _store, _deployment, _writable| async move { let account_id = "4"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -551,7 +553,7 @@ fn check_for_non_existent_field() { run_store_test(|mut cache, _store, _deployment, _writable| async move { let account_id = "1"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "friends".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -583,7 +585,7 @@ fn check_for_insert_async_store() { .await .unwrap(); let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -615,7 +617,7 @@ fn check_for_insert_async_not_related() { .unwrap(); let account_id = "1"; let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -634,7 +636,7 @@ fn check_for_insert_async_not_related() { fn check_for_update_async_related() { run_store_test(|mut cache, store, deployment, writable| async move { let account_id = "1"; - let entity_key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let entity_key = EntityKey::onchain(&*WALLET_TYPE, "1"); let wallet_entity_update = create_wallet_operation("1", account_id, 79_i32); let new_data = match wallet_entity_update { @@ -653,7 +655,7 @@ fn check_for_update_async_related() { .unwrap(); let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: 
ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -671,7 +673,7 @@ fn check_for_update_async_related() { fn check_for_delete_async_related() { run_store_test(|mut cache, store, deployment, _writable| async move { let account_id = "1"; - let del_key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let del_key = EntityKey::onchain(&*WALLET_TYPE, "1"); // delete wallet transact_entity_operations( &store, @@ -683,7 +685,7 @@ fn check_for_delete_async_related() { .unwrap(); let request = LoadRelatedRequest { - entity_type: EntityType::new(ACCOUNT.to_string()), + entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), entity_id: account_id.into(), causality_region: CausalityRegion::ONCHAIN, @@ -701,12 +703,12 @@ fn check_for_delete_async_related() { fn scoped_get() { run_store_test(|mut cache, _store, _deployment, _writable| async move { // Key for an existing entity that is in the store - let key1 = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let key1 = EntityKey::onchain(&*WALLET_TYPE, "1"); let wallet1 = create_wallet_entity("1", "1", 67); // Create a new entity that is not in the store let wallet5 = create_wallet_entity("5", "5", 100); - let key5 = EntityKey::data(WALLET.to_owned(), "5".to_owned()); + let key5 = EntityKey::onchain(&*WALLET_TYPE, "5"); cache.set(key5.clone(), wallet5.clone()).unwrap(); // For the new entity, we can retrieve it with either scope @@ -748,7 +750,7 @@ fn no_internal_keys() { assert_eq!(None, entity.get("__typename")); assert_eq!(None, entity.get(&*PARENT_ID)); } - let key = EntityKey::data(WALLET.to_owned(), "1".to_owned()); + let key = EntityKey::onchain(&*WALLET_TYPE, "1"); let wallet = writable.get(&key).unwrap().unwrap(); check(&wallet); diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index 97176704227..96ece3b2a0d 100644 --- 
a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -570,7 +570,7 @@ async fn introspection_query(schema: Schema, query: &str) -> QueryResult { trace: false, }; - let schema = Arc::new(ApiSchema::from_api_schema(schema).unwrap()); + let schema = Arc::new(ApiSchema::from_graphql_schema(schema).unwrap()); let result = match PreparedQuery::new(&logger, schema, None, query, None, 100, graphql_metrics()) { Ok(query) => { diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 8eff8ad8e4b..75de8fe3681 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -298,12 +298,16 @@ async fn insert_test_entities( manifest: SubgraphManifest, id_type: IdType, ) -> DeploymentLocator { - fn insert_ops(entities: Vec<(&str, Vec)>) -> Vec { + fn insert_ops( + schema: &InputSchema, + entities: Vec<(&str, Vec)>, + ) -> Vec { entities .into_iter() .map(|(typename, entities)| { - entities.into_iter().map(|data| EntityOperation::Set { - key: EntityKey::data(typename.to_string(), data.id()), + let entity_type = schema.entity_type(typename).unwrap(); + entities.into_iter().map(move |data| EntityOperation::Set { + key: EntityKey::onchain(&entity_type, data.id()), data, }) }) @@ -433,7 +437,7 @@ async fn insert_test_entities( ], ), ]; - let entities0 = insert_ops(entities0); + let entities0 = insert_ops(&manifest.schema, entities0); let entities1 = vec![( "Musician", @@ -442,7 +446,7 @@ async fn insert_test_entities( entity! 
{ is => id: "m4", name: "Valerie", bands: Vec::::new(), favoriteCount: 20 }, ], )]; - let entities1 = insert_ops(entities1); + let entities1 = insert_ops(&manifest.schema, entities1); insert_at(entities0, &deployment, GENESIS_PTR.clone()).await; insert_at(entities1, &deployment, BLOCK_ONE.clone()).await; diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index c6ae1fcb7e1..3489e421353 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -95,6 +95,7 @@ lazy_static! { .enumerate() .map(|(idx, hash)| BlockPtr::try_from((*hash, idx as i64)).unwrap()) .collect(); + static ref USER_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type(USER).unwrap(); } /// Test harness for running database integration tests. @@ -255,8 +256,9 @@ fn create_test_entity( favorite_color: favorite_color }; + let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(entity_type).unwrap(); EntityOperation::Set { - key: EntityKey::data(entity_type.to_string(), id), + key: EntityKey::onchain(&entity_type, id), data: test_entity, } } @@ -282,10 +284,11 @@ fn find_entities( store: &DieselSubgraphStore, deployment: &DeploymentLocator, ) -> (Vec, Vec) { + let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(USER).unwrap(); let query = EntityQuery::new( deployment.hash.clone(), BLOCK_NUMBER_MAX, - EntityCollection::All(vec![(EntityType::from(USER), AttributeNames::All)]), + EntityCollection::All(vec![(entity_type, AttributeNames::All)]), ) .order(EntityOrder::Descending( "name".to_string(), @@ -319,7 +322,7 @@ async fn check_graft( // Make our own entries for block 2 shaq.set("email", "shaq@gmail.com").unwrap(); let op = EntityOperation::Set { - key: EntityKey::data(USER.to_owned(), "3"), + key: EntityKey::onchain(&*USER_TYPE, "3"), data: shaq, }; transact_and_wait(&store, &deployment, BLOCKS[2].clone(), vec![op]) @@ -555,7 +558,7 @@ fn prune() { src.hash.clone(), block, EntityCollection::All(vec![( - 
EntityType::new("User".to_string()), + TEST_SUBGRAPH_SCHEMA.entity_type("User").unwrap(), AttributeNames::All, )]), ); diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 830a7113b58..bf323b66018 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -210,9 +210,15 @@ lazy_static! { id: "one", } }; - static ref SCALAR: EntityType = EntityType::from("Scalar"); - static ref NO_ENTITY: EntityType = EntityType::from("NoEntity"); - static ref NULLABLE_STRINGS: EntityType = EntityType::from("NullableStrings"); + static ref SCALAR_TYPE: EntityType = THINGS_SCHEMA.entity_type("Scalar").unwrap(); + static ref USER_TYPE: EntityType = THINGS_SCHEMA.entity_type("User").unwrap(); + static ref DOG_TYPE: EntityType = THINGS_SCHEMA.entity_type("Dog").unwrap(); + static ref CAT_TYPE: EntityType = THINGS_SCHEMA.entity_type("Cat").unwrap(); + static ref FERRET_TYPE: EntityType = THINGS_SCHEMA.entity_type("Ferret").unwrap(); + static ref MINK_TYPE: EntityType = THINGS_SCHEMA.entity_type("Mink").unwrap(); + static ref CHAIR_TYPE: EntityType = THINGS_SCHEMA.entity_type("Chair").unwrap(); + static ref NULLABLE_STRINGS_TYPE: EntityType = + THINGS_SCHEMA.entity_type("NullableStrings").unwrap(); static ref MOCK_STOPWATCH: StopwatchMetrics = StopwatchMetrics::new( Logger::root(slog::Discard, o!()), THINGS_SUBGRAPH_ID.clone(), @@ -231,14 +237,14 @@ fn remove_schema(conn: &PgConnection) { fn insert_entity_at( conn: &PgConnection, layout: &Layout, - entity_type: &str, + entity_type: &EntityType, mut entities: Vec, block: BlockNumber, ) { let entities_with_keys_owned = entities .drain(..) 
.map(|entity| { - let key = EntityKey::data(entity_type.to_owned(), entity.id()); + let key = EntityKey::onchain(entity_type, entity.id()); (key, entity) }) .collect::>(); @@ -246,7 +252,6 @@ fn insert_entity_at( .iter() .map(|(key, entity)| (key, entity)) .collect(); - let entity_type = EntityType::from(entity_type); let errmsg = format!( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys @@ -259,21 +264,26 @@ fn insert_entity_at( ); } -fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entities: Vec) { +fn insert_entity( + conn: &PgConnection, + layout: &Layout, + entity_type: &EntityType, + entities: Vec, +) { insert_entity_at(conn, layout, entity_type, entities, 0); } fn update_entity_at( conn: &PgConnection, layout: &Layout, - entity_type: &str, + entity_type: &EntityType, mut entities: Vec, block: BlockNumber, ) { let entities_with_keys_owned: Vec<(EntityKey, Entity)> = entities .drain(..) .map(|entity| { - let key = EntityKey::data(entity_type.to_owned(), entity.id()); + let key = EntityKey::onchain(entity_type, entity.id()); (key, entity) }) .collect(); @@ -282,7 +292,6 @@ fn update_entity_at( .map(|(key, entity)| (key, entity)) .collect(); - let entity_type = EntityType::from(entity_type); let errmsg = format!( "Failed to insert entities {}[{:?}]", entity_type, entities_with_keys @@ -296,7 +305,7 @@ fn insert_user_entity( conn: &PgConnection, layout: &Layout, id: &str, - entity_type: &str, + entity_type: &EntityType, name: &str, email: &str, age: i32, @@ -362,7 +371,7 @@ fn insert_users(conn: &PgConnection, layout: &Layout) { conn, layout, "1", - "User", + &*USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -377,7 +386,7 @@ fn insert_users(conn: &PgConnection, layout: &Layout) { conn, layout, "2", - "User", + &*USER_TYPE, "Cindini", "dinici@email.com", 43_i32, @@ -392,7 +401,7 @@ fn insert_users(conn: &PgConnection, layout: &Layout) { conn, layout, "3", - "User", + &*USER_TYPE, "Shaqueeena", 
"teeko@email.com", 28_i32, @@ -409,7 +418,7 @@ fn update_user_entity( conn: &PgConnection, layout: &Layout, id: &str, - entity_type: &str, + entity_type: &EntityType, name: &str, email: &str, age: i32, @@ -438,7 +447,7 @@ fn update_user_entity( fn insert_pet( conn: &PgConnection, layout: &Layout, - entity_type: &str, + entity_type: &EntityType, id: &str, name: &str, block: BlockNumber, @@ -451,8 +460,8 @@ fn insert_pet( } fn insert_pets(conn: &PgConnection, layout: &Layout) { - insert_pet(conn, layout, "Dog", "pluto", "Pluto", 0); - insert_pet(conn, layout, "Cat", "garfield", "Garfield", 0); + insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0); + insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0); } fn create_schema(conn: &PgConnection) -> Layout { @@ -527,13 +536,13 @@ where #[test] fn find() { run_test(|conn, layout| { - insert_entity(conn, layout, "Scalar", vec![SCALAR_ENTITY.clone()]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); // Happy path: find existing entity let entity = layout .find( conn, - &EntityKey::data(SCALAR.as_str(), "one"), + &EntityKey::onchain(&*SCALAR_TYPE, "one"), BLOCK_NUMBER_MAX, ) .expect("Failed to read Scalar[one]") @@ -544,25 +553,11 @@ fn find() { let entity = layout .find( conn, - &EntityKey::data(SCALAR.as_str(), "noone"), + &EntityKey::onchain(&*SCALAR_TYPE, "noone"), BLOCK_NUMBER_MAX, ) .expect("Failed to read Scalar[noone]"); assert!(entity.is_none()); - - // Find for non-existing entity type - let err = layout.find( - conn, - &EntityKey::data(NO_ENTITY.as_str(), "one"), - BLOCK_NUMBER_MAX, - ); - match err { - Err(e) => assert_eq!("unknown table 'NoEntity'", e.to_string()), - _ => { - println!("{:?}", err); - assert!(false) - } - } }); } @@ -572,7 +567,7 @@ fn insert_null_fulltext_fields() { insert_entity( conn, layout, - "NullableStrings", + &*NULLABLE_STRINGS_TYPE, vec![EMPTY_NULLABLESTRINGS_ENTITY.clone()], ); @@ -580,7 +575,7 @@ fn insert_null_fulltext_fields() { let entity 
= layout .find( conn, - &EntityKey::data(NULLABLE_STRINGS.as_str(), "one"), + &EntityKey::onchain(&*NULLABLE_STRINGS_TYPE, "one"), BLOCK_NUMBER_MAX, ) .expect("Failed to read NullableStrings[one]") @@ -592,16 +587,16 @@ fn insert_null_fulltext_fields() { #[test] fn update() { run_test(|conn, layout| { - insert_entity(conn, layout, "Scalar", vec![SCALAR_ENTITY.clone()]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); entity.set("string", "updated").unwrap(); entity.remove("strings"); entity.set("bool", Value::Null).unwrap(); - let key = EntityKey::data("Scalar".to_owned(), entity.id()); + let key = EntityKey::onchain(&*SCALAR_TYPE, entity.id()); - let entity_type = EntityType::from("Scalar"); + let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let entities = vec![(key, entity.clone())]; let group = row_group_update(&entity_type, 0, entities); layout @@ -611,7 +606,7 @@ fn update() { let actual = layout .find( conn, - &EntityKey::data(SCALAR.as_str(), "one"), + &EntityKey::onchain(&*SCALAR_TYPE, "one"), BLOCK_NUMBER_MAX, ) .expect("Failed to read Scalar[one]") @@ -631,7 +626,7 @@ fn update_many() { insert_entity( conn, layout, - "Scalar", + &*SCALAR_TYPE, vec![one.clone(), two.clone(), three.clone()], ); @@ -650,10 +645,10 @@ fn update_many() { three.set("color", "red").unwrap(); // generate keys - let entity_type = EntityType::from("Scalar"); + let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let keys: Vec = ["one", "two", "three"] .iter() - .map(|id| EntityKey::data("Scalar".to_owned(), String::from(*id))) + .map(|id| EntityKey::onchain(&*SCALAR_TYPE, *id)) .collect(); let entities_vec = vec![one, two, three]; @@ -670,7 +665,7 @@ fn update_many() { layout .find( conn, - &EntityKey::data(SCALAR.as_str(), id), + &EntityKey::onchain(&*SCALAR_TYPE, id), BLOCK_NUMBER_MAX, ) .unwrap_or_else(|_| panic!("Failed to read Scalar[{}]", id)) @@ 
-715,7 +710,7 @@ fn update_many() { #[test] fn serialize_bigdecimal() { run_test(|conn, layout| { - insert_entity(conn, layout, "Scalar", vec![SCALAR_ENTITY.clone()]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); @@ -724,8 +719,8 @@ fn serialize_bigdecimal() { let d = BigDecimal::from_str(d).unwrap(); entity.set("bigDecimal", d).unwrap(); - let key = EntityKey::data("Scalar".to_owned(), entity.id()); - let entity_type = EntityType::from("Scalar"); + let key = EntityKey::onchain(&*SCALAR_TYPE, entity.id()); + let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let entities = vec![(key, entity.clone())]; let group = row_group_update(&entity_type, 0, entities); layout @@ -735,7 +730,7 @@ fn serialize_bigdecimal() { let actual = layout .find( conn, - &EntityKey::data(SCALAR.as_str(), "one"), + &EntityKey::onchain(&*SCALAR_TYPE, "one"), BLOCK_NUMBER_MAX, ) .expect("Failed to read Scalar[one]") @@ -750,7 +745,7 @@ fn count_scalar_entities(conn: &PgConnection, layout: &Layout) -> usize { EntityFilter::Equal("bool".into(), true.into()), EntityFilter::Equal("bool".into(), false.into()), ]); - let collection = EntityCollection::All(vec![(SCALAR.to_owned(), AttributeNames::All)]); + let collection = EntityCollection::All(vec![(SCALAR_TYPE.to_owned(), AttributeNames::All)]); let mut query = EntityQuery::new(layout.site.deployment.clone(), BLOCK_NUMBER_MAX, collection) .filter(filter); query.range.first = None; @@ -764,14 +759,14 @@ fn count_scalar_entities(conn: &PgConnection, layout: &Layout) -> usize { #[test] fn delete() { run_test(|conn, layout| { - insert_entity(conn, layout, "Scalar", vec![SCALAR_ENTITY.clone()]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); - insert_entity(conn, layout, "Scalar", vec![two]); + insert_entity(conn, layout, &*SCALAR_TYPE, 
vec![two]); // Delete where nothing is getting deleted - let key = EntityKey::data("Scalar".to_owned(), "no such entity".to_owned()); - let entity_type = EntityType::from("Scalar"); + let key = EntityKey::onchain(&*SCALAR_TYPE, "no such entity"); + let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let mut entity_keys = vec![key]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); let count = layout @@ -803,18 +798,17 @@ fn insert_many_and_delete_many() { two.set("id", "two").unwrap(); let mut three = SCALAR_ENTITY.clone(); three.set("id", "three").unwrap(); - insert_entity(conn, layout, "Scalar", vec![one, two, three]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]); // confidence test: there should be 3 scalar entities in store right now assert_eq!(3, count_scalar_entities(conn, layout)); // Delete entities with ids equal to "two" and "three" - let entity_type = EntityType::from("Scalar"); let entity_keys: Vec<_> = vec!["two", "three"] .into_iter() - .map(|key| EntityKey::data(entity_type.as_str(), key)) + .map(|key| EntityKey::onchain(&*SCALAR_TYPE, key)) .collect(); - let group = row_group_delete(&entity_type, 1, entity_keys); + let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys); let num_removed = layout .delete(conn, &group, &MOCK_STOPWATCH) .expect("Failed to delete"); @@ -878,30 +872,25 @@ fn conflicting_entity() { // `id` is the id of an entity to create, `cat`, `dog`, and `ferret` are // the names of the types for which to check entity uniqueness fn check(conn: &PgConnection, layout: &Layout, id: Value, cat: &str, dog: &str, ferret: &str) { - let cat = EntityType::from(cat); - let dog = EntityType::from(dog); - let ferret = EntityType::from(ferret); + let conflicting = |types: Vec<&EntityType>| { + let types = types.into_iter().cloned().collect(); + layout.conflicting_entity(conn, &id.to_string(), types) + }; + + let cat_type = layout.input_schema.entity_type(cat).unwrap(); + let dog_type = 
layout.input_schema.entity_type(dog).unwrap(); + let ferret_type = layout.input_schema.entity_type(ferret).unwrap(); let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone() }; - insert_entity(conn, layout, cat.as_str(), vec![fred]); + insert_entity(conn, layout, &cat_type, vec![fred]); // If we wanted to create Fred the dog, which is forbidden, we'd run this: - let conflict = layout - .conflicting_entity(conn, &id.to_string(), vec![cat.clone(), ferret.clone()]) - .unwrap(); + let conflict = conflicting(vec![&cat_type, &ferret_type]).unwrap(); assert_eq!(Some(cat.to_string()), conflict); // If we wanted to manipulate Fred the cat, which is ok, we'd run: - let conflict = layout - .conflicting_entity(conn, &id.to_string(), vec![dog.clone(), ferret.clone()]) - .unwrap(); + let conflict = conflicting(vec![&dog_type, &ferret_type]).unwrap(); assert_eq!(None, conflict); - - // Chairs are not pets - let chair = EntityType::from("Chair"); - let result = layout.conflicting_entity(conn, &id.to_string(), vec![dog, ferret, chair]); - assert!(result.is_err()); - assert_eq!("unknown table 'Chair'", result.err().unwrap().to_string()); } run_test(|conn, layout| { @@ -924,15 +913,15 @@ fn revert_block() { name: name }; if block == 0 { - insert_entity_at(conn, layout, "Cat", vec![fred], block); + insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block); } else { - update_entity_at(conn, layout, "Cat", vec![fred], block); + update_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block); } }; let assert_fred = |name: &str| { let fred = layout - .find(conn, &EntityKey::data("Cat", id), BLOCK_NUMBER_MAX) + .find(conn, &EntityKey::onchain(&*CAT_TYPE, id), BLOCK_NUMBER_MAX) .unwrap() .expect("there's a fred"); assert_eq!(name, fred.get("name").unwrap().as_str().unwrap()) @@ -962,14 +951,13 @@ fn revert_block() { id: id, order: block, }; - insert_entity_at(conn, layout, "Mink", vec![marty], block); + insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block); } 
}; let assert_marties = |max_block, except: Vec| { let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap(); - let collection = - EntityCollection::All(vec![(EntityType::from("Mink"), AttributeNames::All)]); + let collection = EntityCollection::All(vec![(MINK_TYPE.clone(), AttributeNames::All)]); let filter = EntityFilter::StartsWith("id".to_string(), Value::from("marty")); let query = EntityQuery::new(id, BLOCK_NUMBER_MAX, collection) .filter(filter) @@ -1029,7 +1017,7 @@ impl<'a> QueryChecker<'a> { conn, layout, "1", - "User", + &*USER_TYPE, "Jono", "achangedemail@email.com", 67_i32, @@ -1077,21 +1065,21 @@ impl<'a> QueryChecker<'a> { } } -fn query(entity_types: Vec<&str>) -> EntityQuery { +fn query(entity_types: &[&EntityType]) -> EntityQuery { EntityQuery::new( THINGS_SUBGRAPH_ID.clone(), BLOCK_NUMBER_MAX, EntityCollection::All( entity_types .into_iter() - .map(|entity_type| (EntityType::from(entity_type), AttributeNames::All)) + .map(|entity_type| ((*entity_type).clone(), AttributeNames::All)) .collect(), ), ) } fn user_query() -> EntityQuery { - query(vec!["User"]) + query(&vec![&*USER_TYPE]) } trait EasyOrder { @@ -1138,7 +1126,7 @@ fn check_block_finds() { conn, layout, "1", - "User", + &*USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -1173,30 +1161,19 @@ fn check_block_finds() { fn check_find() { run_test(move |conn, layout| { // find with interfaces + let types = vec![&*CAT_TYPE, &*DOG_TYPE]; let checker = QueryChecker::new(conn, layout) - .check(vec!["garfield", "pluto"], query(vec!["Cat", "Dog"])) - .check( - vec!["pluto", "garfield"], - query(vec!["Cat", "Dog"]).desc("name"), - ) + .check(vec!["garfield", "pluto"], query(&types)) + .check(vec!["pluto", "garfield"], query(&types).desc("name")) .check( vec!["garfield"], - query(vec!["Cat", "Dog"]) + query(&types) .filter(EntityFilter::StartsWith("name".into(), Value::from("Gar"))) .desc("name"), ) - .check( - vec!["pluto", "garfield"], - query(vec!["Cat", 
"Dog"]).desc("id"), - ) - .check( - vec!["garfield", "pluto"], - query(vec!["Cat", "Dog"]).asc("id"), - ) - .check( - vec!["garfield", "pluto"], - query(vec!["Cat", "Dog"]).unordered(), - ); + .check(vec!["pluto", "garfield"], query(&types).desc("id")) + .check(vec!["garfield", "pluto"], query(&types).asc("id")) + .check(vec!["garfield", "pluto"], query(&types).unordered()); // fulltext let checker = checker @@ -1698,10 +1675,10 @@ struct FilterChecker<'a> { impl<'a> FilterChecker<'a> { fn new(conn: &'a PgConnection, layout: &'a Layout) -> Self { let (a1, a2, a2b, a3) = ferrets(); - insert_pet(conn, layout, "Ferret", "a1", &a1, 0); - insert_pet(conn, layout, "Ferret", "a2", &a2, 0); - insert_pet(conn, layout, "Ferret", "a2b", &a2b, 0); - insert_pet(conn, layout, "Ferret", "a3", &a3, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0); Self { conn, layout } } @@ -1710,7 +1687,7 @@ impl<'a> FilterChecker<'a> { let expected_entity_ids: Vec = expected_entity_ids.into_iter().map(str::to_owned).collect(); - let query = query(vec!["Ferret"]).filter(filter).asc("id"); + let query = query(&vec![&*FERRET_TYPE]).filter(filter).asc("id"); let entities = self .layout @@ -1842,7 +1819,7 @@ fn check_filters() { update_entity_at( conn, layout, - "Ferret", + &*FERRET_TYPE, vec![entity! 
{ layout.input_schema => id: "a1", name: "Test" diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 554abbd591b..39421d8de03 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -7,7 +7,7 @@ use graph::data::store::scalar; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; -use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry}; +use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry, StoreError}; use graph::schema::InputSchema; use hex_literal::hex; use lazy_static::lazy_static; @@ -63,7 +63,7 @@ lazy_static! { name: "Beef", }; static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); - static ref THING: EntityType = EntityType::from("Thing"); + static ref THING_TYPE: EntityType = THINGS_SCHEMA.entity_type("Thing").unwrap(); static ref MOCK_STOPWATCH: StopwatchMetrics = StopwatchMetrics::new( Logger::root(slog::Discard, o!()), THINGS_SUBGRAPH_ID.clone(), @@ -122,9 +122,9 @@ pub fn row_group_delete( } fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity: Entity) { - let key = EntityKey::data(entity_type.to_owned(), entity.id()); + let entity_type = layout.input_schema.entity_type(entity_type).unwrap(); + let key = EntityKey::onchain(&entity_type, entity.id()); - let entity_type = EntityType::from(entity_type); let entities = vec![(key.clone(), entity)]; let group = row_group_insert(&entity_type, 0, entities); let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); @@ -215,14 +215,19 @@ where #[test] fn bad_id() { run_test(|conn, layout| { + fn find( + conn: &PgConnection, + layout: &Layout, + id: &str, + ) -> Result, StoreError> { + let key = EntityKey::onchain(&*THING_TYPE, id); + layout.find(conn, &key, BLOCK_NUMBER_MAX) + } + // We test that we 
get errors for various strings that are not // valid 'Bytes' strings; we use `find` to force the conversion // from String -> Bytes internally - let res = layout.find( - conn, - &EntityKey::data(THING.as_str(), "bad"), - BLOCK_NUMBER_MAX, - ); + let res = find(conn, layout, "bad"); assert!(res.is_err()); assert_eq!( "store error: Odd number of digits", @@ -230,11 +235,7 @@ fn bad_id() { ); // We do not allow the `\x` prefix that Postgres uses - let res = layout.find( - conn, - &EntityKey::data(THING.as_str(), "\\xbadd"), - BLOCK_NUMBER_MAX, - ); + let res = find(conn, layout, "\\xbadd"); assert!(res.is_err()); assert_eq!( "store error: Invalid character \'\\\\\' at position 0", @@ -242,19 +243,11 @@ fn bad_id() { ); // Having the '0x' prefix is ok - let res = layout.find( - conn, - &EntityKey::data(THING.as_str(), "0xbadd"), - BLOCK_NUMBER_MAX, - ); + let res = find(conn, layout, "0xbadd"); assert!(res.is_ok()); // Using non-hex characters is also bad - let res = layout.find( - conn, - &EntityKey::data(THING.as_str(), "nope"), - BLOCK_NUMBER_MAX, - ); + let res = find(conn, layout, "nope"); assert!(res.is_err()); assert_eq!( "store error: Invalid character \'n\' at position 0", @@ -266,26 +259,24 @@ fn bad_id() { #[test] fn find() { run_test(|conn, layout| { + fn find_entity(conn: &PgConnection, layout: &Layout, id: &str) -> Option { + let key = EntityKey::onchain(&*THING_TYPE, id); + layout + .find(conn, &key, BLOCK_NUMBER_MAX) + .expect(&format!("Failed to read Thing[{}]", id)) + } + const ID: &str = "deadbeef"; const NAME: &str = "Beef"; insert_thing(conn, layout, ID, NAME); // Happy path: find existing entity - let entity = layout - .find(conn, &EntityKey::data(THING.as_str(), ID), BLOCK_NUMBER_MAX) - .expect("Failed to read Thing[deadbeef]") - .unwrap(); + let entity = find_entity(conn, layout, ID).unwrap(); assert_entity_eq!(scrub(&BEEF_ENTITY), entity); assert!(CausalityRegion::from_entity(&entity) == CausalityRegion::ONCHAIN); // Find non-existing entity - 
let entity = layout - .find( - conn, - &EntityKey::data(THING.as_str(), "badd"), - BLOCK_NUMBER_MAX, - ) - .expect("Failed to read Thing[badd]"); + let entity = find_entity(conn, layout, "badd"); assert!(entity.is_none()); }); } @@ -302,7 +293,7 @@ fn find_many() { let mut id_map = BTreeMap::default(); id_map.insert( - (THING.clone(), CausalityRegion::ONCHAIN), + (THING_TYPE.clone(), CausalityRegion::ONCHAIN), vec![ID.to_string(), ID2.to_string(), "badd".to_string()], ); @@ -313,12 +304,12 @@ fn find_many() { let id_key = EntityKey { entity_id: ID.into(), - entity_type: THING.clone(), + entity_type: THING_TYPE.clone(), causality_region: CausalityRegion::ONCHAIN, }; let id2_key = EntityKey { entity_id: ID2.into(), - entity_type: THING.clone(), + entity_type: THING_TYPE.clone(), causality_region: CausalityRegion::ONCHAIN, }; assert!(entities.contains_key(&id_key), "Missing ID"); @@ -334,7 +325,7 @@ fn update() { // Update the entity let mut entity = BEEF_ENTITY.clone(); entity.set("name", "Moo").unwrap(); - let key = EntityKey::data("Thing".to_owned(), entity.id()); + let key = EntityKey::onchain(&*THING_TYPE, entity.id()); let entity_id = entity.id(); let entity_type = key.entity_type.clone(); @@ -347,7 +338,7 @@ fn update() { let actual = layout .find( conn, - &EntityKey::data(THING.as_str(), entity_id), + &EntityKey::onchain(&*THING_TYPE, entity_id), BLOCK_NUMBER_MAX, ) .expect("Failed to read Thing[deadbeef]") @@ -368,7 +359,7 @@ fn delete() { insert_entity(conn, layout, "Thing", two); // Delete where nothing is getting deleted - let key = EntityKey::data("Thing".to_owned(), "ffff".to_owned()); + let key = EntityKey::onchain(&*THING_TYPE, "ffff".to_owned()); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.clone()]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); @@ -472,14 +463,14 @@ fn query() { // for a discussion of the various types of relationships and queries // EntityCollection::All - let coll = 
EntityCollection::All(vec![(THING.clone(), AttributeNames::All)]); + let coll = EntityCollection::All(vec![(THING_TYPE.clone(), AttributeNames::All)]); let things = fetch(conn, layout, coll); assert_eq!(vec![CHILD1, CHILD2, ROOT, GRANDCHILD1, GRANDCHILD2], things); // EntityCollection::Window, type A, many // things(where: { children_contains: [CHILD1] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![CHILD1.to_owned()], link: EntityLink::Direct( WindowAttribute::List("children".to_string()), @@ -493,7 +484,7 @@ fn query() { // EntityCollection::Window, type A, single // things(where: { children_contains: [GRANDCHILD1, GRANDCHILD2] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![GRANDCHILD1.to_owned(), GRANDCHILD2.to_owned()], link: EntityLink::Direct( WindowAttribute::List("children".to_string()), @@ -507,7 +498,7 @@ fn query() { // EntityCollection::Window, type B, many // things(where: { parent: [ROOT] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![ROOT.to_owned()], link: EntityLink::Direct( WindowAttribute::Scalar("parent".to_string()), @@ -521,7 +512,7 @@ fn query() { // EntityCollection::Window, type B, single // things(where: { parent: [CHILD1, CHILD2] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], link: EntityLink::Direct( WindowAttribute::Scalar("parent".to_string()), @@ -536,10 +527,10 @@ fn query() { // things { children { id } } // This is the inner 'children' query let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![ROOT.to_owned()], link: EntityLink::Parent( - THING.clone(), + 
THING_TYPE.clone(), ParentLink::List(vec![vec![CHILD1.to_owned(), CHILD2.to_owned()]]), ), column_names: AttributeNames::All, @@ -551,10 +542,10 @@ fn query() { // things { parent { id } } // This is the inner 'parent' query let coll = EntityCollection::Window(vec![EntityWindow { - child_type: THING.clone(), + child_type: THING_TYPE.clone(), ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], link: EntityLink::Parent( - THING.clone(), + THING_TYPE.clone(), ParentLink::Scalar(vec![ROOT.to_owned(), ROOT.to_owned()]), ), column_names: AttributeNames::All, diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index b6a5fe3bf5a..b3541d5de4a 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -124,6 +124,8 @@ lazy_static! { 5u64 ) .into(); + static ref USER_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type(USER).unwrap(); + static ref PERSON_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type("Person").unwrap(); } /// Test harness for running database integration tests. @@ -187,7 +189,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_1 = create_test_entity( "1", - USER, + &*USER_TYPE, "Johnton", "tonofjohn@email.com", 67_i32, @@ -206,7 +208,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_2 = create_test_entity( "2", - USER, + &*USER_TYPE, "Cindini", "dinici@email.com", 43_i32, @@ -216,7 +218,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator ); let test_entity_3_1 = create_test_entity( "3", - USER, + &*USER_TYPE, "Shaqueeena", "queensha@email.com", 28_i32, @@ -235,7 +237,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator let test_entity_3_2 = create_test_entity( "3", - USER, + &*USER_TYPE, "Shaqueeena", "teeko@email.com", 28_i32, @@ -258,7 +260,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator /// Creates a test entity. 
fn create_test_entity( id: &str, - entity_type: &str, + entity_type: &EntityType, name: &str, email: &str, age: i32, @@ -280,7 +282,7 @@ fn create_test_entity( }; EntityOperation::Set { - key: EntityKey::data(entity_type.to_owned(), id.to_owned()), + key: EntityKey::onchain(entity_type, id), data: test_entity, } } @@ -303,7 +305,7 @@ fn get_entity_count(store: Arc, subgraph_id: &DeploymentHash) -> u6 #[test] fn delete_entity() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::data(USER.to_owned(), "3".to_owned()); + let entity_key = EntityKey::onchain(&*USER_TYPE, "3"); // Check that there is an entity to remove. writable.get(&entity_key).unwrap().unwrap(); @@ -332,7 +334,7 @@ fn get_entity_1() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let key = EntityKey::onchain(&*USER_TYPE, "1"); let result = writable.get(&key).unwrap(); let bin_name = Value::Bytes("Johnton".as_bytes().into()); @@ -358,7 +360,7 @@ fn get_entity_1() { fn get_entity_3() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = EntityKey::data(USER.to_owned(), "3".to_owned()); + let key = EntityKey::onchain(&*USER_TYPE, "3"); let result = writable.get(&key).unwrap(); let expected_entity = entity! 
{ schema => @@ -381,10 +383,10 @@ fn get_entity_3() { #[test] fn insert_entity() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::data(USER.to_owned(), "7".to_owned()); + let entity_key = EntityKey::onchain(&*USER_TYPE, "7".to_owned()); let test_entity = create_test_entity( "7", - USER, + &*USER_TYPE, "Wanjon", "wanawana@email.com", 76_i32, @@ -411,11 +413,11 @@ fn insert_entity() { #[test] fn update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); let op = create_test_entity( "1", - USER, + &*USER_TYPE, "Wanjon", "wanawana@email.com", 76_i32, @@ -457,7 +459,7 @@ fn update_existing() { #[test] fn partially_update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); let schema = writable.input_schema(); let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -536,7 +538,7 @@ fn user_query() -> EntityQuery { EntityQuery::new( TEST_SUBGRAPH_ID.clone(), BLOCK_NUMBER_MAX, - EntityCollection::All(vec![(EntityType::from(USER), AttributeNames::All)]), + EntityCollection::All(vec![(USER_TYPE.clone(), AttributeNames::All)]), ) } @@ -894,10 +896,10 @@ fn find() { }); } -fn make_entity_change(entity_type: &str) -> EntityChange { +fn make_entity_change(entity_type: &EntityType) -> EntityChange { EntityChange::Data { subgraph_id: TEST_SUBGRAPH_ID.clone(), - entity_type: EntityType::new(entity_type.to_owned()), + entity_type: entity_type.to_owned(), } } @@ -949,12 +951,12 @@ async fn check_events( // Subscribe to store events fn subscribe( subgraph: &DeploymentHash, - entity_type: &str, + entity_type: &EntityType, ) -> StoreEventStream, Error = ()> + Send> { let subscription = SUBSCRIPTION_MANAGER.subscribe(FromIterator::from_iter([SubscriptionFilter::Entities( subgraph.clone(), - EntityType::new(entity_type.to_owned()), + entity_type.to_owned(), )])); StoreEventStream::new(subscription) @@ -964,7 +966,7 @@ async fn check_basic_revert( store: Arc, expected: StoreEvent, deployment: &DeploymentLocator, - entity_type: &str, + entity_type: &EntityType, ) { let this_query = user_query() .filter(EntityFilter::Equal( @@ -1003,10 +1005,10 @@ async fn check_basic_revert( #[test] fn revert_block_basic_user() { run_test(|store, _, deployment| async move { - let expected = StoreEvent::new(vec![make_entity_change(USER)]); + let expected = StoreEvent::new(vec![make_entity_change(&*USER_TYPE)]); let count = get_entity_count(store.clone(), &deployment.hash); - check_basic_revert(store.clone(), expected, &deployment, USER).await; + check_basic_revert(store.clone(), expected, &deployment, &*USER_TYPE).await; assert_eq!(count, get_entity_count(store.clone(), &deployment.hash)); }) } @@ -1022,7 +1024,7 @@ fn revert_block_with_delete() { .desc("name"); // Delete entity with id=2 - let 
del_key = EntityKey::data(USER.to_owned(), "2".to_owned()); + let del_key = EntityKey::onchain(&*USER_TYPE, "2"); // Process deletion transact_and_wait( @@ -1034,7 +1036,7 @@ fn revert_block_with_delete() { .await .unwrap(); - let subscription = subscribe(&deployment.hash, USER); + let subscription = subscribe(&deployment.hash, &*USER_TYPE); // Revert deletion let count = get_entity_count(store.clone(), &deployment.hash); @@ -1057,7 +1059,7 @@ fn revert_block_with_delete() { assert_eq!(&test_value, returned_name.unwrap()); // Check that the subscription notified us of the changes - let expected = StoreEvent::new(vec![make_entity_change(USER)]); + let expected = StoreEvent::new(vec![make_entity_change(&*USER_TYPE)]); // The last event is the one for the reversion check_events(subscription, vec![expected]).await @@ -1067,7 +1069,7 @@ fn revert_block_with_delete() { #[test] fn revert_block_with_partial_update() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); let schema = writable.input_schema(); let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -1087,7 +1089,7 @@ fn revert_block_with_partial_update() { .await .unwrap(); - let subscription = subscribe(&deployment.hash, USER); + let subscription = subscribe(&deployment.hash, &*USER_TYPE); // Perform revert operation, reversing the partial update let count = get_entity_count(store.clone(), &deployment.hash); @@ -1101,7 +1103,7 @@ fn revert_block_with_partial_update() { assert_eq!(reverted_entity, original_entity); // Check that the subscription notified us of the changes - let expected = StoreEvent::new(vec![make_entity_change(USER)]); + let expected = StoreEvent::new(vec![make_entity_change(&*USER_TYPE)]); check_events(subscription, vec![expected]).await }) @@ -1163,7 +1165,7 @@ fn revert_block_with_dynamic_data_source_operations() { let schema = writable.input_schema(); // Create operations to add a user - let user_key = EntityKey::data(USER.to_owned(), "1".to_owned()); + let user_key = EntityKey::onchain(&*USER_TYPE, "1"); let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; // Get the original user for comparisons @@ -1208,7 +1210,7 @@ fn revert_block_with_dynamic_data_source_operations() { **loaded_dds[0].param.as_ref().unwrap() ); - let subscription = subscribe(&deployment.hash, USER); + let subscription = subscribe(&deployment.hash, &*USER_TYPE); // Revert block that added the user and the dynamic data source revert_block(&store, &deployment, &TEST_BLOCK_2_PTR).await; @@ -1232,7 +1234,7 @@ fn revert_block_with_dynamic_data_source_operations() { changes: HashSet::from_iter( vec![EntityChange::Data { subgraph_id: DeploymentHash::new("testsubgraph").unwrap(), - entity_type: EntityType::new(USER.into()), + entity_type: USER_TYPE.to_owned(), }] .into_iter(), ), @@ -1276,7 +1278,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { ) .unwrap(); - let subscription = subscribe(&subgraph_id, USER); + let subscription = subscribe(&subgraph_id, &*USER_TYPE); // Add two entities to the store let added_entities = vec![ @@ -1293,7 +1295,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { added_entities .iter() .map(|(id, data)| EntityOperation::Set { - key: EntityKey::data(USER.to_owned(), id.clone()), + key: EntityKey::onchain(&*USER_TYPE, id), data: data.clone(), }) .collect(), @@ -1304,13 +1306,13 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { // Update an entity in the store let updated_entity = entity! 
{ schema => id: "1", name: "Johnny" }; let update_op = EntityOperation::Set { - key: EntityKey::data(USER.to_owned(), "1".to_owned()), + key: EntityKey::onchain(&*USER_TYPE, "1"), data: updated_entity.clone(), }; // Delete an entity in the store let delete_op = EntityOperation::Remove { - key: EntityKey::data(USER.to_owned(), "2".to_owned()), + key: EntityKey::onchain(&*USER_TYPE, "2"), }; // Commit update & delete ops @@ -1324,26 +1326,25 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { .unwrap(); // We're expecting two events to be written to the subscription stream - let user_type = EntityType::new(USER.to_owned()); let expected = vec![ StoreEvent::new(vec![ EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: user_type.clone(), + entity_type: USER_TYPE.clone(), }, EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: user_type.clone(), + entity_type: USER_TYPE.clone(), }, ]), StoreEvent::new(vec![ EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: user_type.clone(), + entity_type: USER_TYPE.clone(), }, EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: user_type.clone(), + entity_type: USER_TYPE.clone(), }, ]), ]; @@ -1355,7 +1356,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { #[test] fn throttle_subscription_delivers() { run_test(|store, _, deployment| async move { - let subscription = subscribe(&deployment.hash, USER) + let subscription = subscribe(&deployment.hash, &*USER_TYPE) .throttle_while_syncing( &LOGGER, store @@ -1372,7 +1373,7 @@ fn throttle_subscription_delivers() { let user4 = create_test_entity( "4", - USER, + &*USER_TYPE, "Steve", "nieve@email.com", 72_i32, @@ -1390,7 +1391,7 @@ fn throttle_subscription_delivers() { .await .unwrap(); - let expected = StoreEvent::new(vec![make_entity_change(USER)]); + let expected = StoreEvent::new(vec![make_entity_change(&*USER_TYPE)]); check_events(subscription, vec![expected]).await }) @@ -1400,7 
+1401,7 @@ fn throttle_subscription_delivers() { fn throttle_subscription_throttles() { run_test(|store, _, deployment| async move { // Throttle for a very long time (30s) - let subscription = subscribe(&deployment.hash, USER) + let subscription = subscribe(&deployment.hash, &*USER_TYPE) .throttle_while_syncing( &LOGGER, store @@ -1417,7 +1418,7 @@ fn throttle_subscription_throttles() { let user4 = create_test_entity( "4", - USER, + &*USER_TYPE, "Steve", "nieve@email.com", 72_i32, @@ -1500,7 +1501,7 @@ fn handle_large_string_with_index() { ) -> EntityModification { let data = entity! { schema => id: id, name: name }; - let key = EntityKey::data(USER.to_owned(), id.to_owned()); + let key = EntityKey::onchain(&*USER_TYPE, id); EntityModification::insert(key, data, block) } @@ -1595,7 +1596,7 @@ fn handle_large_bytea_with_index() { ) -> EntityModification { let data = entity! { schema => id: id, bin_name: scalar::Bytes::from(name) }; - let key = EntityKey::data(USER.to_owned(), id.to_owned()); + let key = EntityKey::onchain(&*USER_TYPE, id); EntityModification::insert(key, data, block) } @@ -1761,8 +1762,8 @@ impl WindowQuery { fn against_color_and_age(self) -> Self { let mut query = self.0; query.collection = EntityCollection::All(vec![ - (EntityType::from(USER), AttributeNames::All), - (EntityType::from("Person"), AttributeNames::All), + (USER_TYPE.clone(), AttributeNames::All), + (PERSON_TYPE.clone(), AttributeNames::All), ]); WindowQuery(query, self.1).default_window() } @@ -1791,21 +1792,26 @@ impl WindowQuery { #[test] fn window() { - fn make_color_and_age(entity_type: &str, id: &str, color: &str, age: i32) -> EntityOperation { + fn make_color_and_age( + entity_type: &EntityType, + id: &str, + color: &str, + age: i32, + ) -> EntityOperation { let entity = entity! 
{ TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color }; EntityOperation::Set { - key: EntityKey::data(entity_type.to_owned(), id.to_owned()), + key: EntityKey::onchain(entity_type, id), data: entity, } } fn make_user(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_and_age(USER, id, color, age) + make_color_and_age(&*USER_TYPE, id, color, age) } fn make_person(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_and_age("Person", id, color, age) + make_color_and_age(&*PERSON_TYPE, id, color, age) } let ops = vec![ @@ -2055,7 +2061,7 @@ fn reorg_tracking() { ) { let test_entity_1 = create_test_entity( "1", - USER, + &*USER_TYPE, "Johnton", "tonofjohn@email.com", age, diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index a98b00cd894..151adeddffc 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -8,7 +8,9 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use test_store::*; -use graph::components::store::{DeploymentLocator, DerivedEntityQuery, EntityKey, WritableStore}; +use graph::components::store::{ + DeploymentLocator, DerivedEntityQuery, EntityKey, EntityType, WritableStore, +}; use graph::data::subgraph::*; use graph::semver::Version; use graph::{entity, prelude::*}; @@ -32,6 +34,7 @@ lazy_static! { static ref TEST_SUBGRAPH_SCHEMA: InputSchema = InputSchema::parse(SCHEMA_GQL, TEST_SUBGRAPH_ID.clone()) .expect("Failed to parse user schema"); + static ref COUNTER_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type(COUNTER).unwrap(); } /// Inserts test data into the store. 
@@ -106,7 +109,7 @@ fn block_pointer(number: u8) -> BlockPtr { } fn count_key(id: &str) -> EntityKey { - EntityKey::data(COUNTER.to_owned(), id.to_owned()) + EntityKey::onchain(&*COUNTER_TYPE, id) } async fn insert_count(store: &Arc, deployment: &DeploymentLocator, count: u8) { From d0439360d47fb2af4514c5b6efe5c6452ccd5d3d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 11:19:43 -0700 Subject: [PATCH 0433/2104] all: Move EntityType to graph::schema --- graph/src/components/store/mod.rs | 95 +----------------- graph/src/data/graphql/object_or_interface.rs | 4 +- graph/src/data/store/mod.rs | 4 +- graph/src/data/subgraph/schema.rs | 3 +- graph/src/data_source/mod.rs | 3 +- graph/src/data_source/offchain.rs | 3 +- graph/src/runtime/gas/size_of.rs | 3 +- graph/src/schema/ast.rs | 3 +- graph/src/schema/entity_type.rs | 97 +++++++++++++++++++ graph/src/schema/input_schema.rs | 4 +- graph/src/schema/mod.rs | 2 + graph/src/util/cache_weight.rs | 3 +- graphql/src/store/prefetch.rs | 4 +- graphql/src/store/query.rs | 4 +- node/src/manager/commands/listen.rs | 4 +- runtime/test/src/test.rs | 2 +- runtime/wasm/src/host_exports.rs | 3 +- server/index-node/src/resolver.rs | 3 +- store/postgres/src/catalog.rs | 2 +- store/postgres/src/copy.rs | 2 +- store/postgres/src/deployment.rs | 16 +-- store/postgres/src/deployment_store.rs | 6 +- store/postgres/src/relational.rs | 6 +- store/postgres/src/relational_queries.rs | 7 +- store/postgres/src/writable.rs | 4 +- store/test-store/src/store.rs | 5 +- store/test-store/tests/graph/entity_cache.rs | 6 +- store/test-store/tests/postgres/graft.rs | 6 +- store/test-store/tests/postgres/relational.rs | 4 +- .../tests/postgres/relational_bytes.rs | 7 +- store/test-store/tests/postgres/store.rs | 4 +- store/test-store/tests/postgres/writable.rs | 6 +- 32 files changed, 169 insertions(+), 156 deletions(-) create mode 100644 graph/src/schema/entity_type.rs diff --git a/graph/src/components/store/mod.rs 
b/graph/src/components/store/mod.rs index 79fe86d05a3..f03257358c4 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -3,7 +3,6 @@ mod err; mod traits; pub mod write; -use anyhow::{bail, Error}; pub use entity_cache::{EntityCache, GetScope, ModificationsAndCache}; use futures03::future::{FutureExt, TryFutureExt}; use slog::{trace, Logger}; @@ -18,7 +17,6 @@ pub use write::Batch; use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::fmt; @@ -32,103 +30,16 @@ use crate::blockchain::{Block, BlockHash, BlockPtr}; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; use crate::constraint_violation; -use crate::data::graphql::ObjectOrInterface; use crate::data::store::scalar::Bytes; use crate::data::store::Value; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::env::ENV_VARS; -use crate::prelude::{s, Attribute, DeploymentHash, SubscriptionFilter, ValueType}; -use crate::schema::InputSchema; -use crate::util::intern::{self, AtomPool}; +use crate::prelude::{Attribute, DeploymentHash, SubscriptionFilter, ValueType}; +use crate::schema::{EntityType, InputSchema}; +use crate::util::intern; use crate::util::stats::MovingStats; -/// The type name of an entity. This is the string that is used in the -/// subgraph's GraphQL schema as `type NAME @entity { .. }` -#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct EntityType(Word); - -impl EntityType { - /// Construct a new entity type. 
Ideally, this is only called when - /// `entity_type` either comes from the GraphQL schema, or from - /// the database from fields that are known to contain a valid entity type - pub fn new(pool: &Arc, entity_type: &str) -> Result { - match pool.lookup(entity_type) { - Some(_) => Ok(EntityType(Word::from(entity_type))), - None => bail!("entity type `{}` is not interned", entity_type), - } - } - - pub fn as_str(&self) -> &str { - self.0.as_str() - } - - pub fn into_string(self) -> String { - self.0.to_string() - } - - pub fn is_poi(&self) -> bool { - self.0.as_str() == "Poi$" - } -} - -impl fmt::Display for EntityType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl Borrow for EntityType { - fn borrow(&self) -> &str { - &self.0 - } -} - -impl CheapClone for EntityType {} - -impl std::fmt::Debug for EntityType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "EntityType({})", self.0) - } -} - -pub trait AsEntityTypeName { - fn name(&self) -> &str; -} - -impl AsEntityTypeName for &str { - fn name(&self) -> &str { - self - } -} - -impl AsEntityTypeName for &String { - fn name(&self) -> &str { - self.as_str() - } -} - -impl AsEntityTypeName for &s::ObjectType { - fn name(&self) -> &str { - &self.name - } -} - -impl AsEntityTypeName for &s::InterfaceType { - fn name(&self) -> &str { - &self.name - } -} - -impl AsEntityTypeName for ObjectOrInterface<'_> { - fn name(&self) -> &str { - match self { - ObjectOrInterface::Object(object) => &object.name, - ObjectOrInterface::Interface(interface) => &interface.name, - } - } -} - #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityFilterDerivative(bool); diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index af690192151..625965f2ba1 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs 
@@ -1,5 +1,5 @@ -use crate::schema::Schema; -use crate::{components::store::EntityType, prelude::s}; +use crate::prelude::s; +use crate::schema::{EntityType, Schema}; use std::cmp::Ordering; use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 171357e682d..e95959c3f85 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,9 +1,9 @@ use crate::{ - components::store::{DeploymentLocator, EntityKey, EntityType}, + components::store::{DeploymentLocator, EntityKey}, data::graphql::ObjectTypeExt, prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, - schema::InputSchema, + schema::{EntityType, InputSchema}, util::intern::{self, AtomPool}, util::intern::{Error as InternError, NullValue, Object}, }; diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index ba7c049916d..aa50c651311 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -9,12 +9,13 @@ use std::str::FromStr; use std::{fmt, fmt::Display}; use super::DeploymentHash; +use crate::blockchain::Blockchain; use crate::data::graphql::TryFromValue; use crate::data::store::Value; use crate::data::subgraph::SubgraphManifest; use crate::prelude::*; +use crate::schema::EntityType; use crate::util::stable_hash_glue::impl_stable_hash; -use crate::{blockchain::Blockchain, components::store::EntityType}; pub const POI_TABLE: &str = "poi2$"; diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 52c56764db3..bc5f3d31dff 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -13,10 +13,11 @@ use crate::{ }, components::{ link_resolver::LinkResolver, - store::{BlockNumber, EntityType, StoredDynamicDataSource}, + store::{BlockNumber, StoredDynamicDataSource}, }, data_source::offchain::OFFCHAIN_KINDS, prelude::{CheapClone as _, DataSourceContext}, + 
schema::EntityType, }; use anyhow::Error; use semver::Version; diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index f6a04735532..67c8f1d1de2 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -3,13 +3,14 @@ use crate::{ blockchain::{BlockPtr, Blockchain}, components::{ link_resolver::LinkResolver, - store::{BlockNumber, EntityType, StoredDynamicDataSource}, + store::{BlockNumber, StoredDynamicDataSource}, subgraph::DataSourceTemplateInfo, }, data::{store::scalar::Bytes, subgraph::SPEC_VERSION_0_0_7, value::Word}, data_source, ipfs_client::CidFile, prelude::{DataSourceContext, Link}, + schema::EntityType, }; use anyhow::{anyhow, Context, Error}; use itertools::Itertools; diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index 1ec140e4f9c..bab0ed36d46 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -1,8 +1,9 @@ //! Various implementations of GasSizeOf; use crate::{ - components::store::{EntityKey, EntityType, LoadRelatedRequest}, + components::store::{EntityKey, LoadRelatedRequest}, data::store::{scalar::Bytes, Value}, + schema::EntityType, }; use super::{Gas, GasSizeOf, SaturatingInto as _}; diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 822df504f9e..9c7c931dc27 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -406,11 +406,10 @@ pub fn is_list(field_type: &s::Type) -> bool { #[test] fn entity_validation() { use crate::components::store::EntityKey; - use crate::components::store::EntityType; use crate::data::store; use crate::entity; use crate::prelude::{DeploymentHash, Entity}; - use crate::schema::InputSchema; + use crate::schema::{EntityType, InputSchema}; const DOCUMENT: &str = " enum Color { red, yellow, blue } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs new file mode 100644 index 00000000000..710c0fb9e02 --- /dev/null +++ 
b/graph/src/schema/entity_type.rs @@ -0,0 +1,97 @@ +use std::{borrow::Borrow, fmt, sync::Arc}; + +use anyhow::{bail, Error}; +use serde::{Deserialize, Serialize}; + +use crate::{ + cheap_clone::CheapClone, + data::{graphql::ObjectOrInterface, value::Word}, + prelude::s, + util::intern::AtomPool, +}; + +/// The type name of an entity. This is the string that is used in the +/// subgraph's GraphQL schema as `type NAME @entity { .. }` +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct EntityType(Word); + +impl EntityType { + /// Construct a new entity type. Ideally, this is only called when + /// `entity_type` either comes from the GraphQL schema, or from + /// the database from fields that are known to contain a valid entity type + pub(crate) fn new(pool: &Arc, entity_type: &str) -> Result { + match pool.lookup(entity_type) { + Some(_) => Ok(EntityType(Word::from(entity_type))), + None => bail!("entity type `{}` is not interned", entity_type), + } + } + + pub fn as_str(&self) -> &str { + self.0.as_str() + } + + pub fn into_string(self) -> String { + self.0.to_string() + } + + pub fn is_poi(&self) -> bool { + self.0.as_str() == "Poi$" + } +} + +impl fmt::Display for EntityType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Borrow for EntityType { + fn borrow(&self) -> &str { + &self.0 + } +} + +impl CheapClone for EntityType {} + +impl std::fmt::Debug for EntityType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "EntityType({})", self.0) + } +} + +pub trait AsEntityTypeName { + fn name(&self) -> &str; +} + +impl AsEntityTypeName for &str { + fn name(&self) -> &str { + self + } +} + +impl AsEntityTypeName for &String { + fn name(&self) -> &str { + self.as_str() + } +} + +impl AsEntityTypeName for &s::ObjectType { + fn name(&self) -> &str { + &self.name + } +} + +impl AsEntityTypeName for &s::InterfaceType { + fn name(&self) -> &str { + 
&self.name + } +} + +impl AsEntityTypeName for ObjectOrInterface<'_> { + fn name(&self) -> &str { + match self { + ObjectOrInterface::Object(object) => &object.name, + ObjectOrInterface::Interface(interface) => &interface.name, + } + } +} diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index df57cefa3f3..3bd358fe00e 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Error}; use store::Entity; use crate::cheap_clone::CheapClone; -use crate::components::store::{AsEntityTypeName, EntityKey, EntityType, LoadRelatedRequest}; +use crate::components::store::{EntityKey, LoadRelatedRequest}; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; use crate::data::store::{ @@ -19,7 +19,7 @@ use crate::schema::api_schema; use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, Schema, SchemaValidationError}; +use super::{ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError}; /// The name of the PoI entity type const POI_OBJECT: &str = "Poi$"; diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index 3c4247308a2..cb054d36a43 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -27,12 +27,14 @@ mod api; /// Utilities for working with GraphQL schema ASTs. 
pub mod ast; +mod entity_type; mod fulltext; mod input_schema; pub use api::{api_schema, is_introspection_field, APISchemaError, INTROSPECTION_QUERY_TYPE}; pub use api::{ApiSchema, ErrorPolicy}; +pub use entity_type::{AsEntityTypeName, EntityType}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 57a68a3205e..18e24bcbd1c 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -1,7 +1,8 @@ use crate::{ - components::store::{EntityKey, EntityType}, + components::store::EntityKey, data::value::Word, prelude::{q, BigDecimal, BigInt, Value}, + schema::EntityType, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 38f48e2896a..8f989d2678b 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -13,8 +13,8 @@ use std::collections::BTreeMap; use std::rc::Rc; use std::time::Instant; -use graph::schema::{ast as sast, InputSchema}; -use graph::{components::store::EntityType, data::graphql::*}; +use graph::data::graphql::*; +use graph::schema::{ast as sast, EntityType, InputSchema}; use graph::{ data::graphql::ext::DirectiveFinder, prelude::{ diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index 01dc49cbc48..ea2109242b0 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -2,13 +2,13 @@ use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; use std::mem::discriminant; use graph::data::graphql::ext::DirectiveFinder; +use graph::data::graphql::ObjectOrInterface; use graph::data::graphql::TypeExt as _; use graph::data::value::Object; use graph::data::value::Value as DataValue; use graph::prelude::*; use graph::schema::ast::{self as sast, FilterOp}; -use graph::schema::{ApiSchema, InputSchema}; -use graph::{components::store::EntityType, 
data::graphql::ObjectOrInterface}; +use graph::schema::{ApiSchema, EntityType, InputSchema}; use crate::execution::ast as a; diff --git a/node/src/manager/commands/listen.rs b/node/src/manager/commands/listen.rs index e033600d101..7a192db27ce 100644 --- a/node/src/manager/commands/listen.rs +++ b/node/src/manager/commands/listen.rs @@ -4,9 +4,9 @@ use std::{collections::BTreeSet, io::Write}; use futures::compat::Future01CompatExt; use graph::prelude::DeploymentHash; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph::{ - components::store::{EntityType, SubscriptionManager as _}, + components::store::SubscriptionManager as _, prelude::{serde_json, Error, Stream, SubscriptionFilter}, }; use graph_store_postgres::connection_pool::ConnectionPool; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 458b6d4a50f..19b861db677 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -5,7 +5,7 @@ use graph::prelude::web3::types::U256; use graph::runtime::gas::GasCounter; use graph::runtime::{AscIndexId, AscType, HostExportError}; use graph::runtime::{AscPtr, ToAscObj}; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph::{components::store::*, ipfs_client::IpfsClient}; use graph::{entity, prelude::*}; use graph_chain_ethereum::{Chain, DataSource}; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 6806f9a41ab..6b29048e957 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -6,14 +6,15 @@ use std::time::{Duration, Instant}; use graph::data::value::Word; +use graph::schema::EntityType; use never::Never; use semver::Version; use wasmtime::Trap; use web3::types::H160; use graph::blockchain::Blockchain; +use graph::components::store::EntityKey; use graph::components::store::{EnsLookup, GetScope, LoadRelatedRequest}; -use graph::components::store::{EntityKey, EntityType}; use 
graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, }; diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 435e9ea8a07..3eaea81ba34 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -2,11 +2,12 @@ use std::collections::BTreeMap; use std::convert::TryInto; use graph::data::query::Trace; +use graph::schema::EntityType; use web3::types::Address; use git_testament::{git_testament, CommitKind}; use graph::blockchain::{Blockchain, BlockchainKind, BlockchainMap}; -use graph::components::store::{BlockPtrForNumber, BlockStore, EntityType, Store}; +use graph::components::store::{BlockPtrForNumber, BlockStore, Store}; use graph::components::versions::VERSIONS; use graph::data::graphql::{object, IntoValue, ObjectOrInterface, ValueMap}; use graph::data::subgraph::status; diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index e343a2c4a2b..cc448b9fd1d 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -6,9 +6,9 @@ use diesel::{ sql_types::{Array, Double, Nullable, Text}, ExpressionMethods, QueryDsl, }; -use graph::components::store::EntityType; use graph::components::store::VersionStats; use graph::prelude::BlockNumber; +use graph::schema::EntityType; use itertools::Itertools; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fmt::Write; diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index eaa11916738..889ef30db8f 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -32,9 +32,9 @@ use diesel::{ RunQueryDsl, }; use graph::{ - components::store::EntityType, constraint_violation, prelude::{info, o, warn, BlockNumber, BlockPtr, Logger, StoreError, ENV_VARS}, + schema::EntityType, }; use crate::{ diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index cc54c89adff..2e1f0c7db0e 100644 --- 
a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -13,14 +13,9 @@ use diesel::{ sql_query, sql_types::{Nullable, Text}, }; -use graph::{blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError}; use graph::{ - components::store::EntityType, - prelude::{ - anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr, - DeploymentHash, DeploymentState, StoreError, - }, - schema::InputSchema, + blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError, + schema::EntityType, }; use graph::{ data::subgraph::{ @@ -29,6 +24,13 @@ use graph::{ }, util::backoff::ExponentialBackoff, }; +use graph::{ + prelude::{ + anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr, + DeploymentHash, DeploymentState, StoreError, + }, + schema::InputSchema, +}; use stable_hash_legacy::crypto::SetHasher; use std::{collections::BTreeSet, convert::TryFrom, ops::Bound, time::Duration}; use std::{str::FromStr, sync::Arc}; diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 36de5e22919..f95bc1f63d1 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -7,8 +7,8 @@ use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::write::RowGroup; use graph::components::store::{ - Batch, DerivedEntityQuery, EntityKey, EntityType, PrunePhase, PruneReporter, PruneRequest, - PruningStrategy, StoredDynamicDataSource, VersionStats, + Batch, DerivedEntityQuery, EntityKey, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, + StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; use graph::data::query::Trace; @@ -43,7 +43,7 @@ use graph::prelude::{ DeploymentHash, DeploymentState, Entity, EntityQuery, Error, Logger, QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, 
Value, ENV_VARS, }; -use graph::schema::{ApiSchema, InputSchema}; +use graph::schema::{ApiSchema, EntityType, InputSchema}; use web3::types::Address; use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 5ddc8f837c7..fc9ddb08702 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -31,7 +31,9 @@ use graph::data::query::Trace; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; -use graph::schema::{FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME}; +use graph::schema::{ + EntityType, FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME, +}; use graph::slog::warn; use inflector::Inflector; use itertools::Itertools; @@ -52,7 +54,7 @@ use crate::{ FilterQuery, FindManyQuery, FindQuery, InsertQuery, RevertClampQuery, RevertRemoveQuery, }, }; -use graph::components::store::{DerivedEntityQuery, EntityKey, EntityType}; +use graph::components::store::{DerivedEntityQuery, EntityKey}; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::POI_TABLE; diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 5a5ca12ea10..ca2d7024b10 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -22,11 +22,8 @@ use graph::prelude::{ EntityFilter, EntityLink, EntityOrder, EntityOrderByChild, EntityOrderByChildInfo, EntityRange, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, ENV_VARS, }; -use graph::schema::{FulltextAlgorithm, InputSchema}; -use graph::{ - components::store::{AttributeNames, EntityType}, - data::store::scalar, -}; +use graph::schema::{EntityType, FulltextAlgorithm, InputSchema}; +use graph::{components::store::AttributeNames, 
data::store::scalar}; use inflector::Inflector; use itertools::Itertools; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index bd6b571d19c..b8d449182a5 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -19,7 +19,7 @@ use graph::prelude::{ BlockNumber, CacheWeight, Entity, MetricsRegistry, SubgraphDeploymentEntity, SubgraphStore as _, BLOCK_NUMBER_MAX, }; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph::slog::{info, warn}; use graph::tokio::select; use graph::tokio::sync::Notify; @@ -27,7 +27,7 @@ use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ cheap_clone::CheapClone, - components::store::{self, write::EntityOp, EntityType, WritableStore as WritableStoreTrait}, + components::store::{self, write::EntityOp, WritableStore as WritableStoreTrait}, data::subgraph::schema::SubgraphError, prelude::{ BlockPtr, DeploymentHash, EntityModification, Error, Logger, StopwatchMetrics, StoreError, diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 0e9b537e5fe..8e146c92f8f 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -9,13 +9,14 @@ use graph::data_source::CausalityRegion; use graph::data_source::DataSource; use graph::log; use graph::prelude::{QueryStoreManager as _, SubgraphStore as _, *}; +use graph::schema::EntityType; use graph::schema::InputSchema; use graph::semver::Version; use graph::{ blockchain::block_stream::FirehoseCursor, blockchain::ChainIdentifier, components::store::DeploymentLocator, components::store::EntityKey, - components::store::EntityType, components::store::StatusStore, - components::store::StoredDynamicDataSource, data::subgraph::status, prelude::NodeId, + components::store::StatusStore, components::store::StoredDynamicDataSource, + data::subgraph::status, prelude::NodeId, }; use 
graph_graphql::prelude::{ execute_query, Query as PreparedQuery, QueryExecutionOptions, StoreResolver, diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 6dff24ab589..102e51e494e 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -1,12 +1,12 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - DeploymentCursorTracker, DerivedEntityQuery, EntityKey, EntityType, GetScope, - LoadRelatedRequest, ReadStore, StoredDynamicDataSource, WritableStore, + DeploymentCursorTracker, DerivedEntityQuery, EntityKey, GetScope, LoadRelatedRequest, + ReadStore, StoredDynamicDataSource, WritableStore, }; use graph::data::store::PARENT_ID; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph::{ components::store::{DeploymentId, DeploymentLocator}, prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 3489e421353..2b55ce9185c 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -1,14 +1,14 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::value::Word; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; use std::{marker::PhantomData, str::FromStr}; use test_store::*; use graph::components::store::{ - DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, - PruneRequest, PruningStrategy, VersionStats, + DeploymentLocator, EntityKey, EntityOrder, EntityQuery, PruneReporter, PruneRequest, + PruningStrategy, VersionStats, }; use 
graph::data::store::scalar; use graph::data::subgraph::schema::*; diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index bf323b66018..2acd3953884 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -10,7 +10,7 @@ use graph::prelude::{ EntityOrder, EntityQuery, Logger, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; use graph::prelude::{BlockNumber, MetricsRegistry}; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph_store_postgres::layout_for_tests::set_account_like; use graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; @@ -24,7 +24,7 @@ use std::thread::sleep; use std::time::Duration; use graph::{ - components::store::{AttributeNames, EntityType}, + components::store::AttributeNames, data::store::scalar::{BigDecimal, BigInt, Bytes}, }; use graph_store_postgres::{ diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 39421d8de03..ed347aa4f26 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -8,22 +8,19 @@ use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry, StoreError}; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use hex_literal::hex; use lazy_static::lazy_static; use std::collections::BTreeSet; use std::str::FromStr; use std::{collections::BTreeMap, sync::Arc}; +use graph::data::store::scalar::{BigDecimal, BigInt}; use graph::prelude::{ o, slog, web3::types::H256, AttributeNames, ChildMultiplicity, DeploymentHash, Entity, EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, StopwatchMetrics, WindowAttribute, BLOCK_NUMBER_MAX, }; -use 
graph::{ - components::store::EntityType, - data::store::scalar::{BigDecimal, BigInt}, -}; use graph_store_postgres::{ layout_for_tests::make_dummy_site, layout_for_tests::{Layout, Namespace}, diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index b3541d5de4a..359fba81f50 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -2,7 +2,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::DeploymentCreate; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use graph_chain_ethereum::{Mapping, MappingABI}; use hex_literal::hex; use lazy_static::lazy_static; @@ -16,7 +16,7 @@ use graph::data::subgraph::*; use graph::{ blockchain::DataSource, components::store::{ - BlockStore as _, EntityFilter, EntityOrder, EntityQuery, EntityType, StatusStore, + BlockStore as _, EntityFilter, EntityOrder, EntityQuery, StatusStore, SubscriptionManager as _, }, prelude::ethabi::Contract, diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 151adeddffc..e2bf3056ac1 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -2,15 +2,13 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::subgraph::schema::DeploymentCreate; use graph::data::value::Word; use graph::data_source::CausalityRegion; -use graph::schema::InputSchema; +use graph::schema::{EntityType, InputSchema}; use lazy_static::lazy_static; use std::collections::BTreeSet; use std::marker::PhantomData; use test_store::*; -use graph::components::store::{ - DeploymentLocator, DerivedEntityQuery, EntityKey, EntityType, WritableStore, -}; +use graph::components::store::{DeploymentLocator, DerivedEntityQuery, EntityKey, WritableStore}; use 
graph::data::subgraph::*; use graph::semver::Version; use graph::{entity, prelude::*}; From 8f49222650e44e73f3516ab03d42b456a473654c Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 14:19:54 -0700 Subject: [PATCH 0434/2104] graph, store: Remove Deserialize from EntityType --- graph/src/components/store/mod.rs | 13 ++++++++----- graph/src/data/store/mod.rs | 2 +- graph/src/data/subgraph/mod.rs | 9 +++++---- graph/src/data_source/mod.rs | 5 +++-- graph/src/data_source/offchain.rs | 18 +++++++++++++----- graph/src/schema/entity_type.rs | 4 ++-- store/postgres/src/relational.rs | 4 ++-- .../tests/chain/ethereum/manifest.rs | 5 ++++- store/test-store/tests/postgres/store.rs | 12 ++++++------ 9 files changed, 44 insertions(+), 28 deletions(-) diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index f03257358c4..c71d5cdfd5d 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -606,7 +606,7 @@ pub enum EntityChange { Data { subgraph_id: DeploymentHash, /// Entity type name of the changed entity. - entity_type: EntityType, + entity_type: String, }, Assignment { deployment: DeploymentLocator, @@ -618,7 +618,7 @@ impl EntityChange { pub fn for_data(subgraph_id: DeploymentHash, key: EntityKey) -> Self { Self::Data { subgraph_id, - entity_type: key.entity_type, + entity_type: key.entity_type.to_string(), } } @@ -629,14 +629,17 @@ impl EntityChange { } } - pub fn as_filter(&self) -> SubscriptionFilter { + pub fn as_filter(&self, schema: &InputSchema) -> SubscriptionFilter { use EntityChange::*; match self { Data { subgraph_id, entity_type, .. - } => SubscriptionFilter::Entities(subgraph_id.clone(), entity_type.clone()), + } => SubscriptionFilter::Entities( + subgraph_id.clone(), + schema.entity_type(entity_type).unwrap(), + ), Assignment { .. 
} => SubscriptionFilter::Assignment, } } @@ -696,7 +699,7 @@ impl StoreEvent { .into_iter() .map(|entity_type| EntityChange::Data { subgraph_id: deployment.clone(), - entity_type, + entity_type: entity_type.to_string(), }), ); Self::from_set(changes) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index e95959c3f85..559f1a11e8a 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -55,7 +55,7 @@ impl SubscriptionFilter { entity_type, .. }, - ) => subgraph_id == eid && entity_type == etype, + ) => subgraph_id == eid && entity_type == etype.as_str(), (Self::Assignment, EntityChange::Assignment { .. }) => true, _ => false, } diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 70f01bd91cf..5fbe307d72c 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -12,7 +12,7 @@ pub use features::{SubgraphFeature, SubgraphFeatureValidationError}; use crate::object; use anyhow::{anyhow, Context, Error}; -use futures03::{future::try_join3, stream::FuturesOrdered, TryStreamExt as _}; +use futures03::{future::try_join, stream::FuturesOrdered, TryStreamExt as _}; use itertools::Itertools; use semver::Version; use serde::{de, ser}; @@ -816,8 +816,9 @@ impl UnresolvedSubgraphManifest { ); } - let (schema, data_sources, templates) = try_join3( - schema.resolve(id.clone(), resolver, logger), + let schema = schema.resolve(id.clone(), resolver, logger).await?; + + let (data_sources, templates) = try_join( data_sources .into_iter() .enumerate() @@ -828,7 +829,7 @@ impl UnresolvedSubgraphManifest { .into_iter() .enumerate() .map(|(idx, template)| { - template.resolve(resolver, logger, ds_count as u32 + idx as u32) + template.resolve(resolver, &schema, logger, ds_count as u32 + idx as u32) }) .collect::>() .try_collect::>(), diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index bc5f3d31dff..90c0597805f 100644 --- a/graph/src/data_source/mod.rs +++ 
b/graph/src/data_source/mod.rs @@ -17,7 +17,7 @@ use crate::{ }, data_source::offchain::OFFCHAIN_KINDS, prelude::{CheapClone as _, DataSourceContext}, - schema::EntityType, + schema::{EntityType, InputSchema}, }; use anyhow::Error; use semver::Version; @@ -342,6 +342,7 @@ impl UnresolvedDataSourceTemplate { pub async fn resolve( self, resolver: &Arc, + schema: &InputSchema, logger: &Logger, manifest_idx: u32, ) -> Result, Error> { @@ -351,7 +352,7 @@ impl UnresolvedDataSourceTemplate { .await .map(DataSourceTemplate::Onchain), Self::Offchain(ds) => ds - .resolve(resolver, logger, manifest_idx) + .resolve(resolver, logger, manifest_idx, schema) .await .map(DataSourceTemplate::Offchain), } diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index 67c8f1d1de2..da7a5d91975 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -10,7 +10,7 @@ use crate::{ data_source, ipfs_client::CidFile, prelude::{DataSourceContext, Link}, - schema::EntityType, + schema::{EntityType, InputSchema}, }; use anyhow::{anyhow, Context, Error}; use itertools::Itertools; @@ -370,7 +370,7 @@ pub struct UnresolvedMapping { pub language: String, pub file: Link, pub handler: String, - pub entities: Vec, + pub entities: Vec, } impl UnresolvedDataSource { @@ -381,6 +381,7 @@ impl UnresolvedDataSource { logger: &Logger, manifest_idx: u32, causality_region: CausalityRegion, + schema: &InputSchema, ) -> Result { info!(logger, "Resolve offchain data source"; "name" => &self.name, @@ -396,7 +397,7 @@ impl UnresolvedDataSource { kind, name: self.name, source, - mapping: self.mapping.resolve(resolver, logger).await?, + mapping: self.mapping.resolve(resolver, schema, logger).await?, context: Arc::new(None), creation_block: None, done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)), @@ -409,13 +410,19 @@ impl UnresolvedMapping { pub async fn resolve( self, resolver: &Arc, + schema: &InputSchema, logger: &Logger, ) -> Result { info!(logger, 
"Resolve offchain mapping"; "link" => &self.file.link); + let entities = self + .entities + .iter() + .map(|s| schema.entity_type(s)) + .collect::>()?; Ok(Mapping { language: self.language, api_version: semver::Version::parse(&self.api_version)?, - entities: self.entities, + entities, handler: self.handler, runtime: Arc::new(resolver.cat(logger, &self.file).await?), link: self.file, @@ -446,12 +453,13 @@ impl UnresolvedDataSourceTemplate { resolver: &Arc, logger: &Logger, manifest_idx: u32, + schema: &InputSchema, ) -> Result { let kind = OffchainDataSourceKind::from_str(&self.kind)?; let mapping = self .mapping - .resolve(resolver, logger) + .resolve(resolver, schema, logger) .await .with_context(|| format!("failed to resolve data source template {}", self.name))?; diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 710c0fb9e02..04a837e5376 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt, sync::Arc}; use anyhow::{bail, Error}; -use serde::{Deserialize, Serialize}; +use serde::Serialize; use crate::{ cheap_clone::CheapClone, @@ -12,7 +12,7 @@ use crate::{ /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. 
}` -#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Clone, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityType(Word); impl EntityType { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index fc9ddb08702..5e7cd164408 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -931,13 +931,13 @@ impl Layout { .filter(|id| !unclamped.contains(id)) .map(|_| EntityChange::Data { subgraph_id: self.site.deployment.clone(), - entity_type: table.object.clone(), + entity_type: table.object.to_string(), }); changes.extend(deleted); // EntityChange for versions that we just updated or inserted let set = unclamped.into_iter().map(|_| EntityChange::Data { subgraph_id: self.site.deployment.clone(), - entity_type: table.object.clone(), + entity_type: table.object.to_string(), }); changes.extend(set); } diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index 11aad8ad2fc..609d264ba2c 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -24,7 +24,10 @@ use graph::semver::Version; use graph_chain_ethereum::{BlockHandlerFilter, Chain, NodeCapabilities}; use test_store::LOGGER; -const GQL_SCHEMA: &str = "type Thing @entity { id: ID! }"; +const GQL_SCHEMA: &str = r#" + type Thing @entity { id: ID! } + type TestEntity @entity { id: ID! 
} +"#; const GQL_SCHEMA_FULLTEXT: &str = include_str!("full-text.graphql"); const MAPPING_WITH_IPFS_FUNC_WASM: &[u8] = include_bytes!("ipfs-on-ethereum-contracts.wasm"); const ABI: &str = "[{\"type\":\"function\", \"inputs\": [{\"name\": \"i\",\"type\": \"uint256\"}],\"name\":\"get\",\"outputs\": [{\"type\": \"address\",\"name\": \"o\"}]}]"; diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 359fba81f50..d6d9b35ba46 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -899,7 +899,7 @@ fn find() { fn make_entity_change(entity_type: &EntityType) -> EntityChange { EntityChange::Data { subgraph_id: TEST_SUBGRAPH_ID.clone(), - entity_type: entity_type.to_owned(), + entity_type: entity_type.to_string(), } } @@ -1234,7 +1234,7 @@ fn revert_block_with_dynamic_data_source_operations() { changes: HashSet::from_iter( vec![EntityChange::Data { subgraph_id: DeploymentHash::new("testsubgraph").unwrap(), - entity_type: USER_TYPE.to_owned(), + entity_type: USER_TYPE.to_string(), }] .into_iter(), ), @@ -1330,21 +1330,21 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { StoreEvent::new(vec![ EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: USER_TYPE.clone(), + entity_type: USER_TYPE.to_string(), }, EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: USER_TYPE.clone(), + entity_type: USER_TYPE.to_string(), }, ]), StoreEvent::new(vec![ EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: USER_TYPE.clone(), + entity_type: USER_TYPE.to_string(), }, EntityChange::Data { subgraph_id: subgraph_id.clone(), - entity_type: USER_TYPE.clone(), + entity_type: USER_TYPE.to_string(), }, ]), ]; From 934bc7ca5de6a7b76964943c11f49e9f162a622d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 14:42:58 -0700 Subject: [PATCH 0435/2104] graph: Do not store EntityType values within InputSchema --- 
graph/src/schema/input_schema.rs | 31 ++++++++++++++----------------- store/postgres/src/relational.rs | 2 +- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 3bd358fe00e..cd3c43a8acf 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -39,12 +39,10 @@ pub struct InputSchema { #[derive(Debug, PartialEq)] pub struct Inner { schema: Schema, - immutable_types: HashSet, + immutable_types: HashSet, // Maps each entity type to its field names - field_names: HashMap>, + field_names: HashMap>, pool: Arc, - - poi_type: EntityType, } impl CheapClone for InputSchema { @@ -59,15 +57,15 @@ impl InputSchema { fn create(schema: Schema) -> Self { let pool = Arc::new(atom_pool(&schema.document)); + // The `unwrap` of `lookup` is safe because we just created the pool let immutable_types = HashSet::from_iter( schema .document .get_object_type_definitions() .into_iter() .filter(|obj_type| obj_type.is_immutable()) - .map(|obj_type| EntityType::new(&pool, &obj_type.name)) - .collect::, _>>() - .unwrap(), + .map(|obj_type| pool.lookup(&obj_type.name).unwrap()) + .collect::>(), ); let field_names = HashMap::from_iter( @@ -81,21 +79,18 @@ impl InputSchema { .iter() .map(|field| pool.lookup(&field.name).unwrap()) .collect(); - EntityType::new(&pool, &obj_type.name).map(|t| (t, fields)) + let type_atom = pool.lookup(&obj_type.name).unwrap(); + (type_atom, fields) }) - .collect::, _>>() - .unwrap(), + .collect::>(), ); - let poi_type = EntityType::new(&pool, POI_OBJECT).unwrap(); - Self { inner: Arc::new(Inner { schema, immutable_types, field_names, pool, - poi_type, }), } } @@ -286,7 +281,8 @@ impl InputSchema { } pub fn is_immutable(&self, entity_type: &EntityType) -> bool { - self.inner.immutable_types.contains(entity_type) + let atom = self.inner.pool.lookup(entity_type.as_str()).unwrap(); + self.inner.immutable_types.contains(&atom) } pub fn get_named_type(&self, 
name: &str) -> Option<&s::TypeDefinition> { @@ -393,15 +389,16 @@ impl InputSchema { } pub fn has_field(&self, entity_type: &EntityType, field: Atom) -> bool { + let atom = self.inner.pool.lookup(entity_type.as_str()).unwrap(); self.inner .field_names - .get(entity_type) + .get(&atom) .map(|fields| fields.contains(&field)) .unwrap_or(false) } - pub fn poi_type(&self) -> &EntityType { - &self.inner.poi_type + pub fn poi_type(&self) -> EntityType { + EntityType::new(&self.inner.pool, POI_OBJECT).unwrap() } pub fn poi_digest(&self) -> Word { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 5e7cd164408..bc7121a8e1c 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -437,7 +437,7 @@ impl Layout { } pub fn supports_proof_of_indexing(&self) -> bool { - self.tables.contains_key(self.input_schema.poi_type()) + self.tables.contains_key(&self.input_schema.poi_type()) } pub fn create_relational_schema( From 393fb7a8922529965b963babd505295bffaaaa49 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 14:58:31 -0700 Subject: [PATCH 0436/2104] all: Don't use Arc since it already does that internally --- chain/ethereum/src/chain.rs | 2 +- chain/near/src/chain.rs | 2 +- chain/substreams/src/block_stream.rs | 2 +- chain/substreams/src/mapper.rs | 5 ++--- graph/src/blockchain/block_stream.rs | 2 +- graph/src/components/server/index_node.rs | 4 +--- graph/src/components/store/entity_cache.rs | 2 +- graph/src/components/store/mod.rs | 6 +++--- graph/src/components/store/traits.rs | 12 ++++++------ graph/src/components/store/write.rs | 8 ++++---- graph/src/schema/input_schema.rs | 3 +++ graphql/src/store/query.rs | 4 ++-- store/postgres/src/deployment_store.rs | 4 ++-- store/postgres/src/fork.rs | 13 ++++++------- store/postgres/src/query_store.rs | 2 +- store/postgres/src/subgraph_store.rs | 2 +- store/postgres/src/writable.rs | 10 +++++----- store/test-store/tests/graph/entity_cache.rs | 16 
+++++++--------- tests/src/fixture/mod.rs | 4 ++-- 19 files changed, 50 insertions(+), 53 deletions(-) diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 1a125ad48a9..12a322c6422 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -106,7 +106,7 @@ impl BlockStreamBuilder for EthereumStreamBuilder { async fn build_substreams( &self, _chain: &Chain, - _schema: Arc, + _schema: InputSchema, _deployment: DeploymentLocator, _block_cursor: FirehoseCursor, _subgraph_current_block: Option, diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index f25c36b71df..765a88f495f 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -44,7 +44,7 @@ impl BlockStreamBuilder for NearStreamBuilder { async fn build_substreams( &self, _chain: &Chain, - _schema: Arc, + _schema: InputSchema, _deployment: DeploymentLocator, _block_cursor: FirehoseCursor, _subgraph_current_block: Option, diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index c0e14c9b9a4..ef073789e28 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -34,7 +34,7 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { async fn build_substreams( &self, chain: &Chain, - schema: Arc, + schema: InputSchema, deployment: DeploymentLocator, block_cursor: FirehoseCursor, subgraph_current_block: Option, diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 1aad546e9bd..ca5eb8a1156 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; use std::str::FromStr; -use std::sync::Arc; use crate::codec::entity_change; use crate::{codec, Block, Chain, EntityChanges, ParsedChanges, TriggerData}; @@ -26,7 +25,7 @@ use prost::Message; // the store. If schema is None then only the original block is passed. 
This None should only // be used for block ingestion where entity content is empty and gets discarded. pub struct Mapper { - pub schema: Option>, + pub schema: Option, } #[async_trait] @@ -126,7 +125,7 @@ impl SubstreamsMapper for Mapper { fn parse_changes( changes: &EntityChanges, - schema: &Arc, + schema: &InputSchema, ) -> anyhow::Result> { let mut parsed_changes = vec![]; for entity_change in changes.entity_changes.iter() { diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index e5fbacb7d1c..18e0fd78ef1 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -124,7 +124,7 @@ pub trait BlockStreamBuilder: Send + Sync { async fn build_substreams( &self, chain: &C, - schema: Arc, + schema: InputSchema, deployment: DeploymentLocator, block_cursor: FirehoseCursor, subgraph_current_block: Option, diff --git a/graph/src/components/server/index_node.rs b/graph/src/components/server/index_node.rs index 7afdef5e8ed..56fd8f0fd71 100644 --- a/graph/src/components/server/index_node.rs +++ b/graph/src/components/server/index_node.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use futures::prelude::*; use crate::{prelude::BlockNumber, schema::InputSchema}; @@ -15,7 +13,7 @@ pub struct VersionInfo { pub failed: bool, pub description: Option, pub repository: Option, - pub schema: Arc, + pub schema: InputSchema, pub network: String, } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index fb95a131ff8..95dc39331de 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -86,7 +86,7 @@ pub struct EntityCache { /// The store is only used to read entities. 
pub store: Arc, - pub schema: Arc, + pub schema: InputSchema, } impl Debug for EntityCache { diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index c71d5cdfd5d..fc02d69441e 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1073,11 +1073,11 @@ impl fmt::Display for DeploymentSchemaVersion { /// A `ReadStore` that is always empty. pub struct EmptyStore { - schema: Arc, + schema: InputSchema, } impl EmptyStore { - pub fn new(schema: Arc) -> Self { + pub fn new(schema: InputSchema) -> Self { EmptyStore { schema } } } @@ -1098,7 +1098,7 @@ impl ReadStore for EmptyStore { Ok(BTreeMap::new()) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { self.schema.cheap_clone() } } diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index ce28e492b94..dc8d6bd2e7c 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -137,7 +137,7 @@ pub trait SubgraphStore: Send + Sync + 'static { ) -> Result, StoreError>; /// Return the GraphQL schema supplied by the user - fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result, StoreError>; + fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result; /// Return a bool represeting whether there is a pending graft for the subgraph fn graft_pending(&self, id: &DeploymentHash) -> Result; @@ -225,7 +225,7 @@ pub trait ReadStore: Send + Sync + 'static { query_derived: &DerivedEntityQuery, ) -> Result, StoreError>; - fn input_schema(&self) -> Arc; + fn input_schema(&self) -> InputSchema; } // This silly impl is needed until https://github.com/rust-lang/rust/issues/65991 is stable. 
@@ -248,13 +248,13 @@ impl ReadStore for Arc { (**self).get_derived(entity_derived) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { (**self).input_schema() } } pub trait DeploymentCursorTracker: Sync + Send + 'static { - fn input_schema(&self) -> Arc; + fn input_schema(&self) -> InputSchema; /// Get a pointer to the most recently processed block in the subgraph. fn block_ptr(&self) -> Option; @@ -274,7 +274,7 @@ impl DeploymentCursorTracker for Arc { (**self).firehose_cursor() } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { (**self).input_schema() } } @@ -564,7 +564,7 @@ pub trait QueryStore: Send + Sync { fn api_schema(&self) -> Result, QueryExecutionError>; - fn input_schema(&self) -> Result, QueryExecutionError>; + fn input_schema(&self) -> Result; fn network_name(&self) -> &str; diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 0323345434a..88ba70116d9 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -1,5 +1,5 @@ //! 
Data structures and helpers for writing subgraph changes to the store -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use crate::{ blockchain::{block_stream::FirehoseCursor, BlockPtr}, @@ -524,12 +524,12 @@ impl<'a> Iterator for ClampsByBlockIterator<'a> { /// A list of entity changes with one group per entity type #[derive(Debug)] pub struct RowGroups { - schema: Arc, + schema: InputSchema, pub groups: Vec, } impl RowGroups { - fn new(schema: Arc) -> Self { + fn new(schema: InputSchema) -> Self { Self { schema, groups: Vec::new(), @@ -633,7 +633,7 @@ pub struct Batch { impl Batch { pub fn new( - schema: Arc, + schema: InputSchema, block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, mut raw_mods: Vec, diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index cd3c43a8acf..d03f638f26a 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -31,6 +31,9 @@ const POI_DIGEST: &str = "digest"; /// with writing a subgraph should use this struct. Code that deals with /// querying subgraphs will instead want to use an `ApiSchema` which can be /// generated with the `api_schema` method on `InputSchema` +/// +/// There's no need to put this into an `Arc`, since `InputSchema` already +/// does that internally and is `CheapClone` #[derive(Clone, Debug, PartialEq)] pub struct InputSchema { inner: Arc, diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index ea2109242b0..7ca0b91b982 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -22,7 +22,7 @@ enum OrderDirection { pub(crate) struct SchemaPair { pub api: Arc, - pub input: Arc, + pub input: InputSchema, } /// Builds a EntityQuery from GraphQL arguments. 
@@ -932,7 +932,7 @@ mod tests { let api_schema = ApiSchema::from_graphql_schema(api_schema).unwrap(); SchemaPair { - input: Arc::new(input_schema), + input: input_schema, api: Arc::new(api_schema), } } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index f95bc1f63d1..702ee6bb198 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -76,7 +76,7 @@ pub enum ReplicaId { #[derive(Clone)] pub(crate) struct SubgraphInfo { /// The schema as supplied by the user - pub(crate) input: Arc, + pub(crate) input: InputSchema, /// The schema we derive from `input` with `graphql::schema::api::api_schema` pub(crate) api: HashMap>, /// The block number at which this subgraph was grafted onto @@ -511,7 +511,7 @@ impl DeploymentStore { }; let info = SubgraphInfo { - input: Arc::new(manifest_info.input_schema), + input: manifest_info.input_schema, api, graft_block, debug_fork, diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 1940ba1f881..ce3846322e6 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -1,7 +1,7 @@ use std::{ collections::{HashMap, HashSet}, str::FromStr, - sync::{Arc, Mutex}, + sync::Mutex, }; use graph::{ @@ -41,7 +41,7 @@ struct Variables { pub(crate) struct SubgraphFork { client: reqwest::Client, endpoint: Url, - schema: Arc, + schema: InputSchema, fetched_ids: Mutex>, logger: Logger, } @@ -91,7 +91,7 @@ impl SubgraphFork { pub(crate) fn new( base: Url, id: DeploymentHash, - schema: Arc, + schema: InputSchema, logger: Logger, ) -> Result { Ok(Self { @@ -249,8 +249,8 @@ mod tests { DeploymentHash::new("test").unwrap() } - fn test_schema() -> Arc { - let schema = InputSchema::new( + fn test_schema() -> InputSchema { + InputSchema::new( DeploymentHash::new("test").unwrap(), parse_schema::( r#"type Gravatar @entity { @@ -262,8 +262,7 @@ mod tests { ) .unwrap(), ) - .unwrap(); - Arc::new(schema) + .unwrap() } fn test_logger() -> 
Logger { diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index 19e35c3cf99..2b78d2d8a26 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -120,7 +120,7 @@ impl QueryStoreTrait for QueryStore { Ok(info.api.get(&self.api_version).unwrap().clone()) } - fn input_schema(&self) -> Result, QueryExecutionError> { + fn input_schema(&self) -> Result { let info = self.store.subgraph_info(&self.site)?; Ok(info.input) } diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 3745fb8cdc4..e4579ff893b 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1370,7 +1370,7 @@ impl SubgraphStoreTrait for SubgraphStore { Ok(changes) } - fn input_schema(&self, id: &DeploymentHash) -> Result, StoreError> { + fn input_schema(&self, id: &DeploymentHash) -> Result { let (store, site) = self.store(id)?; let info = store.subgraph_info(&site)?; Ok(info.input) diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index b8d449182a5..5e1a36fd80b 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -79,7 +79,7 @@ struct SyncStore { store: WritableSubgraphStore, writable: Arc, site: Arc, - input_schema: Arc, + input_schema: InputSchema, manifest_idx_and_name: Arc>, } @@ -369,8 +369,8 @@ impl SyncStore { .await } - fn input_schema(&self) -> Arc { - self.input_schema.clone() + fn input_schema(&self) -> InputSchema { + self.input_schema.cheap_clone() } } @@ -1444,7 +1444,7 @@ impl ReadStore for WritableStore { self.writer.get_derived(key) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { self.store.input_schema() } } @@ -1458,7 +1458,7 @@ impl DeploymentCursorTracker for WritableStore { self.block_cursor.lock().unwrap().clone() } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { self.store.input_schema() } } diff --git 
a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 102e51e494e..88320f06f28 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -29,9 +29,8 @@ lazy_static! { static ref SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("entity_cache").unwrap(); static ref DEPLOYMENT: DeploymentLocator = DeploymentLocator::new(DeploymentId::new(-12), SUBGRAPH_ID.clone()); - static ref SCHEMA: Arc = Arc::new( - InputSchema::parse( - " + static ref SCHEMA: InputSchema = InputSchema::parse( + " type Band @entity { id: ID! name: String! @@ -39,10 +38,9 @@ lazy_static! { label: String } ", - SUBGRAPH_ID.clone(), - ) - .expect("Test schema invalid") - ); + SUBGRAPH_ID.clone(), + ) + .expect("Test schema invalid"); } struct MockStore { @@ -74,7 +72,7 @@ impl ReadStore for MockStore { Ok(self.get_many_res.clone()) } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { SCHEMA.clone() } } @@ -87,7 +85,7 @@ impl DeploymentCursorTracker for MockStore { unimplemented!() } - fn input_schema(&self) -> Arc { + fn input_schema(&self) -> InputSchema { todo!() } } diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index dae161d6cad..5aaaf9cf170 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -584,7 +584,7 @@ impl BlockStreamBuilder for MutexBlockStreamBuilder { async fn build_substreams( &self, _chain: &C, - _schema: Arc, + _schema: InputSchema, _deployment: DeploymentLocator, _block_cursor: FirehoseCursor, _subgraph_current_block: Option, @@ -623,7 +623,7 @@ where async fn build_substreams( &self, _chain: &C, - _schema: Arc, + _schema: InputSchema, _deployment: DeploymentLocator, _block_cursor: FirehoseCursor, _subgraph_current_block: Option, From 34790b47ca26f6ef19ed4f3e230e08fccbee97e2 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 15:19:23 -0700 Subject: [PATCH 0437/2104] graph: Make EntityType carry a 
reference to InputSchema --- graph/src/data/store/mod.rs | 2 +- graph/src/schema/entity_type.rs | 86 +++++++++++++++++++++++++------- graph/src/schema/input_schema.rs | 14 +++--- 3 files changed, 74 insertions(+), 28 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 559f1a11e8a..e223cbe8cc9 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -931,7 +931,7 @@ impl Entity { for field in self.0.atoms() { if !schema.has_field(&key.entity_type, field) { return Err(EntityValidationError::FieldsNotDefined { - entity: key.entity_type.clone().into_string(), + entity: key.entity_type.to_string(), }); } } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 04a837e5376..163aadbc504 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -4,50 +4,61 @@ use anyhow::{bail, Error}; use serde::Serialize; use crate::{ - cheap_clone::CheapClone, - data::{graphql::ObjectOrInterface, value::Word}, - prelude::s, - util::intern::AtomPool, + cheap_clone::CheapClone, data::graphql::ObjectOrInterface, prelude::s, util::intern::Atom, }; +use super::{input_schema::POI_OBJECT, InputSchema}; + /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. }` -#[derive(Clone, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct EntityType(Word); +/// +/// Even though it is not implemented as a string type, it behaves as if it +/// were the string name of the type for all external purposes like +/// comparison, ordering, and serialization +#[derive(Clone)] +pub struct EntityType { + schema: InputSchema, + atom: Atom, +} impl EntityType { /// Construct a new entity type. 
Ideally, this is only called when /// `entity_type` either comes from the GraphQL schema, or from /// the database from fields that are known to contain a valid entity type - pub(crate) fn new(pool: &Arc, entity_type: &str) -> Result { - match pool.lookup(entity_type) { - Some(_) => Ok(EntityType(Word::from(entity_type))), - None => bail!("entity type `{}` is not interned", entity_type), - } + // This method is only meant to be used in `InputSchema`; all external + // constructions of an `EntityType` need to go through that struct + pub(in crate::schema) fn new(schema: InputSchema, name: &str) -> Result { + let atom = match schema.pool().lookup(name) { + Some(atom) => atom, + None => bail!("entity type `{name}` is not interned"), + }; + + Ok(EntityType { schema, atom }) } pub fn as_str(&self) -> &str { - self.0.as_str() + // unwrap: we constructed the entity type from the schema's pool + self.schema.pool().get(self.atom).unwrap() } - pub fn into_string(self) -> String { - self.0.to_string() + pub fn is_poi(&self) -> bool { + self.as_str() == POI_OBJECT } - pub fn is_poi(&self) -> bool { - self.0.as_str() == "Poi$" + fn same_pool(&self, other: &EntityType) -> bool { + Arc::ptr_eq(self.schema.pool(), other.schema.pool()) } } impl fmt::Display for EntityType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) + write!(f, "{}", self.as_str()) } } impl Borrow for EntityType { fn borrow(&self) -> &str { - &self.0 + self.as_str() } } @@ -55,10 +66,47 @@ impl CheapClone for EntityType {} impl std::fmt::Debug for EntityType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "EntityType({})", self.0) + write!(f, "EntityType({})", self.as_str()) + } +} + +impl Serialize for EntityType { + fn serialize(&self, serializer: S) -> Result { + self.as_str().serialize(serializer) + } +} + +impl PartialEq for EntityType { + fn eq(&self, other: &Self) -> bool { + if self.same_pool(other) && self.atom == other.atom { + return 
true; + } + self.as_str() == other.as_str() + } +} + +impl Eq for EntityType {} + +impl PartialOrd for EntityType { + fn partial_cmp(&self, other: &Self) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl Ord for EntityType { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.as_str().cmp(other.as_str()) + } +} + +impl std::hash::Hash for EntityType { + fn hash(&self, state: &mut H) { + self.as_str().hash(state) } } +/// A trait to mark types that can reasonably be turned into the name of an +/// entity type pub trait AsEntityTypeName { fn name(&self) -> &str; } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index d03f638f26a..b58a22f4b69 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -22,7 +22,7 @@ use super::fulltext::FulltextDefinition; use super::{ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError}; /// The name of the PoI entity type -const POI_OBJECT: &str = "Poi$"; +pub(crate) const POI_OBJECT: &str = "Poi$"; /// The name of the digest attribute of POI entities const POI_DIGEST: &str = "digest"; @@ -401,18 +401,16 @@ impl InputSchema { } pub fn poi_type(&self) -> EntityType { - EntityType::new(&self.inner.pool, POI_OBJECT).unwrap() + // unwrap: we make sure to put POI_OBJECT into the pool + EntityType::new(self.cheap_clone(), POI_OBJECT).unwrap() } pub fn poi_digest(&self) -> Word { Word::from(POI_DIGEST) } - pub fn atom(&self, s: &str) -> Option { - self.inner.pool.lookup(s) - } - - pub fn pool(&self) -> &Arc { + // A helper for the `EntityType` constructor + pub(in crate::schema) fn pool(&self) -> &Arc { &self.inner.pool } @@ -421,7 +419,7 @@ impl InputSchema { /// of `named` is based on user input.
If `named` is an internal object, /// like a `ObjectType`, it is safe to unwrap the result pub fn entity_type(&self, named: N) -> Result { - EntityType::new(&self.inner.pool, named.name()) + EntityType::new(self.cheap_clone(), named.name()) } } From a58f1263d3c7f58b0c1e0262e3c4db6e33da9d24 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 15:34:55 -0700 Subject: [PATCH 0438/2104] graph, store: Move some functions from InputSchema to EntityType This makes code that uses them much easier to read --- chain/substreams/src/mapper.rs | 2 +- graph/src/components/store/entity_cache.rs | 2 +- graph/src/components/store/write.rs | 14 ++++---------- graph/src/data/store/mod.rs | 2 +- graph/src/schema/entity_type.rs | 17 ++++++++++++++++- graph/src/schema/input_schema.rs | 15 ++++++++------- store/postgres/src/writable.rs | 1 - 7 files changed, 31 insertions(+), 22 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index ca5eb8a1156..6633a1e1bd4 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -142,7 +142,7 @@ fn parse_changes( // Needless to say, this is a very ugly hack, and the // real fix is what's described in [this // issue](https://github.com/graphprotocol/graph-node/issues/4663) - let entity_id: String = match schema.id_type(&entity_type)? { + let entity_id: String = match entity_type.id_type()? 
{ IdType::String => entity_change.id.clone(), IdType::Bytes => { if entity_change.id.starts_with("0x") { diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 95dc39331de..2270477b155 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -412,7 +412,7 @@ impl EntityCache { // is wrong and the store already has a version of the entity from a // previous block, the attempt to insert will trigger a constraint // violation in the database, ensuring correctness - let missing = missing.filter(|key| !self.schema.is_immutable(&key.entity_type)); + let missing = missing.filter(|key| !key.entity_type.is_immutable()); for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? { self.current.insert(entity_key, Some(entity)); diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 88ba70116d9..94b1709697b 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -9,7 +9,6 @@ use crate::{ data::{subgraph::schema::SubgraphError, value::Word}, data_source::CausalityRegion, prelude::DeploymentHash, - schema::InputSchema, util::cache_weight::CacheWeight, }; @@ -524,16 +523,12 @@ impl<'a> Iterator for ClampsByBlockIterator<'a> { /// A list of entity changes with one group per entity type #[derive(Debug)] pub struct RowGroups { - schema: InputSchema, pub groups: Vec, } impl RowGroups { - fn new(schema: InputSchema) -> Self { - Self { - schema, - groups: Vec::new(), - } + fn new() -> Self { + Self { groups: Vec::new() } } fn group(&self, entity_type: &EntityType) -> Option<&RowGroup> { @@ -552,7 +547,7 @@ impl RowGroups { match pos { Some(pos) => &mut self.groups[pos], None => { - let immutable = self.schema.is_immutable(entity_type); + let immutable = entity_type.is_immutable(); self.groups .push(RowGroup::new(entity_type.clone(), immutable)); // unwrap: we just pushed an entry @@ -633,7 
+628,6 @@ pub struct Batch { impl Batch { pub fn new( - schema: InputSchema, block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, mut raw_mods: Vec, @@ -654,7 +648,7 @@ impl Batch { EntityModification::Remove { .. } => 0, }); - let mut mods = RowGroups::new(schema); + let mut mods = RowGroups::new(); for m in raw_mods { mods.group_entry(&m.key().entity_type).push(m, block)?; diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index e223cbe8cc9..f3ac88d1f39 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -929,7 +929,7 @@ impl Entity { })?; for field in self.0.atoms() { - if !schema.has_field(&key.entity_type, field) { + if !key.entity_type.has_field(field) { return Err(EntityValidationError::FieldsNotDefined { entity: key.entity_type.to_string(), }); diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 163aadbc504..46344b1fb94 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -4,7 +4,10 @@ use anyhow::{bail, Error}; use serde::Serialize; use crate::{ - cheap_clone::CheapClone, data::graphql::ObjectOrInterface, prelude::s, util::intern::Atom, + cheap_clone::CheapClone, + data::{graphql::ObjectOrInterface, store::IdType}, + prelude::s, + util::intern::Atom, }; use super::{input_schema::POI_OBJECT, InputSchema}; @@ -45,6 +48,18 @@ impl EntityType { self.as_str() == POI_OBJECT } + pub fn has_field(&self, field: Atom) -> bool { + self.schema.has_field(self.atom, field) + } + + pub fn is_immutable(&self) -> bool { + self.schema.is_immutable(self.atom) + } + + pub fn id_type(&self) -> Result { + self.schema.id_type(self) + } + fn same_pool(&self, other: &EntityType) -> bool { Arc::ptr_eq(self.schema.pool(), other.schema.pool()) } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index b58a22f4b69..998f406101a 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -245,7 +245,10 @@ impl 
InputSchema { } } - pub fn id_type(&self, entity_type: &EntityType) -> Result { + pub(in crate::schema) fn id_type( + &self, + entity_type: &EntityType, + ) -> Result { let base_type = self .inner .schema @@ -283,9 +286,8 @@ impl InputSchema { } } - pub fn is_immutable(&self, entity_type: &EntityType) -> bool { - let atom = self.inner.pool.lookup(entity_type.as_str()).unwrap(); - self.inner.immutable_types.contains(&atom) + pub(in crate::schema) fn is_immutable(&self, entity_type: Atom) -> bool { + self.inner.immutable_types.contains(&entity_type) } pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { @@ -391,11 +393,10 @@ impl InputSchema { Entity::try_make(self.inner.pool.clone(), iter) } - pub fn has_field(&self, entity_type: &EntityType, field: Atom) -> bool { - let atom = self.inner.pool.lookup(entity_type.as_str()).unwrap(); + pub(in crate::schema) fn has_field(&self, entity_type: Atom, field: Atom) -> bool { self.inner .field_names - .get(&atom) + .get(&entity_type) .map(|fields| fields.contains(&field)) .unwrap_or(false) } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 5e1a36fd80b..26f884c351b 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1537,7 +1537,6 @@ impl WritableStoreTrait for WritableStore { is_non_fatal_errors_active: bool, ) -> Result<(), StoreError> { let batch = Batch::new( - self.store.input_schema.cheap_clone(), block_ptr_to.clone(), firehose_cursor.clone(), mods, From 339eda2307784e30477ebbea27a70f11850e4f06 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 15:55:18 -0700 Subject: [PATCH 0439/2104] all: Move EntityKey to graph::schema --- chain/substreams/src/chain.rs | 3 +- chain/substreams/src/mapper.rs | 3 +- core/src/subgraph/runner.rs | 5 +- core/src/subgraph/state.rs | 2 +- graph/src/components/store/entity_cache.rs | 4 +- graph/src/components/store/mod.rs | 57 +---------------- graph/src/components/subgraph/instance.rs | 
3 +- graph/src/data/store/mod.rs | 4 +- graph/src/runtime/gas/size_of.rs | 4 +- graph/src/schema/ast.rs | 3 +- graph/src/schema/entity_key.rs | 62 +++++++++++++++++++ graph/src/schema/input_schema.rs | 4 +- graph/src/schema/mod.rs | 2 + graph/src/util/cache_weight.rs | 3 +- runtime/test/src/test.rs | 2 +- runtime/wasm/src/host_exports.rs | 3 +- store/postgres/src/deployment_store.rs | 4 +- store/postgres/src/relational.rs | 4 +- store/postgres/src/relational_queries.rs | 4 +- store/postgres/src/writable.rs | 6 +- store/test-store/src/store.rs | 6 +- store/test-store/tests/graph/entity_cache.rs | 6 +- store/test-store/tests/graphql/query.rs | 3 +- store/test-store/tests/postgres/graft.rs | 6 +- store/test-store/tests/postgres/relational.rs | 3 +- .../tests/postgres/relational_bytes.rs | 3 +- store/test-store/tests/postgres/store.rs | 4 +- store/test-store/tests/postgres/writable.rs | 4 +- 28 files changed, 109 insertions(+), 108 deletions(-) create mode 100644 graph/src/schema/entity_key.rs diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 0c05d7fea54..1fdd26f96cc 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -5,9 +5,10 @@ use graph::blockchain::client::ChainClient; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter, }; -use graph::components::store::{DeploymentCursorTracker, EntityKey}; +use graph::components::store::DeploymentCursorTracker; use graph::firehose::FirehoseEndpoints; use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; +use graph::schema::EntityKey; use graph::{ blockchain::{ self, diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 6633a1e1bd4..0402983d175 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -6,14 +6,13 @@ use crate::{codec, Block, Chain, EntityChanges, ParsedChanges, TriggerData}; use graph::blockchain::block_stream::{ 
BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; -use graph::components::store::EntityKey; use graph::data::store::scalar::Bytes; use graph::data::store::IdType; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::BigDecimal; use graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, BlockPtr, Logger, Value}; -use graph::schema::InputSchema; +use graph::schema::{EntityKey, InputSchema}; use graph::slog::o; use graph::substreams::Clock; use graph::substreams_rpc::response::Message as SubstreamsMessage; diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 7a5146934e8..30e54adad5a 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -6,9 +6,7 @@ use crate::subgraph::stream::new_block_stream; use atomic_refcell::AtomicRefCell; use graph::blockchain::block_stream::{BlockStreamEvent, BlockWithTriggers, FirehoseCursor}; use graph::blockchain::{Block, Blockchain, DataSource as _, TriggerFilter as _}; -use graph::components::store::{ - EmptyStore, EntityKey, GetScope, ReadStore, StoredDynamicDataSource, -}; +use graph::components::store::{EmptyStore, GetScope, ReadStore, StoredDynamicDataSource}; use graph::components::{ store::ModificationsAndCache, subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, @@ -23,6 +21,7 @@ use graph::data_source::{ }; use graph::env::EnvVars; use graph::prelude::*; +use graph::schema::EntityKey; use graph::util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}; use std::sync::Arc; use std::time::{Duration, Instant}; diff --git a/core/src/subgraph/state.rs b/core/src/subgraph/state.rs index 0d5edd84b65..a2c9db4de09 100644 --- a/core/src/subgraph/state.rs +++ b/core/src/subgraph/state.rs @@ -1,6 +1,6 @@ use graph::{ - components::store::EntityKey, prelude::Entity, + schema::EntityKey, util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}, }; use std::time::Instant; diff 
--git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 2270477b155..c24d342cf75 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -6,10 +6,10 @@ use std::sync::Arc; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; -use crate::components::store::{self as s, Entity, EntityKey, EntityOperation}; +use crate::components::store::{self as s, Entity, EntityOperation}; use crate::data::store::{EntityValidationError, IntoEntityIterator}; use crate::prelude::ENV_VARS; -use crate::schema::InputSchema; +use crate::schema::{EntityKey, InputSchema}; use crate::util::intern::Error as InternError; use crate::util::lfu_cache::{EvictStats, LfuCache}; diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index fc02d69441e..0f65c30da02 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -36,8 +36,7 @@ use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::env::ENV_VARS; use crate::prelude::{Attribute, DeploymentHash, SubscriptionFilter, ValueType}; -use crate::schema::{EntityType, InputSchema}; -use crate::util::intern; +use crate::schema::{EntityKey, EntityType, InputSchema}; use crate::util::stats::MovingStats; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -53,39 +52,6 @@ impl EntityFilterDerivative { } } -/// Key by which an individual entity in the store can be accessed. Stores -/// only the entity type and id. The deployment must be known from context. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct EntityKey { - /// Name of the entity type. - pub entity_type: EntityType, - - /// ID of the individual entity. - pub entity_id: Word, - - /// This is the causality region of the data source that created the entity. 
- /// - /// In the case of an entity lookup, this is the causality region of the data source that is - /// doing the lookup. So if the entity exists but was created on a different causality region, - /// the lookup will return empty. - pub causality_region: CausalityRegion, -} - -impl EntityKey { - pub fn unknown_attribute(&self, err: intern::Error) -> StoreError { - StoreError::UnknownAttribute(self.entity_type.to_string(), err.not_interned()) - } -} - -impl std::fmt::Debug for EntityKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "EntityKey({}[{}], cr={})", - self.entity_type, self.entity_id, self.causality_region - ) - } -} #[derive(Debug, Clone)] pub struct LoadRelatedRequest { /// Name of the entity type. @@ -138,27 +104,6 @@ impl DerivedEntityQuery { } } -impl EntityKey { - // For use in tests only - #[cfg(debug_assertions)] - pub fn onchain(entity_type: &EntityType, entity_id: impl Into) -> Self { - Self { - entity_type: entity_type.clone(), - entity_id: entity_id.into().into(), - causality_region: CausalityRegion::ONCHAIN, - } - } - - pub fn from(id: &String, load_related_request: &LoadRelatedRequest) -> Self { - let clone = load_related_request.clone(); - Self { - entity_id: id.clone().into(), - entity_type: clone.entity_type, - causality_region: clone.causality_region, - } - } -} - #[derive(Clone, Debug, PartialEq)] pub struct Child { pub attr: Attribute, diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index d5592aeb902..8634d5725f7 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -1,9 +1,10 @@ use crate::{ blockchain::Blockchain, - components::store::{EntityKey, ReadStore, StoredDynamicDataSource}, + components::store::{ReadStore, StoredDynamicDataSource}, data::subgraph::schema::SubgraphError, data_source::DataSourceTemplate, prelude::*, + schema::EntityKey, util::lfu_cache::LfuCache, }; diff --git 
a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index f3ac88d1f39..064edade52e 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1,9 +1,9 @@ use crate::{ - components::store::{DeploymentLocator, EntityKey}, + components::store::DeploymentLocator, data::graphql::ObjectTypeExt, prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, - schema::{EntityType, InputSchema}, + schema::{EntityKey, EntityType, InputSchema}, util::intern::{self, AtomPool}, util::intern::{Error as InternError, NullValue, Object}, }; diff --git a/graph/src/runtime/gas/size_of.rs b/graph/src/runtime/gas/size_of.rs index bab0ed36d46..6d40816394b 100644 --- a/graph/src/runtime/gas/size_of.rs +++ b/graph/src/runtime/gas/size_of.rs @@ -1,9 +1,9 @@ //! Various implementations of GasSizeOf; use crate::{ - components::store::{EntityKey, LoadRelatedRequest}, + components::store::LoadRelatedRequest, data::store::{scalar::Bytes, Value}, - schema::EntityType, + schema::{EntityKey, EntityType}, }; use super::{Gas, GasSizeOf, SaturatingInto as _}; diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 9c7c931dc27..33c9d1b683f 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -405,11 +405,10 @@ pub fn is_list(field_type: &s::Type) -> bool { #[test] fn entity_validation() { - use crate::components::store::EntityKey; use crate::data::store; use crate::entity; use crate::prelude::{DeploymentHash, Entity}; - use crate::schema::{EntityType, InputSchema}; + use crate::schema::{EntityKey, EntityType, InputSchema}; const DOCUMENT: &str = " enum Color { red, yellow, blue } diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs new file mode 100644 index 00000000000..be9be42b918 --- /dev/null +++ b/graph/src/schema/entity_key.rs @@ -0,0 +1,62 @@ +use std::fmt; + +use crate::components::store::{LoadRelatedRequest, StoreError}; +use crate::data::value::Word; +use 
crate::data_source::CausalityRegion; +use crate::schema::EntityType; +use crate::util::intern; + +/// Key by which an individual entity in the store can be accessed. Stores +/// only the entity type and id. The deployment must be known from context. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct EntityKey { + /// Name of the entity type. + pub entity_type: EntityType, + + /// ID of the individual entity. + pub entity_id: Word, + + /// This is the causality region of the data source that created the entity. + /// + /// In the case of an entity lookup, this is the causality region of the data source that is + /// doing the lookup. So if the entity exists but was created on a different causality region, + /// the lookup will return empty. + pub causality_region: CausalityRegion, +} + +impl EntityKey { + pub fn unknown_attribute(&self, err: intern::Error) -> StoreError { + StoreError::UnknownAttribute(self.entity_type.to_string(), err.not_interned()) + } +} + +impl EntityKey { + // For use in tests only + #[cfg(debug_assertions)] + pub fn onchain(entity_type: &EntityType, entity_id: impl Into) -> Self { + Self { + entity_type: entity_type.clone(), + entity_id: entity_id.into().into(), + causality_region: CausalityRegion::ONCHAIN, + } + } + + pub fn from(id: &String, load_related_request: &LoadRelatedRequest) -> Self { + let clone = load_related_request.clone(); + Self { + entity_id: id.clone().into(), + entity_type: clone.entity_type, + causality_region: clone.causality_region, + } + } +} + +impl std::fmt::Debug for EntityKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "EntityKey({}[{}], cr={})", + self.entity_type, self.entity_id, self.causality_region + ) + } +} diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 998f406101a..751e2e18503 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Error}; 
use store::Entity; use crate::cheap_clone::CheapClone; -use crate::components::store::{EntityKey, LoadRelatedRequest}; +use crate::components::store::LoadRelatedRequest; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; use crate::data::store::{ @@ -19,7 +19,7 @@ use crate::schema::api_schema; use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError}; +use super::{ApiSchema, AsEntityTypeName, EntityKey, EntityType, Schema, SchemaValidationError}; /// The name of the PoI entity type pub(crate) const POI_OBJECT: &str = "Poi$"; diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index cb054d36a43..f5106c6d783 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -27,6 +27,7 @@ mod api; /// Utilities for working with GraphQL schema ASTs. pub mod ast; +mod entity_key; mod entity_type; mod fulltext; mod input_schema; @@ -34,6 +35,7 @@ mod input_schema; pub use api::{api_schema, is_introspection_field, APISchemaError, INTROSPECTION_QUERY_TYPE}; pub use api::{ApiSchema, ErrorPolicy}; +pub use entity_key::EntityKey; pub use entity_type::{AsEntityTypeName, EntityType}; pub use fulltext::{FulltextAlgorithm, FulltextConfig, FulltextDefinition, FulltextLanguage}; pub use input_schema::InputSchema; diff --git a/graph/src/util/cache_weight.rs b/graph/src/util/cache_weight.rs index 18e24bcbd1c..73e207b963a 100644 --- a/graph/src/util/cache_weight.rs +++ b/graph/src/util/cache_weight.rs @@ -1,8 +1,7 @@ use crate::{ - components::store::EntityKey, data::value::Word, prelude::{q, BigDecimal, BigInt, Value}, - schema::EntityType, + schema::{EntityKey, EntityType}, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 19b861db677..38168d12ed8 100644 --- a/runtime/test/src/test.rs +++ 
b/runtime/test/src/test.rs @@ -5,7 +5,7 @@ use graph::prelude::web3::types::U256; use graph::runtime::gas::GasCounter; use graph::runtime::{AscIndexId, AscType, HostExportError}; use graph::runtime::{AscPtr, ToAscObj}; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph::{components::store::*, ipfs_client::IpfsClient}; use graph::{entity, prelude::*}; use graph_chain_ethereum::{Chain, DataSource}; diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 6b29048e957..186dd6566bf 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -6,14 +6,13 @@ use std::time::{Duration, Instant}; use graph::data::value::Word; -use graph::schema::EntityType; +use graph::schema::{EntityKey, EntityType}; use never::Never; use semver::Version; use wasmtime::Trap; use web3::types::H160; use graph::blockchain::Blockchain; -use graph::components::store::EntityKey; use graph::components::store::{EnsLookup, GetScope, LoadRelatedRequest}; use graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 702ee6bb198..e0d9d485668 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -7,7 +7,7 @@ use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::write::RowGroup; use graph::components::store::{ - Batch, DerivedEntityQuery, EntityKey, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, + Batch, DerivedEntityQuery, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, StoredDynamicDataSource, VersionStats, }; use graph::components::versions::VERSIONS; @@ -43,7 +43,7 @@ use graph::prelude::{ DeploymentHash, DeploymentState, Entity, EntityQuery, Error, Logger, QueryExecutionError, StopwatchMetrics, StoreError, StoreEvent, 
UnfailOutcome, Value, ENV_VARS, }; -use graph::schema::{ApiSchema, EntityType, InputSchema}; +use graph::schema::{ApiSchema, EntityKey, EntityType, InputSchema}; use web3::types::Address; use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index bc7121a8e1c..3e2b37b0e66 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -32,7 +32,7 @@ use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; use graph::schema::{ - EntityType, FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME, + EntityKey, EntityType, FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME, }; use graph::slog::warn; use inflector::Inflector; @@ -54,7 +54,7 @@ use crate::{ FilterQuery, FindManyQuery, FindQuery, InsertQuery, RevertClampQuery, RevertRemoveQuery, }, }; -use graph::components::store::{DerivedEntityQuery, EntityKey}; +use graph::components::store::DerivedEntityQuery; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::POI_TABLE; diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index ca2d7024b10..e43ee0f4f56 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -13,7 +13,7 @@ use diesel::sql_types::{Array, BigInt, Binary, Bool, Int8, Integer, Jsonb, Range use diesel::Connection; use graph::components::store::write::WriteChunk; -use graph::components::store::{DerivedEntityQuery, EntityKey}; +use graph::components::store::DerivedEntityQuery; use graph::data::store::{NULL, PARENT_ID}; use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; @@ -22,7 +22,7 @@ use graph::prelude::{ EntityFilter, EntityLink, EntityOrder, EntityOrderByChild, 
EntityOrderByChildInfo, EntityRange, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, ENV_VARS, }; -use graph::schema::{EntityType, FulltextAlgorithm, InputSchema}; +use graph::schema::{EntityKey, EntityType, FulltextAlgorithm, InputSchema}; use graph::{components::store::AttributeNames, data::store::scalar}; use inflector::Inflector; use itertools::Itertools; diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 26f884c351b..eed3dbff0a9 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -7,9 +7,7 @@ use std::time::{Duration, Instant}; use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::{ - Batch, DeploymentCursorTracker, DerivedEntityQuery, EntityKey, ReadStore, -}; +use graph::components::store::{Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore}; use graph::constraint_violation; use graph::data::store::scalar::Bytes; use graph::data::store::Value; @@ -19,7 +17,7 @@ use graph::prelude::{ BlockNumber, CacheWeight, Entity, MetricsRegistry, SubgraphDeploymentEntity, SubgraphStore as _, BLOCK_NUMBER_MAX, }; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph::slog::{info, warn}; use graph::tokio::select; use graph::tokio::sync::Notify; diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 8e146c92f8f..44080aedf6a 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -9,14 +9,14 @@ use graph::data_source::CausalityRegion; use graph::data_source::DataSource; use graph::log; use graph::prelude::{QueryStoreManager as _, SubgraphStore as _, *}; +use graph::schema::EntityKey; use graph::schema::EntityType; use graph::schema::InputSchema; use graph::semver::Version; use graph::{ blockchain::block_stream::FirehoseCursor, blockchain::ChainIdentifier, - components::store::DeploymentLocator, 
components::store::EntityKey, - components::store::StatusStore, components::store::StoredDynamicDataSource, - data::subgraph::status, prelude::NodeId, + components::store::DeploymentLocator, components::store::StatusStore, + components::store::StoredDynamicDataSource, data::subgraph::status, prelude::NodeId, }; use graph_graphql::prelude::{ execute_query, Query as PreparedQuery, QueryExecutionOptions, StoreResolver, diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 88320f06f28..31a98fefb1d 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -1,12 +1,12 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{ - DeploymentCursorTracker, DerivedEntityQuery, EntityKey, GetScope, LoadRelatedRequest, - ReadStore, StoredDynamicDataSource, WritableStore, + DeploymentCursorTracker, DerivedEntityQuery, GetScope, LoadRelatedRequest, ReadStore, + StoredDynamicDataSource, WritableStore, }; use graph::data::store::PARENT_ID; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph::{ components::store::{DeploymentId, DeploymentLocator}, prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 75de8fe3681..9e020d1fe15 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -1,8 +1,7 @@ -use graph::components::store::EntityKey; use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; use graph::prelude::SubscriptionResult; -use graph::schema::InputSchema; +use graph::schema::{EntityKey, InputSchema}; use graphql_parser::Pos; use std::iter::FromIterator; use 
std::sync::atomic::{AtomicBool, Ordering}; diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 2b55ce9185c..fd2a7bff95e 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -1,14 +1,14 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::value::Word; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; use std::{marker::PhantomData, str::FromStr}; use test_store::*; use graph::components::store::{ - DeploymentLocator, EntityKey, EntityOrder, EntityQuery, PruneReporter, PruneRequest, - PruningStrategy, VersionStats, + DeploymentLocator, EntityOrder, EntityQuery, PruneReporter, PruneRequest, PruningStrategy, + VersionStats, }; use graph::data::store::scalar; use graph::data::subgraph::schema::*; diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 2acd3953884..0d028dd7a2c 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -1,7 +1,6 @@ //! 
Test mapping of GraphQL schema to a relational schema use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; -use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data::value::Word; use graph::entity; @@ -10,7 +9,7 @@ use graph::prelude::{ EntityOrder, EntityQuery, Logger, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; use graph::prelude::{BlockNumber, MetricsRegistry}; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph_store_postgres::layout_for_tests::set_account_like; use graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index ed347aa4f26..5434178006a 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -2,13 +2,12 @@ use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; use graph::components::store::write::RowGroup; -use graph::components::store::EntityKey; use graph::data::store::scalar; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry, StoreError}; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use hex_literal::hex; use lazy_static::lazy_static; use std::collections::BTreeSet; diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index d6d9b35ba46..ba5bb463d96 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -2,7 +2,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use 
graph::data::subgraph::schema::DeploymentCreate; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use graph_chain_ethereum::{Mapping, MappingABI}; use hex_literal::hex; use lazy_static::lazy_static; @@ -11,7 +11,7 @@ use std::{collections::HashSet, sync::Mutex}; use std::{marker::PhantomData, str::FromStr}; use test_store::*; -use graph::components::store::{DeploymentLocator, EntityKey, ReadStore, WritableStore}; +use graph::components::store::{DeploymentLocator, ReadStore, WritableStore}; use graph::data::subgraph::*; use graph::{ blockchain::DataSource, diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index e2bf3056ac1..a0190f672c9 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -2,13 +2,13 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::subgraph::schema::DeploymentCreate; use graph::data::value::Word; use graph::data_source::CausalityRegion; -use graph::schema::{EntityType, InputSchema}; +use graph::schema::{EntityKey, EntityType, InputSchema}; use lazy_static::lazy_static; use std::collections::BTreeSet; use std::marker::PhantomData; use test_store::*; -use graph::components::store::{DeploymentLocator, DerivedEntityQuery, EntityKey, WritableStore}; +use graph::components::store::{DeploymentLocator, DerivedEntityQuery, WritableStore}; use graph::data::subgraph::*; use graph::semver::Version; use graph::{entity, prelude::*}; From 691079471f70fdc412ea20f272c7539f0fc066d5 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 16:16:09 -0700 Subject: [PATCH 0440/2104] graph, runtime, store: Replace EntityKey::onchain with EntityType.key --- graph/src/components/store/write.rs | 13 +++-- graph/src/data/store/mod.rs | 2 +- graph/src/schema/ast.rs | 4 +- graph/src/schema/entity_key.rs | 14 ++--- graph/src/schema/entity_type.rs | 15 +++++- 
runtime/test/src/test.rs | 4 +- store/test-store/tests/graph/entity_cache.rs | 14 ++--- store/test-store/tests/graphql/query.rs | 4 +- store/test-store/tests/postgres/graft.rs | 6 +-- store/test-store/tests/postgres/relational.rs | 52 +++++-------------- .../tests/postgres/relational_bytes.rs | 16 +++--- store/test-store/tests/postgres/store.rs | 34 ++++++------ store/test-store/tests/postgres/writable.rs | 2 +- 13 files changed, 82 insertions(+), 98 deletions(-) diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 94b1709697b..232a5122930 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -897,8 +897,7 @@ impl<'a> Iterator for WriteChunkIter<'a> { mod test { use crate::{ components::store::{ - write::EntityModification, write::EntityOp, BlockNumber, EntityKey, EntityType, - StoreError, + write::EntityModification, write::EntityOp, BlockNumber, EntityType, StoreError, }, entity, prelude::DeploymentHash, @@ -916,7 +915,7 @@ mod test { .iter() .zip(blocks.iter()) .map(|(value, block)| EntityModification::Remove { - key: EntityKey::onchain(&*ROW_GROUP_TYPE, value.to_string()), + key: ROW_GROUP_TYPE.key(value.to_string()), block: *block, }) .collect(); @@ -989,7 +988,7 @@ mod test { use Mod::*; let value = value.clone(); - let key = EntityKey::onchain(&*THING_TYPE, "one"); + let key = THING_TYPE.key("one"); match value { Ins(block) => EntityModification::Insert { key, @@ -1093,7 +1092,7 @@ mod test { fn last_op() { #[track_caller] fn is_remove(group: &RowGroup, at: BlockNumber) { - let key = EntityKey::onchain(&*THING_TYPE, "one"); + let key = THING_TYPE.key("one"); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1105,7 +1104,7 @@ mod test { } #[track_caller] fn is_write(group: &RowGroup, at: BlockNumber) { - let key = EntityKey::onchain(&*THING_TYPE, "one"); + let key = THING_TYPE.key("one"); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1118,7 +1117,7 @@ mod test { 
use Mod::*; - let key = EntityKey::onchain(&*THING_TYPE, "one"); + let key = THING_TYPE.key("one"); // This will result in two mods int the group: // [ InsC(1,2), InsC(2,3) ] diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 064edade52e..b12b87c29ae 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -1108,7 +1108,7 @@ fn entity_validation() { fn check(thing: Entity, errmsg: &str) { let id = thing.id(); - let key = EntityKey::onchain(&*THING_TYPE, id.clone()); + let key = THING_TYPE.key(id.clone()); let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { diff --git a/graph/src/schema/ast.rs b/graph/src/schema/ast.rs index 33c9d1b683f..633968ca45c 100644 --- a/graph/src/schema/ast.rs +++ b/graph/src/schema/ast.rs @@ -408,7 +408,7 @@ fn entity_validation() { use crate::data::store; use crate::entity; use crate::prelude::{DeploymentHash, Entity}; - use crate::schema::{EntityKey, EntityType, InputSchema}; + use crate::schema::{EntityType, InputSchema}; const DOCUMENT: &str = " enum Color { red, yellow, blue } @@ -440,7 +440,7 @@ fn entity_validation() { fn check(thing: Entity, errmsg: &str) { let id = thing.id(); - let key = EntityKey::onchain(&*THING_TYPE, id.clone()); + let key = THING_TYPE.key(id.clone()); let err = thing.validate(&SCHEMA, &key); if errmsg.is_empty() { diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index be9be42b918..873939db48e 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -31,13 +31,15 @@ impl EntityKey { } impl EntityKey { - // For use in tests only - #[cfg(debug_assertions)] - pub fn onchain(entity_type: &EntityType, entity_id: impl Into) -> Self { + pub(in crate::schema) fn new( + entity_type: EntityType, + entity_id: Word, + causality_region: CausalityRegion, + ) -> Self { Self { - entity_type: entity_type.clone(), - entity_id: entity_id.into().into(), - causality_region: CausalityRegion::ONCHAIN, + entity_type, + 
entity_id, + causality_region, } } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 46344b1fb94..0a48a06afd2 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -5,12 +5,13 @@ use serde::Serialize; use crate::{ cheap_clone::CheapClone, - data::{graphql::ObjectOrInterface, store::IdType}, + data::{graphql::ObjectOrInterface, store::IdType, value::Word}, + data_source::causality_region::CausalityRegion, prelude::s, util::intern::Atom, }; -use super::{input_schema::POI_OBJECT, InputSchema}; +use super::{input_schema::POI_OBJECT, EntityKey, InputSchema}; /// The type name of an entity. This is the string that is used in the /// subgraph's GraphQL schema as `type NAME @entity { .. }` @@ -60,6 +61,16 @@ impl EntityType { self.schema.id_type(self) } + /// Create a key from this type for an onchain entity + pub fn key(&self, id: impl Into) -> EntityKey { + self.key_in(id, CausalityRegion::ONCHAIN) + } + + /// Create a key from this type for an entity in the given causality region + pub fn key_in(&self, id: impl Into, causality_region: CausalityRegion) -> EntityKey { + EntityKey::new(self.cheap_clone(), id.into(), causality_region) + } + fn same_pool(&self, other: &EntityType) -> bool { Arc::ptr_eq(self.schema.pool(), other.schema.pool()) } diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 38168d12ed8..360a694aa07 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -5,7 +5,7 @@ use graph::prelude::web3::types::U256; use graph::runtime::gas::GasCounter; use graph::runtime::{AscIndexId, AscType, HostExportError}; use graph::runtime::{AscPtr, ToAscObj}; -use graph::schema::{EntityKey, EntityType, InputSchema}; +use graph::schema::{EntityType, InputSchema}; use graph::{components::store::*, ipfs_client::IpfsClient}; use graph::{entity, prelude::*}; use graph_chain_ethereum::{Chain, DataSource}; @@ -439,7 +439,7 @@ fn make_thing(id: &str, value: &str) -> (String, 
EntityModification) { static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } let data = entity! { SCHEMA => id: id, value: value, extra: USER_DATA }; - let key = EntityKey::onchain(&*THING_TYPE, id); + let key = THING_TYPE.key(id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), EntityModification::insert(key, data, 0), diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 31a98fefb1d..576871f6e33 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -458,7 +458,7 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age }; EntityOperation::Set { - key: EntityKey::onchain(&*ACCOUNT_TYPE, id), + key: ACCOUNT_TYPE.key(id), data: test_entity, } } @@ -469,7 +469,7 @@ fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { let test_wallet = create_wallet_entity(id, account_id, balance); EntityOperation::Set { - key: EntityKey::onchain(&*WALLET_TYPE, id), + key: WALLET_TYPE.key(id), data: test_wallet, } } @@ -634,7 +634,7 @@ fn check_for_insert_async_not_related() { fn check_for_update_async_related() { run_store_test(|mut cache, store, deployment, writable| async move { let account_id = "1"; - let entity_key = EntityKey::onchain(&*WALLET_TYPE, "1"); + let entity_key = WALLET_TYPE.key("1"); let wallet_entity_update = create_wallet_operation("1", account_id, 79_i32); let new_data = match wallet_entity_update { @@ -671,7 +671,7 @@ fn check_for_update_async_related() { fn check_for_delete_async_related() { run_store_test(|mut cache, store, deployment, _writable| async move { let account_id = "1"; - let del_key = EntityKey::onchain(&*WALLET_TYPE, "1"); + let del_key = WALLET_TYPE.key("1"); // delete wallet 
transact_entity_operations( &store, @@ -701,12 +701,12 @@ fn check_for_delete_async_related() { fn scoped_get() { run_store_test(|mut cache, _store, _deployment, _writable| async move { // Key for an existing entity that is in the store - let key1 = EntityKey::onchain(&*WALLET_TYPE, "1"); + let key1 = WALLET_TYPE.key("1"); let wallet1 = create_wallet_entity("1", "1", 67); // Create a new entity that is not in the store let wallet5 = create_wallet_entity("5", "5", 100); - let key5 = EntityKey::onchain(&*WALLET_TYPE, "5"); + let key5 = WALLET_TYPE.key("5"); cache.set(key5.clone(), wallet5.clone()).unwrap(); // For the new entity, we can retrieve it with either scope @@ -748,7 +748,7 @@ fn no_internal_keys() { assert_eq!(None, entity.get("__typename")); assert_eq!(None, entity.get(&*PARENT_ID)); } - let key = EntityKey::onchain(&*WALLET_TYPE, "1"); + let key = WALLET_TYPE.key("1"); let wallet = writable.get(&key).unwrap().unwrap(); check(&wallet); diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 9e020d1fe15..967d7cfce5d 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -1,7 +1,7 @@ use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; use graph::prelude::SubscriptionResult; -use graph::schema::{EntityKey, InputSchema}; +use graph::schema::InputSchema; use graphql_parser::Pos; use std::iter::FromIterator; use std::sync::atomic::{AtomicBool, Ordering}; @@ -306,7 +306,7 @@ async fn insert_test_entities( .map(|(typename, entities)| { let entity_type = schema.entity_type(typename).unwrap(); entities.into_iter().map(move |data| EntityOperation::Set { - key: EntityKey::onchain(&entity_type, data.id()), + key: entity_type.key(data.id()), data, }) }) diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index fd2a7bff95e..a8550994060 100644 --- a/store/test-store/tests/postgres/graft.rs +++ 
b/store/test-store/tests/postgres/graft.rs @@ -1,6 +1,6 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::value::Word; -use graph::schema::{EntityKey, EntityType, InputSchema}; +use graph::schema::{EntityType, InputSchema}; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; use std::{marker::PhantomData, str::FromStr}; @@ -258,7 +258,7 @@ fn create_test_entity( let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(entity_type).unwrap(); EntityOperation::Set { - key: EntityKey::onchain(&entity_type, id), + key: entity_type.key(id), data: test_entity, } } @@ -322,7 +322,7 @@ async fn check_graft( // Make our own entries for block 2 shaq.set("email", "shaq@gmail.com").unwrap(); let op = EntityOperation::Set { - key: EntityKey::onchain(&*USER_TYPE, "3"), + key: USER_TYPE.key("3"), data: shaq, }; transact_and_wait(&store, &deployment, BLOCKS[2].clone(), vec![op]) diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 0d028dd7a2c..92f16f7a2f5 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -243,7 +243,7 @@ fn insert_entity_at( let entities_with_keys_owned = entities .drain(..) .map(|entity| { - let key = EntityKey::onchain(entity_type, entity.id()); + let key = entity_type.key(entity.id()); (key, entity) }) .collect::>(); @@ -282,7 +282,7 @@ fn update_entity_at( let entities_with_keys_owned: Vec<(EntityKey, Entity)> = entities .drain(..) 
.map(|entity| { - let key = EntityKey::onchain(entity_type, entity.id()); + let key = entity_type.key(entity.id()); (key, entity) }) .collect(); @@ -539,22 +539,14 @@ fn find() { // Happy path: find existing entity let entity = layout - .find( - conn, - &EntityKey::onchain(&*SCALAR_TYPE, "one"), - BLOCK_NUMBER_MAX, - ) + .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&SCALAR_ENTITY), entity); // Find non-existing entity let entity = layout - .find( - conn, - &EntityKey::onchain(&*SCALAR_TYPE, "noone"), - BLOCK_NUMBER_MAX, - ) + .find(conn, &SCALAR_TYPE.key("noone"), BLOCK_NUMBER_MAX) .expect("Failed to read Scalar[noone]"); assert!(entity.is_none()); }); @@ -572,11 +564,7 @@ fn insert_null_fulltext_fields() { // Find entity with null string values let entity = layout - .find( - conn, - &EntityKey::onchain(&*NULLABLE_STRINGS_TYPE, "one"), - BLOCK_NUMBER_MAX, - ) + .find(conn, &NULLABLE_STRINGS_TYPE.key("one"), BLOCK_NUMBER_MAX) .expect("Failed to read NullableStrings[one]") .unwrap(); assert_entity_eq!(scrub(&EMPTY_NULLABLESTRINGS_ENTITY), entity); @@ -593,7 +581,7 @@ fn update() { entity.set("string", "updated").unwrap(); entity.remove("strings"); entity.set("bool", Value::Null).unwrap(); - let key = EntityKey::onchain(&*SCALAR_TYPE, entity.id()); + let key = SCALAR_TYPE.key(entity.id()); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let entities = vec![(key, entity.clone())]; @@ -603,11 +591,7 @@ fn update() { .expect("Failed to update"); let actual = layout - .find( - conn, - &EntityKey::onchain(&*SCALAR_TYPE, "one"), - BLOCK_NUMBER_MAX, - ) + .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&entity), actual); @@ -647,7 +631,7 @@ fn update_many() { let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let keys: Vec = ["one", "two", "three"] .iter() - .map(|id| 
EntityKey::onchain(&*SCALAR_TYPE, *id)) + .map(|id| SCALAR_TYPE.key(*id)) .collect(); let entities_vec = vec![one, two, three]; @@ -662,11 +646,7 @@ fn update_many() { .iter() .map(|&id| { layout - .find( - conn, - &EntityKey::onchain(&*SCALAR_TYPE, id), - BLOCK_NUMBER_MAX, - ) + .find(conn, &SCALAR_TYPE.key(id), BLOCK_NUMBER_MAX) .unwrap_or_else(|_| panic!("Failed to read Scalar[{}]", id)) .unwrap() }) @@ -718,7 +698,7 @@ fn serialize_bigdecimal() { let d = BigDecimal::from_str(d).unwrap(); entity.set("bigDecimal", d).unwrap(); - let key = EntityKey::onchain(&*SCALAR_TYPE, entity.id()); + let key = SCALAR_TYPE.key(entity.id()); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let entities = vec![(key, entity.clone())]; let group = row_group_update(&entity_type, 0, entities); @@ -727,11 +707,7 @@ fn serialize_bigdecimal() { .expect("Failed to update"); let actual = layout - .find( - conn, - &EntityKey::onchain(&*SCALAR_TYPE, "one"), - BLOCK_NUMBER_MAX, - ) + .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(entity, actual); @@ -764,7 +740,7 @@ fn delete() { insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]); // Delete where nothing is getting deleted - let key = EntityKey::onchain(&*SCALAR_TYPE, "no such entity"); + let key = SCALAR_TYPE.key("no such entity"); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let mut entity_keys = vec![key]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); @@ -805,7 +781,7 @@ fn insert_many_and_delete_many() { // Delete entities with ids equal to "two" and "three" let entity_keys: Vec<_> = vec!["two", "three"] .into_iter() - .map(|key| EntityKey::onchain(&*SCALAR_TYPE, key)) + .map(|key| SCALAR_TYPE.key(key)) .collect(); let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys); let num_removed = layout @@ -920,7 +896,7 @@ fn revert_block() { let assert_fred = |name: &str| { let fred = layout - 
.find(conn, &EntityKey::onchain(&*CAT_TYPE, id), BLOCK_NUMBER_MAX) + .find(conn, &CAT_TYPE.key(id), BLOCK_NUMBER_MAX) .unwrap() .expect("there's a fred"); assert_eq!(name, fred.get("name").unwrap().as_str().unwrap()) diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 5434178006a..fc372982528 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -119,7 +119,7 @@ pub fn row_group_delete( fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity: Entity) { let entity_type = layout.input_schema.entity_type(entity_type).unwrap(); - let key = EntityKey::onchain(&entity_type, entity.id()); + let key = entity_type.key(entity.id()); let entities = vec![(key.clone(), entity)]; let group = row_group_insert(&entity_type, 0, entities); @@ -216,7 +216,7 @@ fn bad_id() { layout: &Layout, id: &str, ) -> Result, StoreError> { - let key = EntityKey::onchain(&*THING_TYPE, id); + let key = THING_TYPE.key(id); layout.find(conn, &key, BLOCK_NUMBER_MAX) } @@ -256,7 +256,7 @@ fn bad_id() { fn find() { run_test(|conn, layout| { fn find_entity(conn: &PgConnection, layout: &Layout, id: &str) -> Option { - let key = EntityKey::onchain(&*THING_TYPE, id); + let key = THING_TYPE.key(id); layout .find(conn, &key, BLOCK_NUMBER_MAX) .expect(&format!("Failed to read Thing[{}]", id)) @@ -321,7 +321,7 @@ fn update() { // Update the entity let mut entity = BEEF_ENTITY.clone(); entity.set("name", "Moo").unwrap(); - let key = EntityKey::onchain(&*THING_TYPE, entity.id()); + let key = THING_TYPE.key(entity.id()); let entity_id = entity.id(); let entity_type = key.entity_type.clone(); @@ -332,11 +332,7 @@ fn update() { .expect("Failed to update"); let actual = layout - .find( - conn, - &EntityKey::onchain(&*THING_TYPE, entity_id), - BLOCK_NUMBER_MAX, - ) + .find(conn, &THING_TYPE.key(entity_id), BLOCK_NUMBER_MAX) .expect("Failed to read 
Thing[deadbeef]") .unwrap(); @@ -355,7 +351,7 @@ fn delete() { insert_entity(conn, layout, "Thing", two); // Delete where nothing is getting deleted - let key = EntityKey::onchain(&*THING_TYPE, "ffff".to_owned()); + let key = THING_TYPE.key("ffff"); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.clone()]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index ba5bb463d96..113a7eeb90c 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -2,7 +2,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::DeploymentCreate; -use graph::schema::{EntityKey, EntityType, InputSchema}; +use graph::schema::{EntityType, InputSchema}; use graph_chain_ethereum::{Mapping, MappingABI}; use hex_literal::hex; use lazy_static::lazy_static; @@ -282,7 +282,7 @@ fn create_test_entity( }; EntityOperation::Set { - key: EntityKey::onchain(entity_type, id), + key: entity_type.key(id), data: test_entity, } } @@ -305,7 +305,7 @@ fn get_entity_count(store: Arc, subgraph_id: &DeploymentHash) -> u6 #[test] fn delete_entity() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::onchain(&*USER_TYPE, "3"); + let entity_key = USER_TYPE.key("3"); // Check that there is an entity to remove. 
writable.get(&entity_key).unwrap().unwrap(); @@ -334,7 +334,7 @@ fn get_entity_1() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = EntityKey::onchain(&*USER_TYPE, "1"); + let key = USER_TYPE.key("1"); let result = writable.get(&key).unwrap(); let bin_name = Value::Bytes("Johnton".as_bytes().into()); @@ -360,7 +360,7 @@ fn get_entity_1() { fn get_entity_3() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = EntityKey::onchain(&*USER_TYPE, "3"); + let key = USER_TYPE.key("3"); let result = writable.get(&key).unwrap(); let expected_entity = entity! { schema => @@ -383,7 +383,7 @@ fn get_entity_3() { #[test] fn insert_entity() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::onchain(&*USER_TYPE, "7".to_owned()); + let entity_key = USER_TYPE.key("7"); let test_entity = create_test_entity( "7", &*USER_TYPE, @@ -413,7 +413,7 @@ fn insert_entity() { #[test] fn update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); + let entity_key = USER_TYPE.key("1"); let op = create_test_entity( "1", @@ -459,7 +459,7 @@ fn update_existing() { #[test] fn partially_update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); + let entity_key = USER_TYPE.key("1"); let schema = writable.input_schema(); let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -1024,7 +1024,7 @@ fn revert_block_with_delete() { .desc("name"); // Delete entity with id=2 - let del_key = EntityKey::onchain(&*USER_TYPE, "2"); + let del_key = USER_TYPE.key("2"); // Process deletion transact_and_wait( @@ -1069,7 +1069,7 @@ fn revert_block_with_delete() { #[test] fn revert_block_with_partial_update() { run_test(|store, writable, deployment| async move { - let entity_key = EntityKey::onchain(&*USER_TYPE, "1"); + let entity_key = USER_TYPE.key("1"); let schema = writable.input_schema(); let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -1165,7 +1165,7 @@ fn revert_block_with_dynamic_data_source_operations() { let schema = writable.input_schema(); // Create operations to add a user - let user_key = EntityKey::onchain(&*USER_TYPE, "1"); + let user_key = USER_TYPE.key("1"); let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; // Get the original user for comparisons @@ -1295,7 +1295,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { added_entities .iter() .map(|(id, data)| EntityOperation::Set { - key: EntityKey::onchain(&*USER_TYPE, id), + key: USER_TYPE.key(id.as_str()), data: data.clone(), }) .collect(), @@ -1306,13 +1306,13 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { // Update an entity in the store let updated_entity = entity! { schema => id: "1", name: "Johnny" }; let update_op = EntityOperation::Set { - key: EntityKey::onchain(&*USER_TYPE, "1"), + key: USER_TYPE.key("1"), data: updated_entity.clone(), }; // Delete an entity in the store let delete_op = EntityOperation::Remove { - key: EntityKey::onchain(&*USER_TYPE, "2"), + key: USER_TYPE.key("2"), }; // Commit update & delete ops @@ -1501,7 +1501,7 @@ fn handle_large_string_with_index() { ) -> EntityModification { let data = entity! 
{ schema => id: id, name: name }; - let key = EntityKey::onchain(&*USER_TYPE, id); + let key = USER_TYPE.key(id); EntityModification::insert(key, data, block) } @@ -1596,7 +1596,7 @@ fn handle_large_bytea_with_index() { ) -> EntityModification { let data = entity! { schema => id: id, bin_name: scalar::Bytes::from(name) }; - let key = EntityKey::onchain(&*USER_TYPE, id); + let key = USER_TYPE.key(id); EntityModification::insert(key, data, block) } @@ -1801,7 +1801,7 @@ fn window() { let entity = entity! { TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color }; EntityOperation::Set { - key: EntityKey::onchain(entity_type, id), + key: entity_type.key(id), data: entity, } } diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index a0190f672c9..14c8ec327e4 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -107,7 +107,7 @@ fn block_pointer(number: u8) -> BlockPtr { } fn count_key(id: &str) -> EntityKey { - EntityKey::onchain(&*COUNTER_TYPE, id) + COUNTER_TYPE.key(id) } async fn insert_count(store: &Arc, deployment: &DeploymentLocator, count: u8) { From 770e2f36ed4d325fb14fd3208ad5384efbe941fb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 15 Sep 2023 19:01:27 -0700 Subject: [PATCH 0441/2104] all: Force using a constructor for EntityKey --- chain/substreams/src/mapper.rs | 10 +++---- core/src/subgraph/runner.rs | 20 +++++++------- graph/src/schema/entity_key.rs | 13 ++++++---- runtime/wasm/src/host_exports.rs | 18 +++---------- store/postgres/src/relational.rs | 26 +++++-------------- store/postgres/src/relational_queries.rs | 10 ++----- store/test-store/src/store.rs | 8 +----- store/test-store/tests/graph/entity_cache.rs | 12 ++------- .../tests/postgres/relational_bytes.rs | 12 ++------- 9 files changed, 37 insertions(+), 92 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 
0402983d175..eca56cd991b 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -12,7 +12,7 @@ use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::BigDecimal; use graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, BlockPtr, Logger, Value}; -use graph::schema::{EntityKey, InputSchema}; +use graph::schema::InputSchema; use graph::slog::o; use graph::substreams::Clock; use graph::substreams_rpc::response::Message as SubstreamsMessage; @@ -151,11 +151,9 @@ fn parse_changes( } } }; - let key = EntityKey { - entity_type, - entity_id: Word::from(entity_id), - causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data - }; + // Substreams don't currently support offchain data + let key = entity_type.key_in(Word::from(entity_id), CausalityRegion::ONCHAIN); + let id = schema.id_value(&key)?; parsed_data.insert(Word::from("id"), id); diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 30e54adad5a..5db94b7b286 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -1095,17 +1095,15 @@ async fn update_proof_of_indexing( for (causality_region, stream) in proof_of_indexing.drain() { // Create the special POI entity key specific to this causality_region - let entity_key = EntityKey { - entity_type: entity_cache.schema.poi_type().clone(), - - // There are two things called causality regions here, one is the causality region for - // the poi which is a string and the PoI entity id. The other is the data source - // causality region to which the PoI belongs as an entity. Currently offchain events do - // not affect PoI so it is assumed to be `ONCHAIN`. - // See also: poi-ignores-offchain - entity_id: causality_region.into(), - causality_region: CausalityRegion::ONCHAIN, - }; + // There are two things called causality regions here, one is the causality region for + // the poi which is a string and the PoI entity id. 
The other is the data source + // causality region to which the PoI belongs as an entity. Currently offchain events do + // not affect PoI so it is assumed to be `ONCHAIN`. + // See also: poi-ignores-offchain + let entity_key = entity_cache + .schema + .poi_type() + .key_in(causality_region, CausalityRegion::ONCHAIN); // Grab the current digest attribute on this entity let poi_digest = entity_cache.schema.poi_digest().clone(); diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index 873939db48e..bb4fdf053b0 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -22,6 +22,8 @@ pub struct EntityKey { /// doing the lookup. So if the entity exists but was created on a different causality region, /// the lookup will return empty. pub causality_region: CausalityRegion, + + _force_use_of_new: (), } impl EntityKey { @@ -40,16 +42,17 @@ impl EntityKey { entity_type, entity_id, causality_region, + _force_use_of_new: (), } } pub fn from(id: &String, load_related_request: &LoadRelatedRequest) -> Self { let clone = load_related_request.clone(); - Self { - entity_id: id.clone().into(), - entity_type: clone.entity_type, - causality_region: clone.causality_region, - } + Self::new( + clone.entity_type, + Word::from(id.as_str()), + clone.causality_region, + ) } } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 186dd6566bf..280fdccd98f 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -177,11 +177,7 @@ impl HostExports { } let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let key = EntityKey { - entity_type, - entity_id: entity_id.into(), - causality_region: self.data_source_causality_region, - }; + let key = entity_type.key_in(entity_id, self.data_source_causality_region); self.check_entity_type_access(&key.entity_type)?; gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; @@ -242,11 +238,7 @@ impl 
HostExports { logger, ); let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let key = EntityKey { - entity_type, - entity_id: entity_id.into(), - causality_region: self.data_source_causality_region, - }; + let key = entity_type.key_in(entity_id, self.data_source_causality_region); self.check_entity_type_access(&key.entity_type)?; gas.consume_host_fn(gas::STORE_REMOVE.with_args(complexity::Size, &key))?; @@ -265,11 +257,7 @@ impl HostExports { scope: GetScope, ) -> Result>, anyhow::Error> { let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let store_key = EntityKey { - entity_type, - entity_id: entity_id.into(), - causality_region: self.data_source_causality_region, - }; + let store_key = entity_type.key_in(entity_id, self.data_source_causality_region); self.check_entity_type_access(&store_key.entity_type)?; let result = state.entity_cache.get(&store_key, scope)?; diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 3e2b37b0e66..a0f1777f7dc 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -562,11 +562,8 @@ impl Layout { let entity_type = data.entity_type(&self.input_schema); let entity_data: Entity = data.deserialize_with_layout(self, None)?; - let key = EntityKey { - entity_type, - entity_id: entity_data.id(), - causality_region: CausalityRegion::from_entity(&entity_data), - }; + let key = + entity_type.key_in(entity_data.id(), CausalityRegion::from_entity(&entity_data)); if entities.contains_key(&key) { return Err(constraint_violation!( "duplicate entity {}[{}] in result set, block = {}", @@ -596,11 +593,8 @@ impl Layout { for data in query.load::(conn)? 
{ let entity_type = data.entity_type(&self.input_schema); let entity_data: Entity = data.deserialize_with_layout(self, None)?; - let key = EntityKey { - entity_type, - entity_id: entity_data.id(), - causality_region: CausalityRegion::from_entity(&entity_data), - }; + let key = + entity_type.key_in(entity_data.id(), CausalityRegion::from_entity(&entity_data)); entities.insert(key, entity_data); } @@ -636,11 +630,7 @@ impl Layout { processed_entities.insert((entity_type.clone(), entity_id.clone())); changes.push(EntityOperation::Set { - key: EntityKey { - entity_type, - entity_id, - causality_region: CausalityRegion::from_entity(&data), - }, + key: entity_type.key_in(entity_id, CausalityRegion::from_entity(&data)), data, }); } @@ -653,11 +643,7 @@ impl Layout { // about why this check is necessary. if !processed_entities.contains(&(entity_type.clone(), entity_id.clone())) { changes.push(EntityOperation::Remove { - key: EntityKey { - entity_type, - entity_id, - causality_region: del.causality_region(), - }, + key: entity_type.key_in(entity_id, del.causality_region()), }); } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index e43ee0f4f56..a8c978e5d1f 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -1517,12 +1517,6 @@ impl<'a> QueryFragment for FindQuery<'a> { fn walk_ast(&self, mut out: AstPass) -> QueryResult<()> { out.unsafe_to_cache_prepared(); - let EntityKey { - entity_type: _, - entity_id, - causality_region, - } = self.key; - // Generate // select '..' 
as entity, to_jsonb(e.*) as data // from schema.table e where id = $1 @@ -1532,11 +1526,11 @@ impl<'a> QueryFragment for FindQuery<'a> { out.push_sql(" from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" e\n where "); - self.table.primary_key().eq(entity_id, &mut out)?; + self.table.primary_key().eq(&self.key.entity_id, &mut out)?; out.push_sql(" and "); if self.table.has_causality_region { out.push_sql("causality_region = "); - out.push_bind_param::(causality_region)?; + out.push_bind_param::(&self.key.causality_region)?; out.push_sql(" and "); } BlockRangeColumn::new(self.table, "e.", self.block).contains(&mut out) diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 44080aedf6a..b989683c7c2 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -5,11 +5,9 @@ use graph::data::query::QueryResults; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError}; use graph::data::subgraph::SubgraphFeature; -use graph::data_source::CausalityRegion; use graph::data_source::DataSource; use graph::log; use graph::prelude::{QueryStoreManager as _, SubgraphStore as _, *}; -use graph::schema::EntityKey; use graph::schema::EntityType; use graph::schema::InputSchema; use graph::semver::Version; @@ -414,11 +412,7 @@ pub async fn insert_entities( let insert_ops = entities .into_iter() .map(|(entity_type, data)| EntityOperation::Set { - key: EntityKey { - entity_type, - entity_id: data.get("id").unwrap().clone().as_string().unwrap().into(), - causality_region: CausalityRegion::ONCHAIN, - }, + key: entity_type.key(data.id()), data, }); diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 576871f6e33..271fb9b2e40 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -179,11 +179,7 @@ impl WritableStore for MockStore { } fn make_band_key(id: &'static 
str) -> EntityKey { - EntityKey { - entity_type: SCHEMA.entity_type("Band").unwrap(), - entity_id: id.into(), - causality_region: CausalityRegion::ONCHAIN, - } + SCHEMA.entity_type("Band").unwrap().key(id) } fn sort_by_entity_key(mut mods: Vec) -> Vec { @@ -231,11 +227,7 @@ fn insert_modifications() { fn entity_version_map(entity_type: &str, entities: Vec) -> BTreeMap { let mut map = BTreeMap::new(); for entity in entities { - let key = EntityKey { - entity_type: SCHEMA.entity_type(entity_type).unwrap(), - entity_id: entity.id().into(), - causality_region: CausalityRegion::ONCHAIN, - }; + let key = SCHEMA.entity_type(entity_type).unwrap().key(entity.id()); map.insert(key, entity); } map diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index fc372982528..5d25ed80def 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -298,16 +298,8 @@ fn find_many() { .expect("Failed to read many things"); assert_eq!(2, entities.len()); - let id_key = EntityKey { - entity_id: ID.into(), - entity_type: THING_TYPE.clone(), - causality_region: CausalityRegion::ONCHAIN, - }; - let id2_key = EntityKey { - entity_id: ID2.into(), - entity_type: THING_TYPE.clone(), - causality_region: CausalityRegion::ONCHAIN, - }; + let id_key = THING_TYPE.key(ID); + let id2_key = THING_TYPE.key(ID2); assert!(entities.contains_key(&id_key), "Missing ID"); assert!(entities.contains_key(&id2_key), "Missing ID2"); }); From 486b11080ef30b3e27b1d9071849fee300af5f65 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 16 Sep 2023 16:06:49 -0700 Subject: [PATCH 0442/2104] graph: Make id_value a function on EntityType --- chain/substreams/src/mapper.rs | 2 +- graph/src/schema/entity_key.rs | 7 +++++++ graph/src/schema/entity_type.rs | 17 ++++++++++++++++- graph/src/schema/input_schema.rs | 22 +++------------------- runtime/wasm/src/host_exports.rs | 2 +- 5 files 
changed, 28 insertions(+), 22 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index eca56cd991b..532b9da52fd 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -154,7 +154,7 @@ fn parse_changes( // Substreams don't currently support offchain data let key = entity_type.key_in(Word::from(entity_id), CausalityRegion::ONCHAIN); - let id = schema.id_value(&key)?; + let id = key.id_value()?; parsed_data.insert(Word::from("id"), id); let changes = match entity_change.operation() { diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index bb4fdf053b0..5061fa9df27 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -1,6 +1,9 @@ use std::fmt; +use anyhow::Error; + use crate::components::store::{LoadRelatedRequest, StoreError}; +use crate::data::store::Value; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::schema::EntityType; @@ -54,6 +57,10 @@ impl EntityKey { clone.causality_region, ) } + + pub fn id_value(&self) -> Result { + self.entity_type.id_value(self.entity_id.clone()) + } } impl std::fmt::Debug for EntityKey { diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index 0a48a06afd2..dec75dc0baa 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -1,10 +1,11 @@ use std::{borrow::Borrow, fmt, sync::Arc}; -use anyhow::{bail, Error}; +use anyhow::{bail, Context, Error}; use serde::Serialize; use crate::{ cheap_clone::CheapClone, + data::store::Value, data::{graphql::ObjectOrInterface, store::IdType, value::Word}, data_source::causality_region::CausalityRegion, prelude::s, @@ -71,6 +72,20 @@ impl EntityType { EntityKey::new(self.cheap_clone(), id.into(), causality_region) } + /// Construct a `Value` for the given id and parse it into the correct + /// type if necessary + pub fn id_value(&self, id: impl Into) -> Result { + let id = id.into(); + let 
id_type = self + .schema + .id_type(self) + .with_context(|| format!("error determining id_type for {}[{}]", self.as_str(), id))?; + match id_type { + IdType::String => Ok(Value::String(id.to_string())), + IdType::Bytes => Ok(Value::Bytes(id.parse()?)), + } + } + fn same_pool(&self, other: &EntityType) -> bool { Arc::ptr_eq(self.schema.pool(), other.schema.pool()) } diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 751e2e18503..f4c5953cbd3 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -1,17 +1,14 @@ use std::collections::{BTreeMap, HashMap, HashSet}; -use std::str::FromStr; use std::sync::Arc; -use anyhow::{anyhow, Context, Error}; +use anyhow::{anyhow, Error}; use store::Entity; use crate::cheap_clone::CheapClone; use crate::components::store::LoadRelatedRequest; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::store::{ - self, scalar, EntityValidationError, IntoEntityIterator, TryIntoEntityIterator, -}; +use crate::data::store::{self, EntityValidationError, IntoEntityIterator, TryIntoEntityIterator}; use crate::data::value::Word; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; @@ -19,7 +16,7 @@ use crate::schema::api_schema; use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, AsEntityTypeName, EntityKey, EntityType, Schema, SchemaValidationError}; +use super::{ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError}; /// The name of the PoI entity type pub(crate) const POI_OBJECT: &str = "Poi$"; @@ -273,19 +270,6 @@ impl InputSchema { } } - /// Construct a value for the entity type's id attribute - pub fn id_value(&self, key: &EntityKey) -> Result { - let id_type = self - .id_type(&key.entity_type) - .with_context(|| format!("error determining id_type for {:?}", key))?; - match id_type { - 
store::IdType::String => Ok(store::Value::String(key.entity_id.to_string())), - store::IdType::Bytes => Ok(store::Value::Bytes(scalar::Bytes::from_str( - &key.entity_id, - )?)), - } - } - pub(in crate::schema) fn is_immutable(&self, entity_type: Atom) -> bool { self.inner.immutable_types.contains(&entity_type) } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 280fdccd98f..c1c4aea0cc2 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -191,7 +191,7 @@ impl HostExports { // The validation will catch the type mismatch } None => { - let value = state.entity_cache.schema.id_value(&key)?; + let value = key.entity_type.id_value(key.entity_id.clone())?; data.insert(store::ID.clone(), value); } } From 0a676c61142d5c852e1000483419ddc9be4242ae Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sat, 16 Sep 2023 16:20:39 -0700 Subject: [PATCH 0443/2104] graph, store: Move find_object_type to EntityType --- graph/src/data/store/mod.rs | 2 +- graph/src/schema/entity_type.rs | 4 +++ graph/src/schema/input_schema.rs | 5 +++- store/postgres/src/fork.rs | 49 +++++++++++++------------------- 4 files changed, 28 insertions(+), 32 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index b12b87c29ae..f4f55fcad14 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -921,7 +921,7 @@ impl Entity { return Ok(()); } - let object_type = schema.find_object_type(&key.entity_type).ok_or_else(|| { + let object_type = key.entity_type.object_type().ok_or_else(|| { EntityValidationError::UnknownEntityType { entity: key.entity_type.to_string(), id: key.entity_id.to_string(), diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index dec75dc0baa..e51fb2d701d 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -62,6 +62,10 @@ impl EntityType { self.schema.id_type(self) } + pub fn object_type(&self) -> 
Option<&s::ObjectType> { + self.schema.find_object_type(self) + } + /// Create a key from this type for an onchain entity pub fn key(&self, id: impl Into) -> EntityKey { self.key_in(id, CausalityRegion::ONCHAIN) diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index f4c5953cbd3..54043caba3b 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -287,7 +287,10 @@ impl InputSchema { self.inner.schema.interfaces_for_type(type_name) } - pub fn find_object_type(&self, entity_type: &EntityType) -> Option<&s::ObjectType> { + pub(in crate::schema) fn find_object_type( + &self, + entity_type: &EntityType, + ) -> Option<&s::ObjectType> { self.inner .schema .document diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index ce3846322e6..aa6d8b4422e 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -7,13 +7,11 @@ use std::{ use graph::{ block_on, components::store::SubgraphFork as SubgraphForkTrait, + constraint_violation, data::graphql::ext::DirectiveFinder, prelude::{ - info, - r::Value as RValue, - reqwest, - s::{Field, ObjectType}, - serde_json, DeploymentHash, Entity, Logger, Serialize, StoreError, Value, ValueType, + info, r::Value as RValue, reqwest, s::Field, serde_json, DeploymentHash, Entity, Logger, + Serialize, StoreError, Value, ValueType, }, url::Url, }; @@ -47,7 +45,7 @@ pub(crate) struct SubgraphFork { } impl SubgraphForkTrait for SubgraphFork { - fn fetch(&self, entity_type: String, id: String) -> Result, StoreError> { + fn fetch(&self, entity_type_name: String, id: String) -> Result, StoreError> { { let mut fids = self.fetched_ids.lock().map_err(|e| { StoreError::ForkFailure(format!( @@ -56,22 +54,28 @@ impl SubgraphForkTrait for SubgraphFork { )) })?; if fids.contains(&id) { - info!(self.logger, "Already fetched entity! Abort!"; "entity_type" => entity_type, "id" => id); + info!(self.logger, "Already fetched entity! 
Abort!"; "entity_type" => entity_type_name, "id" => id); return Ok(None); } fids.insert(id.clone()); } - info!(self.logger, "Fetching entity from {}", &self.endpoint; "entity_type" => &entity_type, "id" => &id); + info!(self.logger, "Fetching entity from {}", &self.endpoint; "entity_type" => &entity_type_name, "id" => &id); // NOTE: Subgraph fork compatibility checking (similar to the grafting compatibility checks) // will be added in the future (in a separate PR). // Currently, forking incompatible subgraphs is allowed, but, for example, storing the // incompatible fetched entities in the local store results in an error. + let entity_type = self.schema.entity_type(&entity_type_name)?; + let fields = &entity_type + .object_type() + .ok_or_else(|| { + constraint_violation!("no object type called `{}` found", entity_type_name) + })? + .fields; - let fields = self.get_fields_of(&entity_type)?; let query = Query { - query: self.query_string(&entity_type, fields)?, + query: self.query_string(&entity_type_name, fields)?, variables: Variables { id }, }; let raw_json = block_on(self.send(&query))?; @@ -82,7 +86,8 @@ impl SubgraphForkTrait for SubgraphFork { ))); } - let entity = SubgraphFork::extract_entity(&self.schema, &raw_json, &entity_type, fields)?; + let entity = + SubgraphFork::extract_entity(&self.schema, &raw_json, &entity_type_name, fields)?; Ok(entity) } } @@ -129,20 +134,6 @@ impl SubgraphFork { Ok(res) } - fn get_fields_of(&self, entity_type: &str) -> Result<&Vec, StoreError> { - let entity_type = self.schema.entity_type(entity_type)?; - let entity: Option<&ObjectType> = self.schema.find_object_type(&entity_type); - - if entity.is_none() { - return Err(StoreError::ForkFailure(format!( - "No object type definition with entity type `{}` found in the GraphQL schema supplied by the user.", - entity_type - ))); - } - - Ok(&entity.unwrap().fields) - } - fn query_string(&self, entity_type: &str, fields: &[Field]) -> Result { let names = fields .iter() @@ -308,14 
+299,12 @@ mod tests { #[test] fn test_get_fields_of() { - let base = test_base(); - let id = test_id(); let schema = test_schema(); - let logger = test_logger(); - let fork = SubgraphFork::new(base, id, schema, logger).unwrap(); + let entity_type = schema.entity_type("Gravatar").unwrap(); + let fields = &entity_type.object_type().unwrap().fields; - assert_eq!(fork.get_fields_of("Gravatar").unwrap(), &test_fields()); + assert_eq!(fields, &test_fields()); } #[test] From 7845657524c831bd8019c212dcbbfaa1763aeff8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 18 Sep 2023 10:14:45 -0700 Subject: [PATCH 0444/2104] graph: Consolidate type information in InputSchema --- graph/src/data/store/mod.rs | 20 ++++ graph/src/schema/entity_type.rs | 4 +- graph/src/schema/input_schema.rs | 164 +++++++++++++++++++------------ graph/src/util/intern.rs | 5 +- 4 files changed, 126 insertions(+), 67 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index f4f55fcad14..af18c4c88af 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -182,6 +182,26 @@ pub enum IdType { Bytes, } +impl TryFrom<&s::ObjectType> for IdType { + type Error = Error; + + fn try_from(obj_type: &s::ObjectType) -> Result { + let base_type = obj_type.field("id").unwrap().field_type.get_base_type(); + + match base_type { + "ID" | "String" => Ok(IdType::String), + "Bytes" => Ok(IdType::Bytes), + s => { + return Err(anyhow!( + "Entity type {} uses illegal type {} for id column", + obj_type.name, + s + )) + } + } + } +} + // Note: Do not modify fields without also making a backward compatible change to the StableHash impl (below) /// An attribute value is represented as an enum with variants for all supported value types. 
#[derive(Clone, Deserialize, Serialize, PartialEq, Eq)] diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index e51fb2d701d..e39ba78678e 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -59,7 +59,7 @@ impl EntityType { } pub fn id_type(&self) -> Result { - self.schema.id_type(self) + self.schema.id_type(self.atom) } pub fn object_type(&self) -> Option<&s::ObjectType> { @@ -82,7 +82,7 @@ impl EntityType { let id = id.into(); let id_type = self .schema - .id_type(self) + .id_type(self.atom) .with_context(|| format!("error determining id_type for {}[{}]", self.as_str(), id))?; match id_type { IdType::String => Ok(Value::String(id.to_string())), diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 54043caba3b..925e590f65e 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::sync::Arc; use anyhow::{anyhow, Error}; @@ -8,7 +8,9 @@ use crate::cheap_clone::CheapClone; use crate::components::store::LoadRelatedRequest; use crate::data::graphql::ext::DirectiveFinder; use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; -use crate::data::store::{self, EntityValidationError, IntoEntityIterator, TryIntoEntityIterator}; +use crate::data::store::{ + self, EntityValidationError, IdType, IntoEntityIterator, TryIntoEntityIterator, +}; use crate::data::value::Word; use crate::prelude::q::Value; use crate::prelude::{s, DeploymentHash}; @@ -16,7 +18,9 @@ use crate::schema::api_schema; use crate::util::intern::{Atom, AtomPool}; use super::fulltext::FulltextDefinition; -use super::{ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError}; +use super::{ + ApiSchema, AsEntityTypeName, EntityType, Schema, SchemaValidationError, SCHEMA_TYPE_NAME, +}; /// The name of the PoI entity type pub(crate) const 
POI_OBJECT: &str = "Poi$"; @@ -36,12 +40,61 @@ pub struct InputSchema { inner: Arc, } +#[derive(Debug, PartialEq)] +enum TypeKind { + MutableObject(IdType), + ImmutableObject(IdType), + Interface, +} + +impl TypeKind { + fn is_object(&self) -> bool { + match self { + TypeKind::MutableObject(_) | TypeKind::ImmutableObject(_) => true, + TypeKind::Interface => false, + } + } + + fn id_type(&self) -> Option { + match self { + TypeKind::MutableObject(id_type) | TypeKind::ImmutableObject(id_type) => Some(*id_type), + TypeKind::Interface => None, + } + } +} + +#[derive(Debug, PartialEq)] +struct TypeInfo { + name: Atom, + kind: TypeKind, + fields: Box<[Atom]>, +} + +impl TypeInfo { + fn new(pool: &AtomPool, obj_type: &s::ObjectType) -> Self { + // The `unwrap` of `lookup` is safe because the pool was just + // constructed against the underlying schema + let name = pool.lookup(&obj_type.name).unwrap(); + let fields = obj_type + .fields + .iter() + .map(|field| pool.lookup(&field.name).unwrap()) + .collect::>() + .into_boxed_slice(); + let id_type = IdType::try_from(obj_type).expect("validation caught any issues here"); + let kind = if obj_type.is_immutable() { + TypeKind::ImmutableObject(id_type) + } else { + TypeKind::MutableObject(id_type) + }; + Self { name, kind, fields } + } +} + #[derive(Debug, PartialEq)] pub struct Inner { schema: Schema, - immutable_types: HashSet, - // Maps each entity type to its field names - field_names: HashMap>, + type_infos: Box<[TypeInfo]>, pool: Arc, } @@ -57,39 +110,20 @@ impl InputSchema { fn create(schema: Schema) -> Self { let pool = Arc::new(atom_pool(&schema.document)); - // The `unwrap` of `lookup` is safe because we just created the pool - let immutable_types = HashSet::from_iter( - schema - .document - .get_object_type_definitions() - .into_iter() - .filter(|obj_type| obj_type.is_immutable()) - .map(|obj_type| pool.lookup(&obj_type.name).unwrap()) - .collect::>(), - ); - - let field_names = HashMap::from_iter( - schema - 
.document - .get_object_type_definitions() - .into_iter() - .map(|obj_type| { - let fields: Vec<_> = obj_type - .fields - .iter() - .map(|field| pool.lookup(&field.name).unwrap()) - .collect(); - let type_atom = pool.lookup(&obj_type.name).unwrap(); - (type_atom, fields) - }) - .collect::>(), - ); + let mut type_infos: Vec<_> = schema + .document + .get_object_type_definitions() + .into_iter() + .filter(|obj_type| obj_type.name != SCHEMA_TYPE_NAME) + .map(|obj_type| TypeInfo::new(&pool, obj_type)) + .collect(); + type_infos.sort_by_key(|ti| ti.name); + let type_infos = type_infos.into_boxed_slice(); Self { inner: Arc::new(Inner { schema, - immutable_types, - field_names, + type_infos, pool, }), } @@ -242,36 +276,39 @@ impl InputSchema { } } - pub(in crate::schema) fn id_type( - &self, - entity_type: &EntityType, - ) -> Result { - let base_type = self - .inner - .schema - .document - .get_object_type_definition(entity_type.as_str()) - .ok_or_else(|| anyhow!("unknown entity type `{}`", entity_type))? 
- .field("id") - .unwrap() - .field_type - .get_base_type(); - - match base_type { - "ID" | "String" => Ok(store::IdType::String), - "Bytes" => Ok(store::IdType::Bytes), - s => { - return Err(anyhow!( - "Entity type {} uses illegal type {} for id column", - entity_type, - s - )) + fn type_info(&self, atom: Atom) -> Option<&TypeInfo> { + self.inner + .type_infos + .binary_search_by_key(&atom, |ti| ti.name) + .map(|idx| &self.inner.type_infos[idx]) + .ok() + } + + pub(in crate::schema) fn id_type(&self, entity_type: Atom) -> Result { + fn unknown_name(pool: &AtomPool, atom: Atom) -> Error { + match pool.get(atom) { + Some(name) => anyhow!("Entity type `{name}` is not defined in the schema"), + None => anyhow!( + "Invalid atom for id_type lookup (atom is probably from a different pool)" + ), } } + + let type_info = self + .type_info(entity_type) + .ok_or_else(|| unknown_name(&self.inner.pool, entity_type))?; + + type_info.kind.id_type().ok_or_else(|| { + let name = self.inner.pool.get(entity_type).unwrap(); + anyhow!("Entity type `{}` does not have an `id` field", name) + }) } + /// Check if `entity_type` is an immutable object type pub(in crate::schema) fn is_immutable(&self, entity_type: Atom) -> bool { - self.inner.immutable_types.contains(&entity_type) + self.type_info(entity_type) + .map(|ti| matches!(ti.kind, TypeKind::ImmutableObject(_))) + .unwrap_or(false) } pub fn get_named_type(&self, name: &str) -> Option<&s::TypeDefinition> { @@ -380,11 +417,10 @@ impl InputSchema { Entity::try_make(self.inner.pool.clone(), iter) } + /// Check if `entity_type` is an object type and has a field `field` pub(in crate::schema) fn has_field(&self, entity_type: Atom, field: Atom) -> bool { - self.inner - .field_names - .get(&entity_type) - .map(|fields| fields.contains(&field)) + self.type_info(entity_type) + .map(|ti| ti.kind.is_object() && ti.fields.contains(&field)) .unwrap_or(false) } diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index 
1c93c3a389f..ab5da53b485 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -24,7 +24,10 @@ type AtomInt = u16; /// An atom in a pool. To look up the underlying string, surrounding code /// needs to know the pool for it. -#[derive(Eq, Hash, PartialEq, Clone, Copy, Debug)] +/// +/// The ordering for atoms is based on their integer value, and has no +/// connection to how the strings they represent would be ordered +#[derive(Eq, Hash, PartialEq, PartialOrd, Ord, Clone, Copy, Debug)] pub struct Atom(AtomInt); /// An atom and the underlying pool. A `FatAtom` can be used in place of a From 73d3a0702ca8ca35a2f9b15d886e54f030c082be Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 18 Sep 2023 10:42:33 -0700 Subject: [PATCH 0445/2104] graph: Verify that we are looking up an existing type --- graph/src/data/store/mod.rs | 4 +- graph/src/schema/entity_type.rs | 16 ++---- graph/src/schema/input_schema.rs | 89 +++++++++++++++++++++++++++----- 3 files changed, 80 insertions(+), 29 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index af18c4c88af..2c0d995e6b3 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -22,7 +22,7 @@ use strum_macros::IntoStaticStr; use thiserror::Error; use super::{ - graphql::{ext::DirectiveFinder, TypeExt as _}, + graphql::{ext::DirectiveFinder, ObjectOrInterface, TypeExt as _}, value::Word, }; @@ -182,7 +182,7 @@ pub enum IdType { Bytes, } -impl TryFrom<&s::ObjectType> for IdType { +impl<'a> TryFrom<&s::ObjectType> for IdType { type Error = Error; fn try_from(obj_type: &s::ObjectType) -> Result { diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index e39ba78678e..ac7ce7fb540 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -1,6 +1,6 @@ use std::{borrow::Borrow, fmt, sync::Arc}; -use anyhow::{bail, Context, Error}; +use anyhow::{Context, Error}; use serde::Serialize; use crate::{ @@ -27,18 +27,8 
@@ pub struct EntityType { } impl EntityType { - /// Construct a new entity type. Ideally, this is only called when - /// `entity_type` either comes from the GraphQL schema, or from - /// the database from fields that are known to contain a valid entity type - // This method is only meant to be used in `InputSchema`; all external - // constructions of an `EntityType` need to go through that struct - pub(in crate::schema) fn new(schema: InputSchema, name: &str) -> Result { - let atom = match schema.pool().lookup(name) { - Some(atom) => atom, - None => bail!("entity type `{name}` is not interned"), - }; - - Ok(EntityType { schema, atom }) + pub(in crate::schema) fn new(schema: InputSchema, atom: Atom) -> Self { + EntityType { schema, atom } } pub fn as_str(&self) -> &str { diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index 925e590f65e..e99d7f0f0ad 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -7,9 +7,11 @@ use store::Entity; use crate::cheap_clone::CheapClone; use crate::components::store::LoadRelatedRequest; use crate::data::graphql::ext::DirectiveFinder; -use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, ValueExt}; +use crate::data::graphql::{ + DirectiveExt, DocumentExt, ObjectOrInterface, ObjectTypeExt, TypeExt, ValueExt, +}; use crate::data::store::{ - self, EntityValidationError, IdType, IntoEntityIterator, TryIntoEntityIterator, + self, EntityValidationError, IdType, IntoEntityIterator, TryIntoEntityIterator, ID, }; use crate::data::value::Word; use crate::prelude::q::Value; @@ -40,7 +42,7 @@ pub struct InputSchema { inner: Arc, } -#[derive(Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] enum TypeKind { MutableObject(IdType), ImmutableObject(IdType), @@ -71,29 +73,55 @@ struct TypeInfo { } impl TypeInfo { - fn new(pool: &AtomPool, obj_type: &s::ObjectType) -> Self { + fn new(pool: &AtomPool, typ: ObjectOrInterface<'_>, kind: TypeKind) -> Self 
{ // The `unwrap` of `lookup` is safe because the pool was just // constructed against the underlying schema - let name = pool.lookup(&obj_type.name).unwrap(); - let fields = obj_type - .fields + let name = pool.lookup(typ.name()).unwrap(); + let fields = typ + .fields() .iter() .map(|field| pool.lookup(&field.name).unwrap()) .collect::>() .into_boxed_slice(); + Self { name, kind, fields } + } + + fn for_object(pool: &AtomPool, obj_type: &s::ObjectType) -> Self { let id_type = IdType::try_from(obj_type).expect("validation caught any issues here"); let kind = if obj_type.is_immutable() { TypeKind::ImmutableObject(id_type) } else { TypeKind::MutableObject(id_type) }; - Self { name, kind, fields } + Self::new(pool, obj_type.into(), kind) + } + + fn for_interface(pool: &AtomPool, intf_type: &s::InterfaceType) -> Self { + Self::new(pool, intf_type.into(), TypeKind::Interface) + } + + fn for_poi(pool: &AtomPool) -> Self { + // The way we handle the PoI type is a bit of a hack. We pretend + // it's an object type, but trying to look up the `s::ObjectType` + // for it will turn up nothing. + // See also https://github.com/graphprotocol/graph-node/issues/4873 + let name = pool.lookup(POI_OBJECT).unwrap(); + let fields = + vec![pool.lookup(&ID).unwrap(), pool.lookup(POI_DIGEST).unwrap()].into_boxed_slice(); + Self { + name, + kind: TypeKind::MutableObject(IdType::String), + fields, + } } } #[derive(Debug, PartialEq)] pub struct Inner { schema: Schema, + /// A list of all the object and interface types in the `schema` with + /// some important information extracted from the schema. 
The list is + /// sorted by the name atom (not the string name) of the types type_infos: Box<[TypeInfo]>, pool: Arc, } @@ -110,12 +138,20 @@ impl InputSchema { fn create(schema: Schema) -> Self { let pool = Arc::new(atom_pool(&schema.document)); - let mut type_infos: Vec<_> = schema + let obj_types = schema .document .get_object_type_definitions() .into_iter() .filter(|obj_type| obj_type.name != SCHEMA_TYPE_NAME) - .map(|obj_type| TypeInfo::new(&pool, obj_type)) + .map(|obj_type| TypeInfo::for_object(&pool, obj_type)); + let intf_types = schema + .document + .get_interface_type_definitions() + .into_iter() + .map(|intf_type| TypeInfo::for_interface(&pool, intf_type)); + let mut type_infos: Vec<_> = obj_types + .chain(intf_types) + .chain(vec![TypeInfo::for_poi(&pool)]) .collect(); type_infos.sort_by_key(|ti| ti.name); let type_infos = type_infos.into_boxed_slice(); @@ -426,7 +462,8 @@ impl InputSchema { pub fn poi_type(&self) -> EntityType { // unwrap: we make sure to put POI_OBJECT into the pool - EntityType::new(self.cheap_clone(), POI_OBJECT).unwrap() + let atom = self.inner.pool.lookup(POI_OBJECT).unwrap(); + EntityType::new(self.cheap_clone(), atom) } pub fn poi_digest(&self) -> Word { @@ -440,10 +477,22 @@ impl InputSchema { /// Return the entity type for `named`. If the entity type does not /// exist, return an error. Generally, an error should only be possible - /// of `named` is based on user input. If `named` is an internal object, + /// if `named` is based on user input. 
If `named` is an internal object, /// like a `ObjectType`, it is safe to unwrap the result pub fn entity_type(&self, named: N) -> Result { - EntityType::new(self.cheap_clone(), named.name()) + let name = named.name(); + self.inner + .pool + .lookup(name) + .and_then(|atom| self.type_info(atom)) + .map(|ti| EntityType::new(self.cheap_clone(), ti.name)) + .ok_or_else(|| { + anyhow!( + "internal error: entity type `{}` does not exist in {}", + name, + self.inner.schema.id + ) + }) } } @@ -507,7 +556,11 @@ fn atom_pool(document: &s::Document) -> AtomPool { #[cfg(test)] mod tests { - use crate::prelude::DeploymentHash; + use crate::{ + data::store::ID, + prelude::DeploymentHash, + schema::input_schema::{POI_DIGEST, POI_OBJECT}, + }; use super::InputSchema; @@ -524,6 +577,14 @@ mod tests { let schema = InputSchema::parse(SCHEMA, id).unwrap(); assert_eq!("Thing", schema.entity_type("Thing").unwrap().as_str()); + + let poi = schema.entity_type(POI_OBJECT).unwrap(); + assert_eq!(POI_OBJECT, poi.as_str()); + assert!(poi.has_field(schema.pool().lookup(&ID).unwrap())); + assert!(poi.has_field(schema.pool().lookup(POI_DIGEST).unwrap())); + // This is not ideal, but we don't have an object type for the PoI + assert_eq!(None, poi.object_type()); + assert!(schema.entity_type("NonExistent").is_err()); } } From 408075ca0f89dce8c725079457c279b52ead1f54 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 17 Sep 2023 15:58:14 -0700 Subject: [PATCH 0446/2104] graph, graphql, store: Pass the parent of a query result explicitly Instead of passing the parent id of an object coming from a query as the made-up `g$parent_id` attribute, pass it as an explicit member of a new `QueryObject` struct --- graph/src/components/store/traits.rs | 4 +- graph/src/data/store/mod.rs | 18 +++++- graph/src/lib.rs | 2 +- graphql/src/store/prefetch.rs | 23 +++++--- store/postgres/src/query_store.rs | 4 +- store/postgres/src/relational_queries.rs | 62 +++++++++++++------- 
store/test-store/tests/graph/entity_cache.rs | 14 +++-- 7 files changed, 82 insertions(+), 45 deletions(-) diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index dc8d6bd2e7c..e9b3354e93b 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -10,8 +10,8 @@ use crate::components::subgraph::SubgraphVersionSwitchingMode; use crate::components::transaction_receipt; use crate::components::versions::ApiVersion; use crate::data::query::Trace; +use crate::data::store::QueryObject; use crate::data::subgraph::{status, DeploymentFeatures}; -use crate::data::value::Object; use crate::data::{query::QueryTarget, subgraph::schema::*}; use crate::prelude::{DeploymentState, NodeId, QueryExecutionError, SubgraphName}; use crate::schema::{ApiSchema, InputSchema}; @@ -537,7 +537,7 @@ pub trait QueryStore: Send + Sync { fn find_query_values( &self, query: EntityQuery, - ) -> Result<(Vec, Trace), QueryExecutionError>; + ) -> Result<(Vec, Trace), QueryExecutionError>; async fn is_deployment_synced(&self) -> Result; diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 2c0d995e6b3..8ab376fbe94 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -654,9 +654,6 @@ where lazy_static! { /// The name of the id attribute, `"id"` pub static ref ID: Word = Word::from("id"); - /// The name of the parent_id attribute that we inject into query - /// results - pub static ref PARENT_ID: Word = Word::from("g$parent_id"); } /// An entity is represented as a map of attribute names to values. @@ -1068,6 +1065,21 @@ impl std::fmt::Debug for Entity { } } +/// An object that is returned from a query. It's an `r::Value` which
and +/// possibly a pointer to its parent if the query that constructed it is one +/// that depends on parents +pub struct QueryObject { + pub parent: Option, + pub entity: r::Object, +} + +impl CacheWeight for QueryObject { + fn indirect_weight(&self) -> usize { + self.parent.indirect_weight() + self.entity.indirect_weight() + } +} + #[test] fn value_bytes() { let graphql_value = r::Value::String("0x8f494c66afc1d3f8ac1b45df21f02a46".to_owned()); diff --git a/graph/src/lib.rs b/graph/src/lib.rs index d90730b1b4c..83959ee8651 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -207,6 +207,6 @@ pub mod prelude { }); pub mod r { - pub use crate::data::value::Value; + pub use crate::data::value::{Object, Value}; } } diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 8f989d2678b..a92d9813e53 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; -use graph::data::store::PARENT_ID; +use graph::data::store::QueryObject; use graph::data::value::{Object, Word}; use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; @@ -41,6 +41,8 @@ struct Node { /// the keys and values of the `children` map, but not of the map itself children_weight: usize, + parent: Option, + entity: Object, /// We are using an `Rc` here for two reasons: it allows us to defer /// copying objects until the end, when converting to `q::Value` forces @@ -85,11 +87,12 @@ struct Node { children: BTreeMap>>, } -impl From for Node { - fn from(entity: Object) -> Self { +impl From for Node { + fn from(object: QueryObject) -> Self { Node { - children_weight: entity.weight(), - entity, + children_weight: object.weight(), + parent: object.parent, + entity: object.entity, children: BTreeMap::default(), } } @@ -134,6 +137,7 @@ fn make_root_node() -> Vec { let entity = Object::empty(); vec![Node { children_weight: entity.weight(), + 
parent: None, entity, children: BTreeMap::default(), }] @@ -460,10 +464,11 @@ fn add_children(parents: &mut [&mut Node], children: Vec, response_key: &s // interface. let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); for child in children.iter() { - match child - .get(&*PARENT_ID) - .expect("the query that produces 'child' ensures there is always a g$parent_id") - { + let parent = child + .parent + .as_ref() + .expect("the query that produces 'child' ensures there is always a g$parent_id"); + match parent { r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), _ => unreachable!("the parent_id returned by the query is always a string"), } diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs index 2b78d2d8a26..21f8aa59753 100644 --- a/store/postgres/src/query_store.rs +++ b/store/postgres/src/query_store.rs @@ -1,7 +1,7 @@ use crate::deployment_store::{DeploymentStore, ReplicaId}; use graph::components::store::{DeploymentId, QueryStore as QueryStoreTrait}; use graph::data::query::Trace; -use graph::data::value::Object; +use graph::data::store::QueryObject; use graph::prelude::*; use graph::schema::{ApiSchema, InputSchema}; @@ -38,7 +38,7 @@ impl QueryStoreTrait for QueryStore { fn find_query_values( &self, query: EntityQuery, - ) -> Result<(Vec, Trace), graph::prelude::QueryExecutionError> { + ) -> Result<(Vec, Trace), graph::prelude::QueryExecutionError> { assert_eq!(&self.site.deployment, &query.subgraph_id); let conn = self .store diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index a8c978e5d1f..07f695f5851 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -14,7 +14,8 @@ use diesel::Connection; use graph::components::store::write::WriteChunk; use graph::components::store::DerivedEntityQuery; -use graph::data::store::{NULL, PARENT_ID}; +use graph::data::store::QueryObject; +use 
graph::data::store::NULL; use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -53,6 +54,11 @@ const POSTGRES_MAX_PARAMETERS: usize = u16::MAX as usize; // 65535 const SORT_KEY_COLUMN: &str = "sort_key$"; +/// The name of the parent_id attribute that we inject into queries. Users +/// outside of this module should access the parent id through the +/// `QueryObject` struct +const PARENT_ID: &str = "g$parent_id"; + /// Describes at what level a `SELECT` statement is used. enum SelectStatementLevel { // A `SELECT` statement that is nested inside another `SELECT` statement @@ -256,6 +262,7 @@ pub trait FromEntityData: Sized { fn from_data>>( schema: &InputSchema, + parent_id: Option, iter: I, ) -> Result; } @@ -267,22 +274,28 @@ impl FromEntityData for Entity { fn from_data>>( schema: &InputSchema, + parent_id: Option, iter: I, ) -> Result { + debug_assert_eq!(None, parent_id); schema.try_make_entity(iter).map_err(StoreError::from) } } -impl FromEntityData for Object { +impl FromEntityData for QueryObject { const WITH_INTERNAL_KEYS: bool = true; type Value = r::Value; fn from_data>>( _schema: &InputSchema, + parent: Option, iter: I, ) -> Result { - as FromIterator>>::from_iter(iter) + let entity = as FromIterator< + Result<(Word, Self::Value), StoreError>, + >>::from_iter(iter)?; + Ok(QueryObject { parent, entity }) } } @@ -525,19 +538,10 @@ impl EntityData { use serde_json::Value as j; match self.data { - j::Object(map) => { - let typname = std::iter::once(self.entity).filter_map(move |e| { - if T::WITH_INTERNAL_KEYS { - Some(Ok((Word::from("__typename"), T::Value::from_string(e)))) - } else { - None - } - }); - let entries = map.into_iter().filter_map(move |(key, json)| { - // Simply ignore keys that do not have an underlying table - // column; those will be things like the block_range that - // is used internally for versioning - if key == PARENT_ID.as_str() { + j::Object(mut map) => { + let parent_id = map + 
.remove(PARENT_ID) + .and_then(|json| { if T::WITH_INTERNAL_KEYS { match &parent_type { None => { @@ -548,15 +552,29 @@ impl EntityData { "query unexpectedly produces parent ids" ))) } - Some(parent_type) => Some( - T::Value::from_column_value(parent_type, json) - .map(|value| (PARENT_ID.clone(), value)), - ), + Some(parent_type) => { + Some(T::Value::from_column_value(parent_type, json)) + } } } else { None } - } else if let Some(column) = table.column(&SqlName::verbatim(key)) { + }) + .transpose()?; + let map = map; + let typname = std::iter::once(self.entity).filter_map(move |e| { + if T::WITH_INTERNAL_KEYS { + Some(Ok((Word::from("__typename"), T::Value::from_string(e)))) + } else { + None + } + }); + let entries = map.into_iter().filter_map(move |(key, json)| { + // Simply ignore keys that do not have an underlying + // table column; those will be things like the + // block_range that `select *` pulls in but that we + // don't care about here + if let Some(column) = table.column(&SqlName::verbatim(key)) { match T::Value::from_column_value(&column.column_type, json) { Ok(value) if value.is_null() => None, Ok(value) => Some(Ok((Word::from(column.field.to_string()), value))), @@ -566,7 +584,7 @@ impl EntityData { None } }); - T::from_data(&layout.input_schema, typname.chain(entries)) + T::from_data(&layout.input_schema, parent_id, typname.chain(entries)) } _ => unreachable!( "we use `to_json` in our queries, and will therefore always get an object back" diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 271fb9b2e40..278e4baee9f 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -3,7 +3,6 @@ use graph::components::store::{ DeploymentCursorTracker, DerivedEntityQuery, GetScope, LoadRelatedRequest, ReadStore, StoredDynamicDataSource, WritableStore, }; -use graph::data::store::PARENT_ID; use graph::data::subgraph::schema::{DeploymentCreate, 
SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; use graph::schema::{EntityKey, EntityType, InputSchema}; @@ -736,16 +735,19 @@ fn scoped_get() { fn no_internal_keys() { run_store_test(|mut cache, _, _, writable| async move { #[track_caller] - fn check(entity: &Entity) { - assert_eq!(None, entity.get("__typename")); - assert_eq!(None, entity.get(&*PARENT_ID)); + fn check(schema: &InputSchema, key: &EntityKey, entity: &Entity) { + // Validate checks that all attributes are actually declared in + // the schema + entity.validate(schema, key).expect("the entity is valid"); } let key = WALLET_TYPE.key("1"); + let schema = cache.schema.cheap_clone(); + let wallet = writable.get(&key).unwrap().unwrap(); - check(&wallet); + check(&schema, &key, &wallet); let wallet = cache.get(&key, GetScope::Store).unwrap().unwrap(); - check(&wallet); + check(&schema, &key, &wallet); }); } From 8094fa4b1d97161b94e84ea9cc00fceffba23f71 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 17 Sep 2023 16:05:41 -0700 Subject: [PATCH 0447/2104] graphql: Turn a panic in the prefetch logic into an error --- graphql/src/store/prefetch.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index a92d9813e53..f404a180a4a 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -449,13 +449,17 @@ impl<'a> MaybeJoin<'a> { /// /// If `parents` only has one entry, add all children to that one parent. In /// particular, this is what happens for toplevel queries. 
-fn add_children(parents: &mut [&mut Node], children: Vec, response_key: &str) { +fn add_children( + parents: &mut [&mut Node], + children: Vec, + response_key: &str, +) -> Result<(), QueryExecutionError> { let children: Vec<_> = children.into_iter().map(Rc::new).collect(); if parents.len() == 1 { let parent = parents.first_mut().expect("we just checked"); parent.set_children(response_key.to_owned(), children); - return; + return Ok(()); } // Build a map parent_id -> Vec that we will use to add @@ -464,10 +468,13 @@ fn add_children(parents: &mut [&mut Node], children: Vec, response_key: &s // interface. let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); for child in children.iter() { - let parent = child - .parent - .as_ref() - .expect("the query that produces 'child' ensures there is always a g$parent_id"); + let parent = child.parent.as_ref().ok_or_else(|| { + QueryExecutionError::Panic(format!( + "child {}[{}] is missing a parent id", + child.typename(), + child.id().unwrap_or_else(|_| "".to_owned()) + )) + })?; match parent { r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), _ => unreachable!("the parent_id returned by the query is always a string"), @@ -486,6 +493,8 @@ fn add_children(parents: &mut [&mut Node], children: Vec, response_key: &s let values = parent.id().ok().and_then(|id| grouped.get(&*id).cloned()); parent.set_children(response_key.to_owned(), values.unwrap_or_default()); } + + Ok(()) } /// Run the query in `ctx` in such a manner that we only perform one query @@ -640,7 +649,7 @@ fn execute_selection_set<'a>( &field.selection_set, ) { Ok((children, trace)) => { - add_children(&mut parents, children, field.response_key()); + add_children(&mut parents, children, field.response_key())?; let weight = parents.iter().map(|parent| parent.weight()).sum::(); check_result_size(ctx, weight)?; From f73b2951a75f47c28e43e9da20b021ca82dc9c46 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 17 Sep 2023 16:23:45 
-0700 Subject: [PATCH 0448/2104] graph: Remove unused EntityKey::from(LoadRelatedRequest) --- graph/src/schema/entity_key.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index 5061fa9df27..8c1e8fbd6eb 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -2,7 +2,7 @@ use std::fmt; use anyhow::Error; -use crate::components::store::{LoadRelatedRequest, StoreError}; +use crate::components::store::StoreError; use crate::data::store::Value; use crate::data::value::Word; use crate::data_source::CausalityRegion; @@ -49,15 +49,6 @@ impl EntityKey { } } - pub fn from(id: &String, load_related_request: &LoadRelatedRequest) -> Self { - let clone = load_related_request.clone(); - Self::new( - clone.entity_type, - Word::from(id.as_str()), - clone.causality_region, - ) - } - pub fn id_value(&self) -> Result { self.entity_type.id_value(self.entity_id.clone()) } From 86979c8b1701576546153ff74a0ec3a9675018f8 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 18 Sep 2023 11:54:04 -0700 Subject: [PATCH 0449/2104] store: Return error from ColumnType::id_type, don't panic --- store/postgres/src/relational.rs | 21 +++++++++-------- store/postgres/src/relational_queries.rs | 29 +++++++++++++----------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index a0f1777f7dc..3d7a36f8c0e 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -22,7 +22,7 @@ use diesel::serialize::Output; use diesel::sql_types::Text; use diesel::types::{FromSql, ToSql}; use diesel::{connection::SimpleConnection, Connection}; -use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; +use diesel::{debug_query, OptionalExtension, PgConnection, QueryResult, RunQueryDsl}; use graph::cheap_clone::CheapClone; use graph::components::store::write::RowGroup; 
use graph::constraint_violation; @@ -1107,15 +1107,18 @@ impl ColumnType { } /// Return the `IdType` corresponding to this column type. This can only - /// be called on a column that stores an `ID` and will panic otherwise - pub(crate) fn id_type(&self) -> IdType { + /// be called on a column that stores an `ID` and will return an error + pub(crate) fn id_type(&self) -> QueryResult { match self { - ColumnType::String => IdType::String, - ColumnType::Bytes => IdType::Bytes, - _ => unreachable!( - "only String and BytesId are allowed as primary keys but not {:?}", - self - ), + ColumnType::String => Ok(IdType::String), + ColumnType::Bytes => Ok(IdType::Bytes), + _ => Err(diesel::result::Error::QueryBuilderError( + anyhow!( + "only String and Bytes are allowed as primary keys but not {:?}", + self + ) + .into(), + )), } } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 07f695f5851..dc708249960 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -153,7 +153,7 @@ trait ForeignKeyClauses { /// Add `id` as a bind variable to `out`, using the right SQL type fn bind_id(&self, id: &str, out: &mut AstPass) -> QueryResult<()> { - match self.column_type().id_type() { + match self.column_type().id_type()? { IdType::String => out.push_bind_param::(&id)?, IdType::Bytes => out.push_bind_param::(&str_as_bytes(id)?.as_slice())?, } @@ -168,7 +168,7 @@ trait ForeignKeyClauses { where S: AsRef + diesel::serialize::ToSql, { - self.column_type().id_type().bind_ids(ids, out) + self.column_type().id_type()?.bind_ids(ids, out) } /// Generate a clause `{name()} = $id` using the right types to bind `$id` @@ -219,7 +219,7 @@ trait ForeignKeyClauses { } match id { None => out.push_sql("null"), - Some(id) => match self.column_type().id_type() { + Some(id) => match self.column_type().id_type()? 
{ IdType::String => { out.push_sql("'"); out.push_sql(&id.0); @@ -2270,7 +2270,7 @@ impl<'a> FilterWindow<'a> { }) } - fn parent_type(&self) -> IdType { + fn parent_type(&self) -> QueryResult { match &self.link { TableLink::Direct(column, _) => column.column_type.id_type(), TableLink::Parent(parent_table, _) => parent_table.primary_key().column_type.id_type(), @@ -2747,10 +2747,10 @@ impl<'a> FilterCollection<'a> { pub(crate) fn parent_type(&self) -> Result, StoreError> { match self { FilterCollection::All(_) => Ok(None), - FilterCollection::SingleWindow(window) => Ok(Some(window.parent_type())), + FilterCollection::SingleWindow(window) => Ok(Some(window.parent_type()?)), FilterCollection::MultiWindow(windows, _) => { if windows.iter().map(FilterWindow::parent_type).all_equal() { - Ok(Some(windows[0].parent_type())) + Ok(Some(windows[0].parent_type()?)) } else { Err(graph::constraint_violation!( "all implementors of an interface must use the same type for their `id`" @@ -4122,7 +4122,7 @@ impl<'a> FilterQuery<'a> { out.push_sql("select c.* from "); out.push_sql("unnest("); // windows always has at least 2 entries - windows[0].parent_type().bind_ids(parent_ids, &mut out)?; + windows[0].parent_type()?.bind_ids(parent_ids, &mut out)?; out.push_sql(") as q(id)\n"); out.push_sql(" cross join lateral ("); for (i, window) in windows.iter().enumerate() { @@ -4311,14 +4311,17 @@ pub struct ReturnedEntityData { impl ReturnedEntityData { /// Convert primary key ids from Postgres' internal form to the format we /// use by stripping `\\x` off the front of bytes strings - fn bytes_as_str(table: &Table, mut data: Vec) -> Vec { - match table.primary_key().column_type.id_type() { - IdType::String => data, + fn bytes_as_str( + table: &Table, + mut data: Vec, + ) -> QueryResult> { + match table.primary_key().column_type.id_type()? 
{ + IdType::String => Ok(data), IdType::Bytes => { for entry in data.iter_mut() { entry.id = bytes_as_str(&entry.id); } - data + Ok(data) } } } @@ -4367,7 +4370,7 @@ impl<'a> QueryId for RevertRemoveQuery<'a> { impl<'a> LoadQuery for RevertRemoveQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult> { conn.query_by_name(&self) - .map(|data| ReturnedEntityData::bytes_as_str(self.table, data)) + .and_then(|data| ReturnedEntityData::bytes_as_str(self.table, data)) } } @@ -4450,7 +4453,7 @@ impl<'a> QueryId for RevertClampQuery<'a> { impl<'a> LoadQuery for RevertClampQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult> { conn.query_by_name(&self) - .map(|data| ReturnedEntityData::bytes_as_str(self.table, data)) + .and_then(|data| ReturnedEntityData::bytes_as_str(self.table, data)) } } From f7bc2e6b4978bfd8020707884f6084039315486b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Mon, 18 Sep 2023 16:14:35 -0700 Subject: [PATCH 0450/2104] store: Change how we pass in parents and children for type C queries We used to pass the children as one big matrix which had the disadvantage that we couldn't use bind variables and had to pass in literal values. With this way of passing in the parent/children pairs, we need 2 bind variables per parent, which restricts us to about 30k parents, which is a pretty unreasonable number of parents. 
--- graph/src/data/store/mod.rs | 2 +- store/postgres/src/relational_queries.rs | 186 ++++++++--------------- store/test-store/tests/graphql/query.rs | 21 +++ 3 files changed, 84 insertions(+), 125 deletions(-) diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 8ab376fbe94..14145e0214a 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -22,7 +22,7 @@ use strum_macros::IntoStaticStr; use thiserror::Error; use super::{ - graphql::{ext::DirectiveFinder, ObjectOrInterface, TypeExt as _}, + graphql::{ext::DirectiveFinder, TypeExt as _}, value::Word, }; diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index dc708249960..96175db9699 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -191,57 +191,6 @@ trait ForeignKeyClauses { out.push_sql(") as p(g$id) where id = p.g$id)"); Ok(()) } - - /// Generate an array of arrays as literal SQL. The `ids` must form a - /// valid matrix, i.e. the same numbe of entries in each row. This can - /// be achieved by padding them with `None` values. Diesel does not support - /// arrays of arrays as bind variables, nor arrays containing nulls, so - /// we have to manually serialize the `ids` as literal SQL. - fn push_matrix( - &self, - matrix: &[Vec>], - out: &mut AstPass, - ) -> QueryResult<()> { - out.push_sql("array["); - if matrix.is_empty() { - // If there are no ids, make sure we are producing an - // empty array of arrays - out.push_sql("array[null]"); - } else { - for (i, ids) in matrix.iter().enumerate() { - if i > 0 { - out.push_sql(", "); - } - out.push_sql("array["); - for (j, id) in ids.iter().enumerate() { - if j > 0 { - out.push_sql(", "); - } - match id { - None => out.push_sql("null"), - Some(id) => match self.column_type().id_type()? 
{ - IdType::String => { - out.push_sql("'"); - out.push_sql(&id.0); - out.push_sql("'"); - } - IdType::Bytes => { - out.push_sql("'\\x"); - out.push_sql(id.0.trim_start_matches("0x")); - out.push_sql("'"); - } - }, - } - } - out.push_sql("]"); - } - } - // Generate '::text[][]' or '::bytea[][]' - out.push_sql("]::"); - out.push_sql(self.column_type().sql_type()); - out.push_sql("[][]"); - Ok(()) - } } impl ForeignKeyClauses for Column { @@ -2090,22 +2039,9 @@ impl<'a> LoadQuery for ConflictingEntityQue impl<'a, Conn> RunQueryDsl for ConflictingEntityQuery<'a> {} -/// A string where we have checked that it is safe to embed it literally -/// in a string in a SQL query. In particular, we have escaped any use -/// of the string delimiter `'`. -/// -/// This is only needed for `ParentIds::List` since we can't send those to -/// the database as a bind variable, and therefore need to embed them in -/// the query literally -#[derive(Debug, Clone)] -struct SafeString(String); - -/// A `ParentLink` where we've made sure for the `List` variant that each -/// `Vec>` has the same length -/// Use the provided constructors to make sure this invariant holds #[derive(Debug, Clone)] enum ParentIds { - List(Vec>>), + List(Vec>), Scalar(Vec), } @@ -2113,31 +2049,7 @@ impl ParentIds { fn new(link: ParentLink) -> Self { match link { ParentLink::Scalar(child_ids) => ParentIds::Scalar(child_ids), - ParentLink::List(child_ids) => { - // Postgres will only accept child_ids, which is a Vec> - // if all Vec are the same length. 
We therefore pad - // shorter ones with None, which become nulls in the database - let maxlen = child_ids.iter().map(|ids| ids.len()).max().unwrap_or(0); - let child_ids = child_ids - .into_iter() - .map(|ids| { - let mut ids: Vec<_> = ids - .into_iter() - .map(|s| { - if s.contains('\'') { - SafeString(s.replace('\'', "''")) - } else { - SafeString(s) - } - }) - .map(Some) - .collect(); - ids.resize_with(maxlen, || None); - ids - }) - .collect(); - ParentIds::List(child_ids) - } + ParentLink::List(child_ids) => ParentIds::List(child_ids), } } } @@ -2433,40 +2345,69 @@ impl<'a> FilterWindow<'a> { fn children_type_c( &self, parent_primary_key: &Column, - child_ids: &[Vec>], + child_ids: &[Vec], limit: ParentLimit<'_>, block: BlockNumber, out: &mut AstPass, ) -> QueryResult<()> { - // Generate - // from rows from (unnest({parent_ids}), reduce_dim({child_id_matrix})) - // as p(id, child_ids) - // cross join lateral - // (select {column names} - // from children c - // where c.id = any(p.child_ids) - // and .. other conditions on c .. - // order by c.{sort_key} - // limit {first} offset {skip}) c - // order by c.{sort_key} - - out.push_sql("\n/* children_type_c */ from "); - out.push_sql("rows from (unnest("); - parent_primary_key.bind_ids(&self.ids, out)?; - out.push_sql("), reduce_dim("); - self.table.primary_key().push_matrix(child_ids, out)?; - out.push_sql(")) as p(id, child_ids)"); - out.push_sql(" cross join lateral (select "); - write_column_names(&self.column_names, self.table, None, out)?; - out.push_sql(" from "); - out.push_sql(self.table.qualified_name.as_str()); - out.push_sql(" c where "); - BlockRangeColumn::new(self.table, "c.", block).contains(out)?; - limit.filter(out); - out.push_sql(" and c.id = any(p.child_ids)"); - self.and_filter(out.reborrow())?; - limit.restrict(out)?; - out.push_sql(") c"); + out.push_sql("\n/* children_type_c */ "); + + // An empty `self.ids` leads to an empty `(values )` clause which is + // not legal SQL. 
In that case we generate some dummy SQL where the + // resulting empty table has the same structure as the one we + // generate when `self.ids` is not empty + if !self.ids.is_empty() { + // Generate + // from (values ({parent_id}, {child_ids}), ...) + // as p(id, child_ids) + // cross join lateral + // (select {column names} + // from children c + // where c.id = any(p.child_ids) + // and .. other conditions on c .. + // order by c.{sort_key} + // limit {first} offset {skip}) c + // order by c.{sort_key} + + out.push_sql("from (values "); + for i in 0..self.ids.len() { + let parent_id = &self.ids[i]; + let child_ids = &child_ids[i]; + if i > 0 { + out.push_sql(", ("); + } else { + out.push_sql("("); + } + parent_primary_key.bind_id(parent_id, out)?; + out.push_sql(","); + self.table.primary_key().bind_ids(child_ids, out)?; + out.push_sql(")"); + } + out.push_sql(") as p(id, child_ids)"); + out.push_sql(" cross join lateral (select "); + write_column_names(&self.column_names, self.table, None, out)?; + out.push_sql(" from "); + out.push_sql(self.table.qualified_name.as_str()); + out.push_sql(" c where "); + BlockRangeColumn::new(self.table, "c.", block).contains(out)?; + limit.filter(out); + out.push_sql(" and c.id = any(p.child_ids)"); + self.and_filter(out.reborrow())?; + limit.restrict(out)?; + out.push_sql(") c"); + } else { + // Generate + // from unnest(array[]::text[]) as p(id) cross join + // (select {column names} + // from children c + // where false) c + + out.push_sql("from unnest(array[]::text[]) as p(id) cross join (select "); + write_column_names(&self.column_names, self.table, None, out)?; + out.push_sql(" from "); + out.push_sql(self.table.qualified_name.as_str()); + out.push_sql(" c where false) c"); + } Ok(()) } @@ -2628,10 +2569,7 @@ impl<'a> fmt::Display for FilterCollection<'a> { write!(f, "many:{}={}", col.name(), ids.join(","))? 
} TableLink::Parent(_, ParentIds::List(css)) => { - let css = css - .iter() - .map(|cs| cs.iter().filter_map(|c| c.as_ref().map(|s| &s.0)).join(",")) - .join("],["); + let css = css.iter().map(|cs| cs.join(",")).join("],["); write!(f, "uniq:id=[{}]", css)? } TableLink::Parent(_, ParentIds::Scalar(cs)) => { diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 967d7cfce5d..5a352263866 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -433,6 +433,7 @@ async fn insert_test_entities( vec![ entity! { is => id: "rl2", title: "Rock", songs: vec![s[2]] }, entity! { is => id: "rl3", title: "Cheesy", songs: vec![s[1]] }, + entity! { is => id: "rl4", title: "Silence", songs: Vec::::new() }, ], ), ]; @@ -2797,3 +2798,23 @@ fn can_compare_id() { }) } } + +#[test] +fn empty_type_c() { + // Single `rl4` has no songs. Make sure our SQL query generation does + // not cause a syntax error + const QUERY: &str = " + query { + single(id: \"rl4\") { + songs { id } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + single: object! { songs: Vec::::new() } + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} From 066edf76d59b4d4f7a518a5dbbbaafc7d1b96922 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Wed, 20 Sep 2023 11:01:59 -0700 Subject: [PATCH 0451/2104] graph: Move data::store::IdType to its own file --- graph/src/data/store/id.rs | 35 +++++++++++++++++++++++++++++++++++ graph/src/data/store/mod.rs | 31 ++++--------------------------- 2 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 graph/src/data/store/id.rs diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs new file mode 100644 index 00000000000..bab0c419ec3 --- /dev/null +++ b/graph/src/data/store/id.rs @@ -0,0 +1,35 @@ +//! Types and helpers to deal with entity IDs which support a subset of the +//! 
types that more general values support +use anyhow::{anyhow, Error}; + +use crate::{ + data::graphql::{ObjectTypeExt, TypeExt}, + prelude::s, +}; + +/// The types that can be used for the `id` of an entity +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum IdType { + String, + Bytes, +} + +impl<'a> TryFrom<&s::ObjectType> for IdType { + type Error = Error; + + fn try_from(obj_type: &s::ObjectType) -> Result { + let base_type = obj_type.field("id").unwrap().field_type.get_base_type(); + + match base_type { + "ID" | "String" => Ok(IdType::String), + "Bytes" => Ok(IdType::Bytes), + s => { + return Err(anyhow!( + "Entity type {} uses illegal type {} for id column", + obj_type.name, + s + )) + } + } + } +} diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 14145e0214a..a243913b51f 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -26,6 +26,10 @@ use super::{ value::Word, }; +/// Handling of entity ids +mod id; +pub use id::IdType; + /// Custom scalars in GraphQL. pub mod scalar; @@ -175,33 +179,6 @@ impl ValueType { } } -/// The types that can be used for the `id` of an entity -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -pub enum IdType { - String, - Bytes, -} - -impl<'a> TryFrom<&s::ObjectType> for IdType { - type Error = Error; - - fn try_from(obj_type: &s::ObjectType) -> Result { - let base_type = obj_type.field("id").unwrap().field_type.get_base_type(); - - match base_type { - "ID" | "String" => Ok(IdType::String), - "Bytes" => Ok(IdType::Bytes), - s => { - return Err(anyhow!( - "Entity type {} uses illegal type {} for id column", - obj_type.name, - s - )) - } - } - } -} - // Note: Do not modify fields without also making a backward compatible change to the StableHash impl (below) /// An attribute value is represented as an enum with variants for all supported value types. 
#[derive(Clone, Deserialize, Serialize, PartialEq, Eq)] From 2aee12829a67e54b60fd23756393aa7486fe8d1b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 12 Sep 2023 09:51:54 -0700 Subject: [PATCH 0452/2104] all: Introduce an `Id` type to handle id values We used to pass ids around mostly as strings, though that was never quite clear from the code and it may have also been the case that in some instances we passed ids around as bytes when the type in the GraphQL schema was Bytes. This PR makes it a lot clearer which type of id we are dealing with and tries to restrict conversions from/to strings for Bytes ids to places where we take them in from the outside, mostly mappings, substreams, and reading and writing from/to the database. It should make it possible to add more functionality and avoid conversion bugs we encountered in the past as well as make it easier to support other types of id, like integers. --- chain/substreams/src/mapper.rs | 4 +- chain/substreams/src/trigger.rs | 6 +- graph/src/components/store/entity_cache.rs | 3 +- graph/src/components/store/err.rs | 4 +- graph/src/components/store/mod.rs | 24 +- graph/src/components/store/write.rs | 31 +- .../subgraph/proof_of_indexing/mod.rs | 3 +- .../subgraph/proof_of_indexing/online.rs | 13 +- graph/src/data/query/error.rs | 16 +- graph/src/data/store/id.rs | 367 +++++++++++++++++- graph/src/data/store/mod.rs | 12 +- graph/src/data/store/scalar.rs | 11 +- graph/src/data/value.rs | 11 + graph/src/schema/entity_key.rs | 13 +- graph/src/schema/entity_type.rs | 47 ++- graph/src/schema/input_schema.rs | 31 +- graphql/src/store/prefetch.rs | 104 +++-- runtime/test/src/test.rs | 25 +- runtime/wasm/src/host_exports.rs | 57 +-- server/index-node/src/resolver.rs | 5 +- store/postgres/src/deployment_store.rs | 13 +- store/postgres/src/relational.rs | 78 +++- store/postgres/src/relational_queries.rs | 255 ++++++------ store/postgres/src/writable.rs | 18 +- store/test-store/tests/core/interfaces.rs | 10 +- 
store/test-store/tests/graph/entity_cache.rs | 114 +++--- store/test-store/tests/graphql/query.rs | 76 +++- store/test-store/tests/postgres/graft.rs | 25 +- store/test-store/tests/postgres/relational.rs | 48 ++- .../tests/postgres/relational_bytes.rs | 48 ++- store/test-store/tests/postgres/store.rs | 52 +-- store/test-store/tests/postgres/writable.rs | 3 +- 32 files changed, 1019 insertions(+), 508 deletions(-) diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 532b9da52fd..764fd6bd5d7 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -152,9 +152,9 @@ fn parse_changes( } }; // Substreams don't currently support offchain data - let key = entity_type.key_in(Word::from(entity_id), CausalityRegion::ONCHAIN); + let key = entity_type.parse_key_in(Word::from(entity_id), CausalityRegion::ONCHAIN)?; - let id = key.id_value()?; + let id = key.id_value(); parsed_data.insert(Word::from("id"), id); let changes = match entity_change.operation() { diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index b74bc0046eb..c51d21f1f95 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -195,7 +195,7 @@ where proof_of_indexing, &ProofOfIndexingEvent::SetEntity { entity_type: key.entity_type.as_str(), - id: &key.entity_id, + id: &key.entity_id.to_string(), data: &entity, }, causality_region, @@ -213,11 +213,11 @@ where proof_of_indexing, &ProofOfIndexingEvent::RemoveEntity { entity_type: entity_type.as_str(), - id: id.as_str(), + id: &id.to_string(), }, causality_region, logger, - ) + ); } } } diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index c24d342cf75..44cfad831d1 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -201,14 +201,13 @@ impl EntityCache { &mut self, eref: &LoadRelatedRequest, ) -> Result, anyhow::Error> { - let (base_type, field, 
id_is_bytes) = self.schema.get_field_related(eref)?; + let (base_type, field) = self.schema.get_field_related(eref)?; let query = DerivedEntityQuery { entity_type: self.schema.entity_type(base_type)?, entity_field: field.name.clone().into(), value: eref.entity_id.clone(), causality_region: eref.causality_region, - id_is_bytes, }; let mut entity_map = self.store.get_derived(&query)?; diff --git a/graph/src/components/store/err.rs b/graph/src/components/store/err.rs index b9ef0a59f98..5187c65cf14 100644 --- a/graph/src/components/store/err.rs +++ b/graph/src/components/store/err.rs @@ -75,10 +75,10 @@ pub enum StoreError { #[macro_export] macro_rules! constraint_violation { ($msg:expr) => {{ - StoreError::ConstraintViolation(format!("{}", $msg)) + $crate::prelude::StoreError::ConstraintViolation(format!("{}", $msg)) }}; ($fmt:expr, $($arg:tt)*) => {{ - StoreError::ConstraintViolation(format!($fmt, $($arg)*)) + $crate::prelude::StoreError::ConstraintViolation(format!($fmt, $($arg)*)) }} } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 0f65c30da02..7a4ff42a844 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -21,7 +21,6 @@ use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::fmt; use std::fmt::Display; -use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -31,7 +30,7 @@ use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; use crate::constraint_violation; use crate::data::store::scalar::Bytes; -use crate::data::store::Value; +use crate::data::store::{Id, IdList, Value}; use crate::data::value::Word; use crate::data_source::CausalityRegion; use crate::env::ENV_VARS; @@ -57,7 +56,7 @@ pub struct LoadRelatedRequest { /// Name of the entity type. pub entity_type: EntityType, /// ID of the individual entity. 
- pub entity_id: Word, + pub entity_id: Id, /// Field the shall be loaded pub entity_field: Word, @@ -76,9 +75,7 @@ pub struct DerivedEntityQuery { /// The field to check pub entity_field: Word, /// The value to compare against - pub value: Word, - /// Boolean indicating if the id is of the type `Bytes` - pub id_is_bytes: bool, + pub value: Id, /// This is the causality region of the data source that created the entity. /// @@ -94,12 +91,7 @@ impl DerivedEntityQuery { key.entity_type == self.entity_type && entity .get(&self.entity_field) - .map(|v| match v { - Value::String(s) => s.as_str() == self.value.as_str(), - Value::Bytes(b) => Bytes::from_str(self.value.as_str()) - .map_or(false, |bytes_value| &bytes_value == b), - _ => false, - }) + .map(|v| &self.value == v) .unwrap_or(false) } } @@ -324,11 +316,11 @@ impl WindowAttribute { pub enum ParentLink { /// The parent stores a list of child ids. The ith entry in the outer /// vector contains the id of the children for `EntityWindow.ids[i]` - List(Vec>), + List(Vec), /// The parent stores the id of one child. 
The ith entry in the /// vector contains the id of the child of the parent with id /// `EntityWindow.ids[i]` - Scalar(Vec), + Scalar(IdList), } /// How many children a parent can have when the child stores @@ -364,7 +356,7 @@ pub struct EntityWindow { /// The entity type for this window pub child_type: EntityType, /// The ids of parents that should be considered for this window - pub ids: Vec, + pub ids: IdList, /// How to get the parent id pub link: EntityLink, pub column_names: AttributeNames, @@ -512,7 +504,7 @@ impl EntityQuery { if windows.len() == 1 { let window = windows.first().expect("we just checked"); if window.ids.len() == 1 { - let id = window.ids.first().expect("we just checked"); + let id = window.ids.first().expect("we just checked").to_value(); if let EntityLink::Direct(attribute, _) = &window.link { let filter = match attribute { WindowAttribute::Scalar(name) => { diff --git a/graph/src/components/store/write.rs b/graph/src/components/store/write.rs index 232a5122930..e787d8b3455 100644 --- a/graph/src/components/store/write.rs +++ b/graph/src/components/store/write.rs @@ -6,7 +6,7 @@ use crate::{ cheap_clone::CheapClone, components::subgraph::Entity, constraint_violation, - data::{subgraph::schema::SubgraphError, value::Word}, + data::{store::Id, subgraph::schema::SubgraphError}, data_source::CausalityRegion, prelude::DeploymentHash, util::cache_weight::CacheWeight, @@ -59,7 +59,7 @@ pub enum EntityModification { /// A helper struct for passing entity writes to the outside world, viz. the /// SQL query generation that inserts rows pub struct EntityWrite<'a> { - pub id: &'a Word, + pub id: &'a Id, pub entity: &'a Entity, pub causality_region: CausalityRegion, pub block: BlockNumber, @@ -98,7 +98,7 @@ impl<'a> TryFrom<&'a EntityModification> for EntityWrite<'a> { } impl EntityModification { - pub fn id(&self) -> &Word { + pub fn id(&self) -> &Id { match self { EntityModification::Insert { key, .. } | EntityModification::Overwrite { key, .. 
} @@ -383,7 +383,7 @@ impl RowGroup { } /// Find the most recent entry for `id` - fn prev_row_mut(&mut self, id: &Word) -> Option<&mut EntityModification> { + fn prev_row_mut(&mut self, id: &Id) -> Option<&mut EntityModification> { self.rows.iter_mut().rfind(|emod| emod.id() == id) } @@ -475,8 +475,8 @@ impl RowGroup { Ok(()) } - pub fn ids(&self) -> impl Iterator { - self.rows.iter().map(|emod| emod.id().as_str()) + pub fn ids(&self) -> impl Iterator { + self.rows.iter().map(|emod| emod.id()) } } @@ -899,6 +899,7 @@ mod test { components::store::{ write::EntityModification, write::EntityOp, BlockNumber, EntityType, StoreError, }, + data::{store::Id, value::Word}, entity, prelude::DeploymentHash, schema::InputSchema, @@ -909,13 +910,17 @@ mod test { #[track_caller] fn check_runs(values: &[usize], blocks: &[BlockNumber], exp: &[(BlockNumber, &[usize])]) { + fn as_id(n: &usize) -> Id { + Id::String(Word::from(n.to_string())) + } + assert_eq!(values.len(), blocks.len()); let rows = values .iter() .zip(blocks.iter()) .map(|(value, block)| EntityModification::Remove { - key: ROW_GROUP_TYPE.key(value.to_string()), + key: ROW_GROUP_TYPE.key(Id::String(Word::from(value.to_string()))), block: *block, }) .collect(); @@ -931,14 +936,14 @@ mod test { block, entries .iter() - .map(|entry| entry.id().parse().unwrap()) + .map(|entry| entry.id().clone()) .collect::>(), ) }) .collect::>(); let exp = Vec::from_iter( exp.into_iter() - .map(|(block, values)| (*block, Vec::from_iter(values.iter().cloned()))), + .map(|(block, values)| (*block, Vec::from_iter(values.iter().map(as_id)))), ); assert_eq!(exp, act); } @@ -988,7 +993,7 @@ mod test { use Mod::*; let value = value.clone(); - let key = THING_TYPE.key("one"); + let key = THING_TYPE.parse_key("one").unwrap(); match value { Ins(block) => EntityModification::Insert { key, @@ -1092,7 +1097,7 @@ mod test { fn last_op() { #[track_caller] fn is_remove(group: &RowGroup, at: BlockNumber) { - let key = THING_TYPE.key("one"); + let key = 
THING_TYPE.parse_key("one").unwrap(); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1104,7 +1109,7 @@ mod test { } #[track_caller] fn is_write(group: &RowGroup, at: BlockNumber) { - let key = THING_TYPE.key("one"); + let key = THING_TYPE.parse_key("one").unwrap(); let op = group.last_op(&key, at).unwrap(); assert!( @@ -1117,7 +1122,7 @@ mod test { use Mod::*; - let key = THING_TYPE.key("one"); + let key = THING_TYPE.parse_key("one").unwrap(); // This will result in two mods int the group: // [ InsC(1,2), InsC(2,3) ] diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 2569f7dac93..13418b9f61b 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -36,6 +36,7 @@ pub type SharedProofOfIndexing = Option>>; mod tests { use super::*; use crate::{ + data::store::Id, prelude::{BlockPtr, DeploymentHash, Value}, schema::InputSchema, }; @@ -71,7 +72,7 @@ mod tests { // pretty foolproof so that the actual usage will also match. // Create a database which stores intermediate PoIs - let mut db = HashMap::>::new(); + let mut db = HashMap::>::new(); let mut block_count = 1; for causality_region in case.data.causality_regions.values() { diff --git a/graph/src/components/subgraph/proof_of_indexing/online.rs b/graph/src/components/subgraph/proof_of_indexing/online.rs index 06e970dd1e1..caaa76f0a76 100644 --- a/graph/src/components/subgraph/proof_of_indexing/online.rs +++ b/graph/src/components/subgraph/proof_of_indexing/online.rs @@ -5,6 +5,7 @@ use super::{ProofOfIndexingEvent, ProofOfIndexingVersion}; use crate::{ blockchain::BlockPtr, + data::store::Id, prelude::{debug, BlockNumber, DeploymentHash, Logger, ENV_VARS}, util::stable_hash_glue::AsBytes, }; @@ -166,7 +167,7 @@ pub struct ProofOfIndexing { /// some data sources (eg: IPFS files) may be unreliable and therefore cannot mix /// state with other data sources. 
This may also give us some freedom to change /// the order of triggers in the future. - per_causality_region: HashMap, + per_causality_region: HashMap, } impl fmt::Debug for ProofOfIndexing { @@ -227,18 +228,18 @@ impl ProofOfIndexing { where F: FnOnce(&mut BlockEventStream) -> T, { - if let Some(causality_region) = self.per_causality_region.get_mut(causality_region) { + let causality_region = Id::String(causality_region.to_owned().into()); + if let Some(causality_region) = self.per_causality_region.get_mut(&causality_region) { f(causality_region) } else { let mut entry = BlockEventStream::new(self.block_number, self.version); let result = f(&mut entry); - self.per_causality_region - .insert(causality_region.to_owned(), entry); + self.per_causality_region.insert(causality_region, entry); result } } - pub fn take(self) -> HashMap { + pub fn take(self) -> HashMap { self.per_causality_region } } @@ -274,7 +275,7 @@ impl ProofOfIndexingFinisher { } } - pub fn add_causality_region(&mut self, name: &str, region: &[u8]) { + pub fn add_causality_region(&mut self, name: &Id, region: &[u8]) { let mut state = Hashers::from_bytes(region); // Finish the blocks vec by writing kvp[v], PoICausalityRegion.blocks.len() diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index ec0d314ba11..e1564041a93 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -75,6 +75,8 @@ pub enum QueryExecutionError { InvalidSubgraphManifest, ResultTooBig(usize, usize), DeploymentNotFound(String), + IdMissing, + IdNotString, } impl QueryExecutionError { @@ -132,7 +134,9 @@ impl QueryExecutionError { | InvalidSubgraphManifest | ValidationError(_, _) | ResultTooBig(_, _) - | DeploymentNotFound(_) => false, + | DeploymentNotFound(_) + | IdMissing + | IdNotString => false, } } } @@ -279,7 +283,9 @@ impl fmt::Display for QueryExecutionError { SubgraphManifestResolveError(e) => write!(f, "failed to resolve subgraph manifest: {}", e), InvalidSubgraphManifest 
=> write!(f, "invalid subgraph manifest file"), ResultTooBig(actual, limit) => write!(f, "the result size of {} is larger than the allowed limit of {}", actual, limit), - DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name) + DeploymentNotFound(id_or_name) => write!(f, "deployment `{}` does not exist", id_or_name), + IdMissing => write!(f, "entity is missing an `id` attribute"), + IdNotString => write!(f, "entity `id` attribute is not a string"), } } } @@ -328,6 +334,12 @@ impl From for QueryExecutionError { } } +impl From for diesel::result::Error { + fn from(e: QueryExecutionError) -> Self { + diesel::result::Error::QueryBuilderError(Box::new(e)) + } +} + /// Error caused while processing a [Query](struct.Query.html) request. #[derive(Clone, Debug)] pub enum QueryError { diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index bab0c419ec3..9881410b571 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -1,12 +1,26 @@ //! Types and helpers to deal with entity IDs which support a subset of the //! 
types that more general values support use anyhow::{anyhow, Error}; +use stable_hash::{StableHash, StableHasher}; +use std::convert::TryFrom; +use std::fmt; use crate::{ data::graphql::{ObjectTypeExt, TypeExt}, prelude::s, }; +use crate::{ + components::store::StoreError, + constraint_violation, + data::value::Word, + prelude::{CacheWeight, QueryExecutionError}, + runtime::gas::{Gas, GasSizeOf}, + schema::EntityType, +}; + +use super::{scalar, Value}; + /// The types that can be used for the `id` of an entity #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum IdType { @@ -14,6 +28,23 @@ pub enum IdType { Bytes, } +impl IdType { + /// Parse the given string into an ID of this type + pub fn parse(&self, s: Word) -> Result { + match self { + IdType::String => Ok(Id::String(s)), + IdType::Bytes => Ok(Id::Bytes(s.parse()?)), + } + } + + pub fn as_str(&self) -> &str { + match self { + IdType::String => "String", + IdType::Bytes => "Bytes", + } + } +} + impl<'a> TryFrom<&s::ObjectType> for IdType { type Error = Error; @@ -23,13 +54,337 @@ impl<'a> TryFrom<&s::ObjectType> for IdType { match base_type { "ID" | "String" => Ok(IdType::String), "Bytes" => Ok(IdType::Bytes), - s => { - return Err(anyhow!( - "Entity type {} uses illegal type {} for id column", - obj_type.name, - s - )) + s => Err(anyhow!( + "Entity type {} uses illegal type {} for id column", + obj_type.name, + s + )), + } + } +} + +impl std::fmt::Display for IdType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +/// Values for the ids of entities +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum Id { + String(Word), + Bytes(scalar::Bytes), +} + +impl Id { + pub fn id_type(&self) -> IdType { + match self { + Id::String(_) => IdType::String, + Id::Bytes(_) => IdType::Bytes, + } + } +} + +impl std::hash::Hash for Id { + fn hash(&self, state: &mut H) { + core::mem::discriminant(self).hash(state); + match self { + 
Id::String(s) => s.hash(state), + Id::Bytes(b) => b.hash(state), + } + } +} + +impl PartialEq for Id { + fn eq(&self, other: &Value) -> bool { + match (self, other) { + (Id::String(s), Value::String(v)) => s.as_str() == v.as_str(), + (Id::Bytes(s), Value::Bytes(v)) => s == v, + _ => false, + } + } +} + +impl PartialEq for Value { + fn eq(&self, other: &Id) -> bool { + other.eq(self) + } +} + +impl TryFrom for Id { + type Error = Error; + + fn try_from(value: Value) -> Result { + match value { + Value::String(s) => Ok(Id::String(Word::from(s))), + Value::Bytes(b) => Ok(Id::Bytes(b)), + _ => Err(anyhow!( + "expected string or bytes for id but found {:?}", + value + )), + } + } +} + +impl From for Value { + fn from(value: Id) -> Self { + match value { + Id::String(s) => Value::String(s.into()), + Id::Bytes(b) => Value::Bytes(b), + } + } +} + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Id::String(s) => write!(f, "{}", s), + Id::Bytes(b) => write!(f, "{}", b), + } + } +} + +impl CacheWeight for Id { + fn indirect_weight(&self) -> usize { + match self { + Id::String(s) => s.indirect_weight(), + Id::Bytes(b) => b.indirect_weight(), + } + } +} + +impl GasSizeOf for Id { + fn gas_size_of(&self) -> Gas { + match self { + Id::String(s) => s.gas_size_of(), + Id::Bytes(b) => b.gas_size_of(), + } + } +} + +impl StableHash for Id { + fn stable_hash(&self, field_address: H::Addr, state: &mut H) { + match self { + Id::String(s) => stable_hash::StableHash::stable_hash(s, field_address, state), + Id::Bytes(b) => { + // We have to convert here to a string `0xdeadbeef` for + // backwards compatibility. 
It would be nice to avoid that + // allocation and just use the bytes directly, but that will + // break PoI compatibility + stable_hash::StableHash::stable_hash(&b.to_string(), field_address, state) } } } } + +impl stable_hash_legacy::StableHash for Id { + fn stable_hash( + &self, + sequence_number: H::Seq, + state: &mut H, + ) { + match self { + Id::String(s) => stable_hash_legacy::StableHash::stable_hash(s, sequence_number, state), + Id::Bytes(b) => { + stable_hash_legacy::StableHash::stable_hash(&b.to_string(), sequence_number, state) + } + } + } +} + +/// A value that contains a reference to the underlying data for an entity +/// ID. This is used to avoid cloning the ID when it is not necessary. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum IdRef<'a> { + String(&'a str), + Bytes(&'a [u8]), +} + +impl std::fmt::Display for IdRef<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IdRef::String(s) => write!(f, "{}", s), + IdRef::Bytes(b) => write!(f, "0x{}", hex::encode(b)), + } + } +} + +impl<'a> IdRef<'a> { + pub fn to_value(self) -> Id { + match self { + IdRef::String(s) => Id::String(Word::from(s.to_owned())), + IdRef::Bytes(b) => Id::Bytes(scalar::Bytes::from(b)), + } + } +} + +/// A homogeneous list of entity ids, i.e., all ids in the list are of the +/// same `IdType` +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum IdList { + String(Vec), + Bytes(Vec), +} + +impl IdList { + pub fn new(typ: IdType) -> Self { + match typ { + IdType::String => IdList::String(Vec::new()), + IdType::Bytes => IdList::Bytes(Vec::new()), + } + } + + pub fn len(&self) -> usize { + match self { + IdList::String(ids) => ids.len(), + IdList::Bytes(ids) => ids.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Turn a list of ids into an `IdList` and check that they are all the + /// same type + pub fn try_from_iter>( + entity_type: &EntityType, + mut iter: I, + ) -> Result { + let id_type = 
entity_type.id_type()?; + match id_type { + IdType::String => { + let ids: Vec = iter.try_fold(vec![], |mut ids, id| match id { + Id::String(id) => { + ids.push(id); + Ok(ids) + } + Id::Bytes(id) => Err(constraint_violation!( + "expected string id, got bytes: {}", + id, + )), + })?; + Ok(IdList::String(ids)) + } + IdType::Bytes => { + let ids: Vec = iter.try_fold(vec![], |mut ids, id| match id { + Id::String(id) => Err(constraint_violation!( + "expected bytes id, got string: {}", + id, + )), + Id::Bytes(id) => { + ids.push(id); + Ok(ids) + } + })?; + Ok(IdList::Bytes(ids)) + } + } + } + + /// Turn a list of references to ids into an `IdList` and check that + /// they are all the same type. Note that this method clones all the ids + /// and `try_from_iter` is therefore preferrable + pub fn try_from_iter_ref<'a, I: Iterator>>( + mut iter: I, + ) -> Result { + let first = match iter.next() { + Some(id) => id, + None => return Ok(IdList::String(Vec::new())), + }; + match first { + IdRef::String(s) => { + let ids: Vec<_> = iter.try_fold(vec![Word::from(s)], |mut ids, id| match id { + IdRef::String(id) => { + ids.push(Word::from(id)); + Ok(ids) + } + IdRef::Bytes(id) => Err(constraint_violation!( + "expected string id, got bytes: 0x{}", + hex::encode(id), + )), + })?; + Ok(IdList::String(ids)) + } + IdRef::Bytes(b) => { + let ids: Vec<_> = + iter.try_fold(vec![scalar::Bytes::from(b)], |mut ids, id| match id { + IdRef::String(id) => Err(constraint_violation!( + "expected bytes id, got string: {}", + id, + )), + IdRef::Bytes(id) => { + ids.push(scalar::Bytes::from(id)); + Ok(ids) + } + })?; + Ok(IdList::Bytes(ids)) + } + } + } + + pub fn index(&self, index: usize) -> IdRef<'_> { + match self { + IdList::String(ids) => IdRef::String(&ids[index]), + IdList::Bytes(ids) => IdRef::Bytes(ids[index].as_slice()), + } + } + + pub fn first(&self) -> Option> { + if self.len() > 0 { + Some(self.index(0)) + } else { + None + } + } + + pub fn iter(&self) -> Box> + '_> { + match self { + 
IdList::String(ids) => Box::new(ids.iter().map(|id| IdRef::String(id))), + IdList::Bytes(ids) => Box::new(ids.iter().map(|id| IdRef::Bytes(id))), + } + } + + pub fn as_unique(self) -> Self { + match self { + IdList::String(mut ids) => { + ids.sort_unstable(); + ids.dedup(); + IdList::String(ids) + } + IdList::Bytes(mut ids) => { + ids.sort_unstable_by(|id1, id2| id1.as_slice().cmp(id2.as_slice())); + ids.dedup(); + IdList::Bytes(ids) + } + } + } + + pub fn push(&mut self, entity_id: Id) -> Result<(), StoreError> { + match (self, entity_id) { + (IdList::String(ids), Id::String(id)) => { + ids.push(id); + Ok(()) + } + (IdList::Bytes(ids), Id::Bytes(id)) => { + ids.push(id); + Ok(()) + } + (IdList::String(_), Id::Bytes(b)) => Err(constraint_violation!( + "expected id of type string, but got Bytes[{}]", + b + )), + (IdList::Bytes(_), Id::String(s)) => Err(constraint_violation!( + "expected id of type bytes, but got String[{}]", + s + )), + } + } + + pub fn as_ids(self) -> Vec { + match self { + IdList::String(ids) => ids.into_iter().map(Id::String).collect(), + IdList::Bytes(ids) => ids.into_iter().map(Id::Bytes).collect(), + } + } +} diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index a243913b51f..55e4cea632e 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -28,7 +28,7 @@ use super::{ /// Handling of entity ids mod id; -pub use id::IdType; +pub use id::{Id, IdList, IdRef, IdType}; /// Custom scalars in GraphQL. pub mod scalar; @@ -806,12 +806,8 @@ impl Entity { /// string. If it is `Bytes`, return it as a hex string with a `0x` /// prefix. 
If the ID is not set or anything but a `String` or `Bytes`, /// return an error - pub fn id(&self) -> Word { - match self.get("id") { - Some(Value::String(s)) => Word::from(s.clone()), - Some(Value::Bytes(b)) => Word::from(b.to_string()), - None | Some(_) => unreachable!("we checked the id when constructing this entity"), - } + pub fn id(&self) -> Id { + Id::try_from(self.get("id").unwrap().clone()).expect("the id is set to a valid value") } /// Merges an entity update `update` into this entity. @@ -1047,7 +1043,7 @@ impl std::fmt::Debug for Entity { /// possibly a pointer to its parent if the query that constructed it is one /// that depends on parents pub struct QueryObject { - pub parent: Option, + pub parent: Option, pub entity: r::Object, } diff --git a/graph/src/data/store/scalar.rs b/graph/src/data/store/scalar.rs index 43d9b168004..53b247917ef 100644 --- a/graph/src/data/store/scalar.rs +++ b/graph/src/data/store/scalar.rs @@ -589,7 +589,7 @@ impl GasSizeOf for BigInt { } /// A byte array that's serialized as a hex string prefixed by `0x`. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct Bytes(Box<[u8]>); impl Deref for Bytes { @@ -678,6 +678,15 @@ impl From> for Bytes { } } +impl ToSql for Bytes { + fn to_sql( + &self, + out: &mut diesel::serialize::Output, + ) -> diesel::serialize::Result { + <_ as ToSql>::to_sql(self.as_slice(), out) + } +} + #[cfg(test)] mod test { use super::{BigDecimal, BigInt, Bytes}; diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index c17d52b6d47..0edf237e42e 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -1,9 +1,14 @@ use crate::prelude::{q, s, CacheWeight}; use crate::runtime::gas::{Gas, GasSizeOf, SaturatingInto}; +use diesel::pg::Pg; +use diesel::serialize::{self, Output}; +use diesel::sql_types::Text; +use diesel::types::ToSql; use serde::ser::{SerializeMap, SerializeSeq, Serializer}; use serde::Serialize; use std::collections::BTreeMap; use std::convert::TryFrom; +use std::io::Write; use std::iter::FromIterator; /// An immutable string that is more memory-efficient since it only has an @@ -68,6 +73,12 @@ impl<'de> serde::Deserialize<'de> for Word { } } +impl ToSql for Word { + fn to_sql(&self, out: &mut Output) -> serialize::Result { + >::to_sql(&self.0, out) + } +} + impl stable_hash_legacy::StableHash for Word { #[inline] fn stable_hash( diff --git a/graph/src/schema/entity_key.rs b/graph/src/schema/entity_key.rs index 8c1e8fbd6eb..6508131b35a 100644 --- a/graph/src/schema/entity_key.rs +++ b/graph/src/schema/entity_key.rs @@ -1,10 +1,7 @@ use std::fmt; -use anyhow::Error; - use crate::components::store::StoreError; -use crate::data::store::Value; -use crate::data::value::Word; +use crate::data::store::{Id, Value}; use crate::data_source::CausalityRegion; use crate::schema::EntityType; use crate::util::intern; @@ -17,7 +14,7 @@ pub struct EntityKey { pub entity_type: EntityType, /// ID of the individual entity. 
- pub entity_id: Word, + pub entity_id: Id, /// This is the causality region of the data source that created the entity. /// @@ -38,7 +35,7 @@ impl EntityKey { impl EntityKey { pub(in crate::schema) fn new( entity_type: EntityType, - entity_id: Word, + entity_id: Id, causality_region: CausalityRegion, ) -> Self { Self { @@ -49,8 +46,8 @@ impl EntityKey { } } - pub fn id_value(&self) -> Result { - self.entity_type.id_value(self.entity_id.clone()) + pub fn id_value(&self) -> Value { + Value::from(self.entity_id.clone()) } } diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index ac7ce7fb540..b57f0cf757e 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -5,7 +5,7 @@ use serde::Serialize; use crate::{ cheap_clone::CheapClone, - data::store::Value, + data::store::{Id, IdList}, data::{graphql::ObjectOrInterface, store::IdType, value::Word}, data_source::causality_region::CausalityRegion, prelude::s, @@ -57,27 +57,52 @@ impl EntityType { } /// Create a key from this type for an onchain entity - pub fn key(&self, id: impl Into) -> EntityKey { + pub fn key(&self, id: Id) -> EntityKey { self.key_in(id, CausalityRegion::ONCHAIN) } /// Create a key from this type for an entity in the given causality region - pub fn key_in(&self, id: impl Into, causality_region: CausalityRegion) -> EntityKey { - EntityKey::new(self.cheap_clone(), id.into(), causality_region) + pub fn key_in(&self, id: Id, causality_region: CausalityRegion) -> EntityKey { + EntityKey::new(self.cheap_clone(), id, causality_region) } - /// Construct a `Value` for the given id and parse it into the correct - /// type if necessary - pub fn id_value(&self, id: impl Into) -> Result { + /// Construct an `Id` from the given string and parse it into the + /// correct type if necessary + pub fn parse_id(&self, id: impl Into) -> Result { let id = id.into(); let id_type = self .schema .id_type(self.atom) .with_context(|| format!("error determining id_type for 
{}[{}]", self.as_str(), id))?; - match id_type { - IdType::String => Ok(Value::String(id.to_string())), - IdType::Bytes => Ok(Value::Bytes(id.parse()?)), - } + id_type.parse(id) + } + + /// Construct an `IdList` from a list of given strings and parse them + /// into the correct type if necessary + pub fn parse_ids(&self, ids: Vec>) -> Result { + let ids: Vec<_> = ids + .into_iter() + .map(|id| self.parse_id(id)) + .collect::>()?; + IdList::try_from_iter(self, ids.into_iter()).map_err(|e| anyhow::anyhow!("error: {}", e)) + } + + /// Parse the given `id` into an `Id` and construct a key for an onchain + /// entity from it + pub fn parse_key(&self, id: impl Into) -> Result { + let id_value = self.parse_id(id)?; + Ok(self.key(id_value)) + } + + /// Parse the given `id` into an `Id` and construct a key for an entity + /// in the give causality region from it + pub fn parse_key_in( + &self, + id: impl Into, + causality_region: CausalityRegion, + ) -> Result { + let id_value = self.parse_id(id.into())?; + Ok(self.key_in(id_value, causality_region)) } fn same_pool(&self, other: &EntityType) -> bool { diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index e99d7f0f0ad..c4ffe1b3fda 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -223,34 +223,7 @@ impl InputSchema { /// /// When asked to load the related entities from "Account" in the field "wallets" /// This function will return the type "Wallet" with the field "account" - pub fn get_field_related( - &self, - key: &LoadRelatedRequest, - ) -> Result<(&str, &s::Field, bool), Error> { - let id_field = self - .inner - .schema - .document - .get_object_type_definition(key.entity_type.as_str()) - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown entity type `{}`", - key.entity_type, - key.entity_id, - key.entity_type, - ) - })? 
- .field("id") - .ok_or_else(|| { - anyhow!( - "Entity {}[{}]: unknown field `{}`", - key.entity_type, - key.entity_id, - key.entity_field, - ) - })?; - - let id_is_bytes = id_field.field_type.get_base_type() == "Bytes"; + pub fn get_field_related(&self, key: &LoadRelatedRequest) -> Result<(&str, &s::Field), Error> { let field = self .inner .schema @@ -301,7 +274,7 @@ impl InputSchema { ) })?; - Ok((base_type, field, id_is_bytes)) + Ok((base_type, field)) } else { Err(anyhow!( "Entity {}[{}]: field `{}` is not derived", diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index f404a180a4a..9a919e773a0 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -1,15 +1,17 @@ //! Run a GraphQL query and fetch all the entitied needed to build the //! final result -use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; +use graph::data::store::Id; +use graph::data::store::IdList; +use graph::data::store::IdType; use graph::data::store::QueryObject; use graph::data::value::{Object, Word}; use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; use graph::util::cache_weight; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::rc::Rc; use std::time::Instant; @@ -19,8 +21,8 @@ use graph::{ data::graphql::ext::DirectiveFinder, prelude::{ s, AttributeNames, ChildMultiplicity, EntityCollection, EntityFilter, EntityLink, - EntityOrder, EntityWindow, ParentLink, QueryExecutionError, StoreError, - Value as StoreValue, WindowAttribute, ENV_VARS, + EntityOrder, EntityWindow, ParentLink, QueryExecutionError, Value as StoreValue, + WindowAttribute, ENV_VARS, }, }; @@ -41,7 +43,7 @@ struct Node { /// the keys and values of the `children` map, but not of the map itself children_weight: usize, - parent: Option, + parent: Option, entity: Object, /// We are using an `Rc` here for two reasons: it allows us to defer @@ -163,6 +165,7 @@ impl From for 
r::Value { trait ValueExt { fn as_str(&self) -> Option<&str>; + fn as_id(&self, id_type: IdType) -> Option; } impl ValueExt for r::Value { @@ -172,14 +175,25 @@ impl ValueExt for r::Value { _ => None, } } + + fn as_id(&self, id_type: IdType) -> Option { + match self { + r::Value::String(s) => id_type.parse(Word::from(s.as_str())).ok(), + _ => None, + } + } } impl Node { - fn id(&self) -> Result { + fn id(&self, schema: &InputSchema) -> Result { + let entity_type = schema.entity_type(self.typename())?; match self.get("id") { - None => Err(anyhow!("Entity is missing an `id` attribute")), - Some(r::Value::String(s)) => Ok(s.clone()), - _ => Err(anyhow!("Entity has non-string `id` attribute")), + None => Err(QueryExecutionError::IdMissing), + Some(r::Value::String(s)) => { + let id = entity_type.parse_id(s.as_str())?; + Ok(id) + } + _ => Err(QueryExecutionError::IdNotString), } } @@ -282,38 +296,45 @@ impl<'a> JoinCond<'a> { fn entity_link( &self, - parents_by_id: Vec<(String, &Node)>, + parents_by_id: Vec<(Id, &Node)>, multiplicity: ChildMultiplicity, - ) -> (Vec, EntityLink) { + ) -> Result<(IdList, EntityLink), QueryExecutionError> { match &self.relation { JoinRelation::Direct(field) => { // we only need the parent ids - let ids = parents_by_id.into_iter().map(|(id, _)| id).collect(); - ( + let ids = IdList::try_from_iter( + &self.parent_type, + parents_by_id.into_iter().map(|(id, _)| id), + )?; + Ok(( ids, EntityLink::Direct(field.window_attribute(), multiplicity), - ) + )) } JoinRelation::Derived(field) => { let (ids, parent_link) = match field { JoinField::Scalar(child_field) => { // child_field contains a String id of the child; extract // those and the parent ids + let id_type = self.child_type.id_type().unwrap(); let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() .filter_map(|(id, node)| { node.get(child_field) - .and_then(|value| value.as_str()) + .and_then(|value| value.as_id(id_type)) .map(|child_id| (id, child_id.to_owned())) }) .unzip(); 
- + let ids = IdList::try_from_iter(&self.parent_type, ids.into_iter())?; + let child_ids = + IdList::try_from_iter(&self.child_type, child_ids.into_iter())?; (ids, ParentLink::Scalar(child_ids)) } JoinField::List(child_field) => { // child_field stores a list of child ids; extract them, // turn them into a list of strings and combine with the // parent ids + let id_type = self.child_type.id_type().unwrap(); let (ids, child_ids): (Vec<_>, Vec<_>) = parents_by_id .into_iter() .filter_map(|(id, node)| { @@ -322,9 +343,7 @@ impl<'a> JoinCond<'a> { r::Value::List(values) => { let values: Vec<_> = values .iter() - .filter_map(|value| { - value.as_str().map(|value| value.to_owned()) - }) + .filter_map(|value| value.as_id(id_type)) .collect(); if values.is_empty() { None @@ -337,13 +356,18 @@ impl<'a> JoinCond<'a> { .map(|child_ids| (id, child_ids)) }) .unzip(); + let ids = IdList::try_from_iter(&self.parent_type, ids.into_iter())?; + let child_ids = child_ids + .into_iter() + .map(|ids| IdList::try_from_iter(&self.child_type, ids.into_iter())) + .collect::, _>>()?; (ids, ParentLink::List(child_ids)) } }; - ( + Ok(( ids, EntityLink::Parent(self.parent_type.clone(), parent_link), - ) + )) } } } @@ -380,24 +404,25 @@ impl<'a> Join<'a> { fn windows( &self, + schema: &InputSchema, parents: &[&mut Node], multiplicity: ChildMultiplicity, previous_collection: &EntityCollection, - ) -> Vec { + ) -> Result, QueryExecutionError> { let mut windows = vec![]; let column_names_map = previous_collection.entity_types_and_column_names(); for cond in &self.conds { let mut parents_by_id = parents .iter() .filter(|parent| parent.typename() == cond.parent_type.as_str()) - .filter_map(|parent| parent.id().ok().map(|id| (id, &**parent))) + .filter_map(|parent| parent.id(schema).ok().map(|id| (id, &**parent))) .collect::>(); if !parents_by_id.is_empty() { parents_by_id.sort_unstable_by(|(id1, _), (id2, _)| id1.cmp(id2)); parents_by_id.dedup_by(|(id1, _), (id2, _)| id1 == id2); - let (ids, link) 
= cond.entity_link(parents_by_id, multiplicity); + let (ids, link) = cond.entity_link(parents_by_id, multiplicity)?; let child_type: EntityType = cond.child_type.clone(); let column_names = match column_names_map.get(&child_type) { Some(column_names) => column_names.clone(), @@ -411,7 +436,7 @@ impl<'a> Join<'a> { }); } } - windows + Ok(windows) } } @@ -450,6 +475,7 @@ impl<'a> MaybeJoin<'a> { /// If `parents` only has one entry, add all children to that one parent. In /// particular, this is what happens for toplevel queries. fn add_children( + schema: &InputSchema, parents: &mut [&mut Node], children: Vec, response_key: &str, @@ -466,19 +492,19 @@ fn add_children( // children to their parent. This relies on the fact that interfaces // make sure that id's are distinct across all implementations of the // interface. - let mut grouped: BTreeMap<&str, Vec>> = BTreeMap::default(); + let mut grouped: HashMap<&Id, Vec>> = HashMap::default(); for child in children.iter() { let parent = child.parent.as_ref().ok_or_else(|| { QueryExecutionError::Panic(format!( "child {}[{}] is missing a parent id", child.typename(), - child.id().unwrap_or_else(|_| "".to_owned()) + child + .id(schema) + .map(|id| id.to_string()) + .unwrap_or_else(|_| "".to_owned()) )) })?; - match parent { - r::Value::String(key) => grouped.entry(key).or_default().push(child.clone()), - _ => unreachable!("the parent_id returned by the query is always a string"), - } + grouped.entry(parent).or_default().push(child.clone()); } // Add appropriate children using grouped map @@ -490,7 +516,10 @@ fn add_children( // interface level and in nested object type conditions. The values for the interface // query are always joined first, and may then be overwritten by the merged selection // set under the object type condition. 
See also: e0d6da3e-60cf-41a5-b83c-b60a7a766d4a - let values = parent.id().ok().and_then(|id| grouped.get(&*id).cloned()); + let values = parent + .id(schema) + .ok() + .and_then(|id| grouped.get(&id).cloned()); parent.set_children(response_key.to_owned(), values.unwrap_or_default()); } @@ -649,7 +678,12 @@ fn execute_selection_set<'a>( &field.selection_set, ) { Ok((children, trace)) => { - add_children(&mut parents, children, field.response_key())?; + add_children( + &input_schema, + &mut parents, + children, + field.response_key(), + )?; let weight = parents.iter().map(|parent| parent.weight()).sum::(); check_result_size(ctx, weight)?; @@ -723,7 +757,7 @@ fn fetch( selected_attrs, &super::query::SchemaPair { api: ctx.query.schema.clone(), - input: input_schema, + input: input_schema.cheap_clone(), }, )?; query.trace = ctx.trace; @@ -746,7 +780,7 @@ fn fetch( if let MaybeJoin::Nested(join) = join { // For anything but the root node, restrict the children we select // by the parent list - let windows = join.windows(parents, multiplicity, &query.collection); + let windows = join.windows(&input_schema, parents, multiplicity, &query.collection)?; if windows.is_empty() { return Ok((vec![], Trace::None)); } diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 360a694aa07..984a298fefc 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -439,7 +439,7 @@ fn make_thing(id: &str, value: &str) -> (String, EntityModification) { static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } let data = entity! 
{ SCHEMA => id: id, value: value, extra: USER_DATA }; - let key = THING_TYPE.key(id); + let key = THING_TYPE.parse_key(id).unwrap(); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), EntityModification::insert(key, data, 0), @@ -1341,7 +1341,7 @@ async fn test_store_set_id() { let entity = host.store_get(USER, UID).unwrap().unwrap(); assert_eq!( "u1", - entity.id().as_str(), + entity.id().to_string(), "store.set sets id automatically" ); @@ -1349,7 +1349,10 @@ async fn test_store_set_id() { let err = host .store_setv(USER, "0xbeef", vec![("id", beef)]) .expect_err("setting with Bytes id fails"); - err_says(err, "must have type ID! but has type Bytes"); + err_says( + err, + "Attribute `User.id` has wrong type: expected String but got Bytes", + ); host.store_setv(USER, UID, vec![("id", Value::Int(32))]) .expect_err("id must be a string"); @@ -1362,7 +1365,10 @@ async fn test_store_set_id() { let err = host .store_set(BINARY, BID, vec![("id", BID), ("name", "user1")]) .expect_err("setting with string id in values fails"); - err_says(err, "must have type Bytes! 
but has type String"); + err_says( + err, + "Attribute `Binary.id` has wrong type: expected Bytes but got String", + ); host.store_setv( BINARY, @@ -1381,12 +1387,19 @@ async fn test_store_set_id() { .expect("setting with no id works"); let entity = host.store_get(BINARY, BID).unwrap().unwrap(); - assert_eq!(BID, entity.id().as_str(), "store.set sets id automatically"); + assert_eq!( + BID, + entity.id().to_string(), + "store.set sets id automatically" + ); let err = host .store_setv(BINARY, BID, vec![("id", Value::Int(32))]) .expect_err("id must be Bytes"); - err_says(err, "Unsupported type for `id` attribute"); + err_says( + err, + "Attribute `Binary.id` has wrong type: expected Bytes but got Int", + ); } /// Test setting fields that are not defined in the schema diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index c1c4aea0cc2..7edef87be78 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -6,7 +6,7 @@ use std::time::{Duration, Instant}; use graph::data::value::Word; -use graph::schema::{EntityKey, EntityType}; +use graph::schema::EntityType; use never::Never; use semver::Version; use wasmtime::Trap; @@ -17,7 +17,7 @@ use graph::components::store::{EnsLookup, GetScope, LoadRelatedRequest}; use graph::components::subgraph::{ PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, }; -use graph::data::store; +use graph::data::store::{self}; use graph::data_source::{CausalityRegion, DataSource, DataSourceTemplate, EntityTypeAccess}; use graph::ensure; use graph::prelude::ethabi::param_type::Reader; @@ -162,22 +162,8 @@ impl HostExports { stopwatch: &StopwatchMetrics, gas: &GasCounter, ) -> Result<(), HostExportError> { - fn check_id(key: &EntityKey, prev_id: &str) -> Result<(), anyhow::Error> { - if prev_id != key.entity_id.as_str() { - Err(anyhow!( - "Value of {} attribute 'id' conflicts with ID passed to `store.set()`: \ - {} != {}", - key.entity_type, - prev_id, - key.entity_id, - )) 
- } else { - Ok(()) - } - } - let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let key = entity_type.key_in(entity_id, self.data_source_causality_region); + let key = entity_type.parse_key_in(entity_id, self.data_source_causality_region)?; self.check_entity_type_access(&key.entity_type)?; gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; @@ -185,13 +171,29 @@ impl HostExports { // Set the id if there isn't one yet, and make sure that a // previously set id agrees with the one in the `key` match data.get(&store::ID) { - Some(Value::String(s)) => check_id(&key, s)?, - Some(Value::Bytes(b)) => check_id(&key, &b.to_string())?, - Some(_) => { - // The validation will catch the type mismatch + Some(v) => { + if v != &key.entity_id { + if v.type_name() != key.entity_id.id_type().as_str() { + return Err(anyhow!( + "Attribute `{}.id` has wrong type: expected {} but got {}", + key.entity_type, + key.entity_id.id_type().as_str(), + v.type_name(), + ) + .into()); + } + return Err(anyhow!( + "Value of {} attribute 'id' conflicts with ID passed to `store.set()`: \ + {:?} != {:?}", + key.entity_type, + v, + key.entity_id, + ) + .into()); + } } None => { - let value = key.entity_type.id_value(key.entity_id.clone())?; + let value = Value::from(key.entity_id.clone()); data.insert(store::ID.clone(), value); } } @@ -206,7 +208,7 @@ impl HostExports { proof_of_indexing, &ProofOfIndexingEvent::SetEntity { entity_type: &key.entity_type.as_str(), - id: &key.entity_id.as_str(), + id: &key.entity_id.to_string(), data: &entity, }, &self.poi_causality_region, @@ -238,7 +240,7 @@ impl HostExports { logger, ); let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let key = entity_type.key_in(entity_id, self.data_source_causality_region); + let key = entity_type.parse_key_in(entity_id, self.data_source_causality_region)?; self.check_entity_type_access(&key.entity_type)?; 
gas.consume_host_fn(gas::STORE_REMOVE.with_args(complexity::Size, &key))?; @@ -257,7 +259,7 @@ impl HostExports { scope: GetScope, ) -> Result>, anyhow::Error> { let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; - let store_key = entity_type.key_in(entity_id, self.data_source_causality_region); + let store_key = entity_type.parse_key_in(entity_id, self.data_source_causality_region)?; self.check_entity_type_access(&store_key.entity_type)?; let result = state.entity_cache.get(&store_key, scope)?; @@ -279,9 +281,10 @@ impl HostExports { gas: &GasCounter, ) -> Result, anyhow::Error> { let entity_type = state.entity_cache.schema.entity_type(&entity_type)?; + let key = entity_type.parse_key_in(entity_id, self.data_source_causality_region)?; let store_key = LoadRelatedRequest { - entity_type, - entity_id: entity_id.into(), + entity_type: key.entity_type, + entity_id: key.entity_id, entity_field: entity_field.into(), causality_region: self.data_source_causality_region, }; diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 3eaea81ba34..6f2b2d4e365 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::convert::TryInto; use graph::data::query::Trace; +use graph::data::store::Id; use graph::schema::EntityType; use web3::types::Address; @@ -11,7 +12,7 @@ use graph::components::store::{BlockPtrForNumber, BlockStore, Store}; use graph::components::versions::VERSIONS; use graph::data::graphql::{object, IntoValue, ObjectOrInterface, ValueMap}; use graph::data::subgraph::status; -use graph::data::value::{Object, Word}; +use graph::data::value::Object; use graph::prelude::*; use graph_graphql::prelude::{a, ExecutionContext, Resolver}; @@ -589,7 +590,7 @@ fn entity_changes_to_graphql(entity_changes: Vec) -> r::Value { // First, we isolate updates and deletions with the same entity type. 
let mut updates: BTreeMap> = BTreeMap::new(); - let mut deletions: BTreeMap> = BTreeMap::new(); + let mut deletions: BTreeMap> = BTreeMap::new(); for change in entity_changes { match change { diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index e0d9d485668..8377375138c 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -12,8 +12,8 @@ use graph::components::store::{ }; use graph::components::versions::VERSIONS; use graph::data::query::Trace; +use graph::data::store::{Id, IdList}; use graph::data::subgraph::{status, SPEC_VERSION_0_0_6}; -use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::futures03::FutureExt; use graph::prelude::{ @@ -279,7 +279,7 @@ impl DeploymentStore { conn: &PgConnection, layout: &Layout, entity_type: &EntityType, - entity_id: &Word, + entity_id: &Id, ) -> Result<(), StoreError> { // Collect all types that share an interface implementation with this // entity type, and make sure there are no conflicting IDs. @@ -339,12 +339,7 @@ impl DeploymentStore { let section = stopwatch.start_section("check_interface_entity_uniqueness"); for row in group.writes().filter(|emod| emod.creates_entity()) { // WARNING: This will potentially execute 2 queries for each entity key. 
- self.check_interface_entity_uniqueness( - conn, - layout, - &group.entity_type, - &row.id(), - )?; + self.check_interface_entity_uniqueness(conn, layout, &group.entity_type, row.id())?; } section.end(); @@ -1064,7 +1059,7 @@ impl DeploymentStore { pub(crate) fn get_many( &self, site: Arc, - ids_for_type: &BTreeMap<(EntityType, CausalityRegion), Vec>, + ids_for_type: &BTreeMap<(EntityType, CausalityRegion), IdList>, block: BlockNumber, ) -> Result, StoreError> { if ids_for_type.is_empty() { diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index 3d7a36f8c0e..a80e94cedb5 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -30,7 +30,7 @@ use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; use graph::data::value::Word; use graph::data_source::CausalityRegion; -use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; +use graph::prelude::{q, s, serde_json, EntityQuery, StopwatchMetrics, ENV_VARS}; use graph::schema::{ EntityKey, EntityType, FulltextConfig, FulltextDefinition, InputSchema, SCHEMA_TYPE_NAME, }; @@ -56,7 +56,7 @@ use crate::{ }; use graph::components::store::DerivedEntityQuery; use graph::data::graphql::ext::{DirectiveFinder, ObjectTypeExt}; -use graph::data::store::BYTES_SCALAR; +use graph::data::store::{Id, IdList, BYTES_SCALAR}; use graph::data::subgraph::schema::POI_TABLE; use graph::prelude::{ anyhow, info, BlockNumber, DeploymentHash, Entity, EntityChange, EntityOperation, Logger, @@ -197,10 +197,22 @@ pub(crate) enum IdType { } impl IdType { - pub fn sql_type(&self) -> &str { - match self { - IdType::String => "text", - IdType::Bytes => "bytea", + pub fn parse_id(self, json: serde_json::Value) -> Result { + const HEX_PREFIX: &str = "\\x"; + let id_type = graph::data::store::IdType::from(self); + if let serde_json::Value::String(s) = json { + let s = if s.starts_with(HEX_PREFIX) { + Word::from(s.trim_start_matches(HEX_PREFIX)) + } else { + 
Word::from(s) + }; + id_type.parse(s).map_err(StoreError::from) + } else { + Err(graph::constraint_violation!( + "the value {:?} can not be converted into an id of type {}", + json, + self + )) } } } @@ -234,6 +246,24 @@ impl TryFrom<&s::Type> for IdType { } } +impl From for graph::data::store::IdType { + fn from(id_type: IdType) -> Self { + match id_type { + IdType::String => graph::data::store::IdType::String, + IdType::Bytes => graph::data::store::IdType::Bytes, + } + } +} + +impl std::fmt::Display for IdType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IdType::String => write!(f, "String"), + IdType::Bytes => write!(f, "Bytes"), + } + } +} + type IdTypeMap = HashMap; type EnumMap = BTreeMap>>; @@ -540,7 +570,7 @@ impl Layout { pub fn find_many( &self, conn: &PgConnection, - ids_for_type: &BTreeMap<(EntityType, CausalityRegion), Vec>, + ids_for_type: &BTreeMap<(EntityType, CausalityRegion), IdList>, block: BlockNumber, ) -> Result, StoreError> { if ids_for_type.is_empty() { @@ -551,12 +581,7 @@ impl Layout { for (entity_type, cr) in ids_for_type.keys() { tables.push((self.table_for_entity(entity_type)?.as_ref(), *cr)); } - let query = FindManyQuery { - _namespace: &self.catalog.site.namespace, - ids_for_type, - tables, - block, - }; + let query = FindManyQuery::new(tables, ids_for_type, block); let mut entities: BTreeMap = BTreeMap::new(); for data in query.load::(conn)? { let entity_type = data.entity_type(&self.input_schema); @@ -637,10 +662,10 @@ impl Layout { for del in &deletions { let entity_type = del.entity_type(&self.input_schema); - let entity_id = Word::from(del.id()); // See the doc comment of `FindPossibleDeletionsQuery` for details // about why this check is necessary. 
+ let entity_id = entity_type.parse_id(del.id())?; if !processed_entities.contains(&(entity_type.clone(), entity_id.clone())) { changes.push(EntityOperation::Remove { key: entity_type.key_in(entity_id, del.causality_region()), @@ -675,7 +700,7 @@ impl Layout { pub fn conflicting_entity( &self, conn: &PgConnection, - entity_id: &str, + entity_id: &Id, entities: Vec, ) -> Result, StoreError> { Ok(ConflictingEntityQuery::new(self, entities, entity_id)? @@ -805,7 +830,11 @@ impl Layout { ) -> Result { let table = self.table_for_entity(&group.entity_type)?; if table.immutable && group.has_clamps() { - let ids = group.ids().collect::>().join(", "); + let ids = group + .ids() + .map(|id| id.to_string()) + .collect::>() + .join(", "); return Err(constraint_violation!( "entities of type `{}` can not be updated since they are immutable. Entity ids are [{}]", group.entity_type, @@ -815,8 +844,12 @@ impl Layout { let section = stopwatch.start_section("update_modification_clamp_range_query"); for (block, rows) in group.clamps_by_block() { - let entity_keys: Vec<&str> = rows.iter().map(|row| row.id().as_str()).collect(); - + let entity_keys: Vec<_> = rows.iter().map(|row| row.id()).collect(); + // FIXME: we clone all the ids here + let entity_keys = IdList::try_from_iter( + &group.entity_type, + entity_keys.into_iter().map(|id| id.to_owned()), + )?; ClampRangeQuery::new(table, &entity_keys, block)?.execute(conn)?; } section.end(); @@ -856,9 +889,14 @@ impl Layout { let _section = stopwatch.start_section("delete_modification_clamp_range_query"); let mut count = 0; for (block, rows) in group.clamps_by_block() { - let ids: Vec<_> = rows.iter().map(|eref| eref.id().as_str()).collect(); + let ids: Vec<_> = rows.iter().map(|eref| eref.id()).collect(); for chunk in ids.chunks(DELETE_OPERATION_CHUNK_SIZE) { - count += ClampRangeQuery::new(table, chunk, block)?.execute(conn)? 
+ // FIXME: we clone all the ids here + let chunk = IdList::try_from_iter( + &group.entity_type, + chunk.into_iter().map(|id| (*id).to_owned()), + )?; + count += ClampRangeQuery::new(table, &chunk, block)?.execute(conn)? } } Ok(count) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 96175db9699..49cae1e2541 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -14,8 +14,8 @@ use diesel::Connection; use graph::components::store::write::WriteChunk; use graph::components::store::DerivedEntityQuery; -use graph::data::store::QueryObject; -use graph::data::store::NULL; +use graph::data::store::{Id, NULL}; +use graph::data::store::{IdList, IdRef, QueryObject}; use graph::data::value::{Object, Word}; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -102,41 +102,12 @@ macro_rules! constraint_violation { }} } -fn str_as_bytes(id: &str) -> QueryResult { - scalar::Bytes::from_str(id).map_err(|e| DieselError::SerializationError(Box::new(e))) -} - /// Convert Postgres string representation of bytes "\xdeadbeef" /// to ours of just "deadbeef". 
fn bytes_as_str(id: &str) -> String { id.trim_start_matches("\\x").to_owned() } -impl IdType { - /// Add `ids` as a bind variable to `out`, using the right SQL type - fn bind_ids(&self, ids: &[S], out: &mut AstPass) -> QueryResult<()> - where - S: AsRef + diesel::serialize::ToSql, - { - match self { - IdType::String => out.push_bind_param::, _>(&ids)?, - IdType::Bytes => { - let ids = ids - .iter() - .map(|id| str_as_bytes(id.as_ref())) - .collect::, _>>()?; - let id_slices = ids.iter().map(|id| id.as_slice()).collect::>(); - out.push_bind_param::, _>(&id_slices)?; - } - } - // Generate '::text[]' or '::bytea[]' - out.push_sql("::"); - out.push_sql(self.sql_type()); - out.push_sql("[]"); - Ok(()) - } -} - /// Conveniences for handling foreign keys depending on whether we are using /// `IdType::Bytes` or `IdType::String` as the primary key /// @@ -151,48 +122,61 @@ trait ForeignKeyClauses { /// The name of the column fn name(&self) -> &str; - /// Add `id` as a bind variable to `out`, using the right SQL type - fn bind_id(&self, id: &str, out: &mut AstPass) -> QueryResult<()> { - match self.column_type().id_type()? 
{ - IdType::String => out.push_bind_param::(&id)?, - IdType::Bytes => out.push_bind_param::(&str_as_bytes(id)?.as_slice())?, - } - // Generate '::text' or '::bytea' - out.push_sql("::"); - out.push_sql(self.column_type().sql_type()); - Ok(()) - } - - /// Add `ids` as a bind variable to `out`, using the right SQL type - fn bind_ids(&self, ids: &[S], out: &mut AstPass) -> QueryResult<()> - where - S: AsRef + diesel::serialize::ToSql, - { - self.column_type().id_type()?.bind_ids(ids, out) - } - /// Generate a clause `{name()} = $id` using the right types to bind `$id` /// into `out` - fn eq(&self, id: &str, out: &mut AstPass) -> QueryResult<()> { + fn eq(&self, id: &Id, out: &mut AstPass) -> QueryResult<()> { out.push_sql(self.name()); out.push_sql(" = "); - self.bind_id(id, out) + id.push_bind_param(out) } /// Generate a clause /// `exists (select 1 from unnest($ids) as p(g$id) where id = p.g$id)` /// using the right types to bind `$ids` into `out` - fn is_in(&self, ids: &[S], out: &mut AstPass) -> QueryResult<()> - where - S: AsRef + diesel::serialize::ToSql, - { + fn is_in(&self, ids: &IdList, out: &mut AstPass) -> QueryResult<()> { out.push_sql("exists (select 1 from unnest("); - self.bind_ids(ids, out)?; + ids.push_bind_param(out)?; out.push_sql(") as p(g$id) where id = p.g$id)"); Ok(()) } } +/// This trait is here to deal with the fact that we can't implement `ToSql` +/// for `Id` and similar types since `ToSql` can only be implemented when +/// the SQL type of the bind parameter is known at compile time. 
For `Id`, +/// we have to switch between `Text` and `Binary` and therefore use this +/// trait to make passing `Id` values to the database convenient +trait PushBindParam { + fn push_bind_param(&self, out: &mut AstPass) -> QueryResult<()>; +} + +impl PushBindParam for Id { + fn push_bind_param(&self, out: &mut AstPass) -> QueryResult<()> { + match self { + Id::String(s) => out.push_bind_param::(s), + Id::Bytes(b) => out.push_bind_param::(&b.as_slice()), + } + } +} + +impl PushBindParam for IdList { + fn push_bind_param(&self, out: &mut AstPass) -> QueryResult<()> { + match self { + IdList::String(ids) => out.push_bind_param::, _>(ids), + IdList::Bytes(ids) => out.push_bind_param::, _>(ids), + } + } +} + +impl<'a> PushBindParam for IdRef<'a> { + fn push_bind_param(&self, out: &mut AstPass) -> QueryResult<()> { + match self { + IdRef::String(s) => out.push_bind_param::(s), + IdRef::Bytes(b) => out.push_bind_param::(b), + } + } +} + impl ForeignKeyClauses for Column { fn column_type(&self) -> &ColumnType { &self.column_type @@ -211,7 +195,7 @@ pub trait FromEntityData: Sized { fn from_data>>( schema: &InputSchema, - parent_id: Option, + parent_id: Option, iter: I, ) -> Result; } @@ -223,7 +207,7 @@ impl FromEntityData for Entity { fn from_data>>( schema: &InputSchema, - parent_id: Option, + parent_id: Option, iter: I, ) -> Result { debug_assert_eq!(None, parent_id); @@ -238,7 +222,7 @@ impl FromEntityData for QueryObject { fn from_data>>( _schema: &InputSchema, - parent: Option, + parent: Option, iter: I, ) -> Result { let entity = as FromIterator< @@ -501,9 +485,12 @@ impl EntityData { "query unexpectedly produces parent ids" ))) } - Some(parent_type) => { - Some(T::Value::from_column_value(parent_type, json)) - } + Some(parent_type) => Some( + parent_type + .id_type() + .map_err(StoreError::from) + .and_then(|id_type| id_type.parse_id(json)), + ), } } else { None @@ -1621,11 +1608,10 @@ impl<'a, Conn> RunQueryDsl for FindPossibleDeletionsQuery<'a> {} #[derive(Debug, 
Clone, Constructor)] pub struct FindManyQuery<'a> { - pub(crate) _namespace: &'a Namespace, pub(crate) tables: Vec<(&'a Table, CausalityRegion)>, // Maps object name to ids. - pub(crate) ids_for_type: &'a BTreeMap<(EntityType, CausalityRegion), Vec>, + pub(crate) ids_for_type: &'a BTreeMap<(EntityType, CausalityRegion), IdList>, pub(crate) block: BlockNumber, } @@ -1699,7 +1685,6 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { entity_field, value: entity_id, causality_region, - id_is_bytes, } = self.derived_query; // Generate @@ -1721,27 +1706,13 @@ impl<'a> QueryFragment for FindDerivedQuery<'a> { out.push_sql(", "); } - if *id_is_bytes { - out.push_sql("decode("); - out.push_bind_param::( - &value.entity_id.as_str().strip_prefix("0x").unwrap(), - )?; - out.push_sql(", 'hex')"); - } else { - out.push_bind_param::(&value.entity_id.as_str())?; - } + value.entity_id.push_bind_param(&mut out)?; } out.push_sql(") and "); } out.push_identifier(entity_field.to_snake_case().as_str())?; out.push_sql(" = "); - if *id_is_bytes { - out.push_sql("decode("); - out.push_bind_param::(&entity_id.as_str().strip_prefix("0x").unwrap())?; - out.push_sql(", 'hex')"); - } else { - out.push_bind_param::(&entity_id.as_str())?; - } + entity_id.push_bind_param(&mut out)?; out.push_sql(" and "); if self.table.has_causality_region { out.push_sql("causality_region = "); @@ -1767,11 +1738,11 @@ impl<'a> LoadQuery for FindDerivedQuery<'a> { impl<'a, Conn> RunQueryDsl for FindDerivedQuery<'a> {} #[derive(Debug)] -struct FulltextValues<'a>(HashMap<&'a Word, Vec<(&'a str, Value)>>); +struct FulltextValues<'a>(HashMap<&'a Id, Vec<(&'a str, Value)>>); impl<'a> FulltextValues<'a> { fn new(table: &'a Table, rows: &'a WriteChunk<'a>) -> Self { - let mut map: HashMap<&Word, Vec<(&str, Value)>> = HashMap::new(); + let mut map: HashMap<&Id, Vec<(&str, Value)>> = HashMap::new(); for column in table.columns.iter().filter(|column| column.is_fulltext()) { for row in rows { if let Some(fields) = 
column.fulltext_fields.as_ref() { @@ -1791,7 +1762,7 @@ impl<'a> FulltextValues<'a> { Self(map) } - fn get(&self, entity_id: &Word, field: &str) -> &Value { + fn get(&self, entity_id: &Id, field: &str) -> &Value { self.0 .get(entity_id) .and_then(|values| { @@ -1974,13 +1945,13 @@ impl<'a, Conn> RunQueryDsl for InsertQuery<'a> {} pub struct ConflictingEntityQuery<'a> { _layout: &'a Layout, tables: Vec<&'a Table>, - entity_id: &'a str, + entity_id: &'a Id, } impl<'a> ConflictingEntityQuery<'a> { pub fn new( layout: &'a Layout, entities: Vec, - entity_id: &'a str, + entity_id: &'a Id, ) -> Result { let tables = entities .iter() @@ -2013,7 +1984,7 @@ impl<'a> QueryFragment for ConflictingEntityQuery<'a> { out.push_sql(" as entity from "); out.push_sql(table.qualified_name.as_str()); out.push_sql(" where id = "); - table.primary_key().bind_id(self.entity_id, &mut out)?; + self.entity_id.push_bind_param(&mut out)?; } Ok(()) } @@ -2041,16 +2012,17 @@ impl<'a, Conn> RunQueryDsl for ConflictingEntityQuery<'a> {} #[derive(Debug, Clone)] enum ParentIds { - List(Vec>), - Scalar(Vec), + List(Vec), + Scalar(IdList), } impl ParentIds { - fn new(link: ParentLink) -> Self { - match link { + fn new(link: ParentLink) -> Result { + let link = match link { ParentLink::Scalar(child_ids) => ParentIds::Scalar(child_ids), ParentLink::List(child_ids) => ParentIds::List(child_ids), - } + }; + Ok(link) } } @@ -2076,7 +2048,10 @@ impl<'a> TableLink<'a> { } EntityLink::Parent(parent_type, parent_link) => { let parent_table = layout.table_for_entity(&parent_type)?; - Ok(TableLink::Parent(parent_table, ParentIds::new(parent_link))) + Ok(TableLink::Parent( + parent_table, + ParentIds::new(parent_link)?, + )) } } } @@ -2141,7 +2116,7 @@ pub struct FilterWindow<'a> { /// we always compare these ids with a column in `table`, and that /// column must have the same type as the primary key of the parent /// table, we can deduce the correct `IdType` that way - ids: Vec, + ids: IdList, /// How to filter 
by a set of parents link: TableLink<'a>, column_names: AttributeNames, @@ -2218,7 +2193,7 @@ impl<'a> FilterWindow<'a> { // order by c.{sort_key} out.push_sql("\n/* children_type_a */ from unnest("); - column.bind_ids(&self.ids, out)?; + self.ids.push_bind_param(out)?; out.push_sql(") as p(id) cross join lateral (select "); write_column_names(&self.column_names, self.table, None, out)?; out.push_sql(" from "); @@ -2256,7 +2231,7 @@ impl<'a> FilterWindow<'a> { // TYPEA_BATCH_SIZE children and helps Postgres to narrow down the // rows it needs to pick from `children` to join with `p(id)` out.push_sql("\n/* child_type_a */ from unnest("); - column.bind_ids(&self.ids, out)?; + self.ids.push_bind_param(out)?; out.push_sql(") as p(id), "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); @@ -2269,7 +2244,7 @@ impl<'a> FilterWindow<'a> { out.push_sql(" and c."); out.push_identifier(column.name.as_str())?; out.push_sql(" && "); - column.bind_ids(&self.ids, out)?; + self.ids.push_bind_param(out)?; } self.and_filter(out.reborrow())?; limit.single_limit(self.ids.len(), out); @@ -2297,7 +2272,7 @@ impl<'a> FilterWindow<'a> { // order by c.{sort_key} out.push_sql("\n/* children_type_b */ from unnest("); - column.bind_ids(&self.ids, out)?; + self.ids.push_bind_param(out)?; out.push_sql(") as p(id) cross join lateral (select "); write_column_names(&self.column_names, self.table, None, out)?; out.push_sql(" from "); @@ -2329,7 +2304,7 @@ impl<'a> FilterWindow<'a> { // limit {parent_ids.len} + 1 out.push_sql("\n/* child_type_b */ from unnest("); - column.bind_ids(&self.ids, out)?; + self.ids.push_bind_param(out)?; out.push_sql(") as p(id), "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); @@ -2344,8 +2319,7 @@ impl<'a> FilterWindow<'a> { fn children_type_c( &self, - parent_primary_key: &Column, - child_ids: &[Vec], + child_ids: &[IdList], limit: ParentLimit<'_>, block: BlockNumber, out: &mut AstPass, @@ -2371,16 +2345,16 @@ 
impl<'a> FilterWindow<'a> { out.push_sql("from (values "); for i in 0..self.ids.len() { - let parent_id = &self.ids[i]; + let parent_id = self.ids.index(i); let child_ids = &child_ids[i]; if i > 0 { out.push_sql(", ("); } else { out.push_sql("("); } - parent_primary_key.bind_id(parent_id, out)?; + parent_id.push_bind_param(out)?; out.push_sql(","); - self.table.primary_key().bind_ids(child_ids, out)?; + child_ids.push_bind_param(out)?; out.push_sql(")"); } out.push_sql(") as p(id, child_ids)"); @@ -2413,7 +2387,7 @@ impl<'a> FilterWindow<'a> { fn child_type_d( &self, - child_ids: &[String], + child_ids: &IdList, limit: ParentLimit<'_>, block: BlockNumber, out: &mut AstPass, @@ -2425,9 +2399,9 @@ impl<'a> FilterWindow<'a> { // and .. other conditions on c .. out.push_sql("\n/* child_type_d */ from rows from (unnest("); - out.push_bind_param::, _>(&self.ids)?; + self.ids.push_bind_param(out)?; out.push_sql("), unnest("); - self.table.primary_key().bind_ids(child_ids, out)?; + child_ids.push_bind_param(out)?; out.push_sql(")) as p(id, child_id), "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); @@ -2438,13 +2412,11 @@ impl<'a> FilterWindow<'a> { // is below the threshold set by environment variable. Set it to // 0 to turn off this optimization. 
if ENV_VARS.store.typed_children_set_size > 0 { - let mut child_set: Vec<&str> = child_ids.iter().map(|id| id.as_str()).collect(); - child_set.sort_unstable(); - child_set.dedup(); + let child_set = child_ids.clone().as_unique(); if child_set.len() <= ENV_VARS.store.typed_children_set_size { out.push_sql(" and c.id = any("); - self.table.primary_key().bind_ids(&child_set, out)?; + child_set.push_bind_param(out)?; out.push_sql(")"); } } @@ -2476,13 +2448,9 @@ impl<'a> FilterWindow<'a> { } } } - TableLink::Parent(parent_table, ParentIds::List(child_ids)) => self.children_type_c( - parent_table.primary_key(), - child_ids, - limit, - block, - &mut out, - ), + TableLink::Parent(_, ParentIds::List(child_ids)) => { + self.children_type_c(child_ids, limit, block, &mut out) + } TableLink::Parent(_, ParentIds::Scalar(child_ids)) => { self.child_type_d(child_ids, limit, block, &mut out) } @@ -2507,10 +2475,10 @@ impl<'a> FilterWindow<'a> { } /// Collect all the parent id's from all windows - fn collect_parents(windows: &[FilterWindow]) -> Vec { - let parent_ids: HashSet = - HashSet::from_iter(windows.iter().flat_map(|window| window.ids.iter().cloned())); - parent_ids.into_iter().collect() + fn collect_parents(windows: &'a [FilterWindow]) -> Result { + let parent_ids: HashSet> = + HashSet::from_iter(windows.iter().flat_map(|window| window.ids.iter())); + IdList::try_from_iter_ref(parent_ids.into_iter()) } } @@ -2523,7 +2491,7 @@ pub enum FilterCollection<'a> { All(Vec<(&'a Table, Option>, AttributeNames)>), /// Collection made from windows of the same or different entity types SingleWindow(FilterWindow<'a>), - MultiWindow(Vec>, Vec), + MultiWindow(Vec>, IdList), } /// String representation that is useful for debugging when `walk_ast` fails @@ -2561,6 +2529,7 @@ impl<'a> fmt::Display for FilterCollection<'a> { write!(f, "<")?; + let ids = ids.iter().map(|id| id.to_string()).collect::>(); match link { TableLink::Direct(col, Single) => { write!(f, "uniq:{}={}", col.name(), 
ids.join(","))? @@ -2569,11 +2538,14 @@ impl<'a> fmt::Display for FilterCollection<'a> { write!(f, "many:{}={}", col.name(), ids.join(","))? } TableLink::Parent(_, ParentIds::List(css)) => { - let css = css.iter().map(|cs| cs.join(",")).join("],["); + let css = css + .iter() + .map(|cs| cs.iter().map(|c| c.to_string()).join(",")) + .join("],["); write!(f, "uniq:id=[{}]", css)? } TableLink::Parent(_, ParentIds::Scalar(cs)) => { - write!(f, "uniq:id={}", cs.join(","))? + write!(f, "uniq:id={}", cs.iter().map(|c| c.to_string()).join(","))? } }; write!(f, " for {}>", ids.join(","))?; @@ -2645,7 +2617,7 @@ impl<'a> FilterCollection<'a> { windows.pop().expect("we just checked there is an element"), ) } else { - let parent_ids = FilterWindow::collect_parents(&windows); + let parent_ids = FilterWindow::collect_parents(&windows)?; FilterCollection::MultiWindow(windows, parent_ids) }; Ok(collection) @@ -4027,7 +3999,7 @@ impl<'a> FilterQuery<'a> { fn query_window( &self, windows: &[FilterWindow], - parent_ids: &[String], + parent_ids: &IdList, mut out: AstPass, ) -> QueryResult<()> { // Note that a CTE is an optimization fence, and since we use @@ -4059,8 +4031,7 @@ impl<'a> FilterQuery<'a> { out.push_sql("with matches as ("); out.push_sql("select c.* from "); out.push_sql("unnest("); - // windows always has at least 2 entries - windows[0].parent_type()?.bind_ids(parent_ids, &mut out)?; + parent_ids.push_bind_param(&mut out)?; out.push_sql(") as q(id)\n"); out.push_sql(" cross join lateral ("); for (i, window) in windows.iter().enumerate() { @@ -4174,16 +4145,16 @@ impl<'a, Conn> RunQueryDsl for FilterQuery<'a> {} /// Reduce the upper bound of the current entry's block range to `block` as /// long as that does not result in an empty block range #[derive(Debug)] -pub struct ClampRangeQuery<'a, S> { +pub struct ClampRangeQuery<'a> { table: &'a Table, - entity_ids: &'a [S], + entity_ids: &'a IdList, br_column: BlockRangeColumn<'a>, } -impl<'a, S> ClampRangeQuery<'a, S> { 
+impl<'a> ClampRangeQuery<'a> { pub fn new( table: &'a Table, - entity_ids: &'a [S], + entity_ids: &'a IdList, block: BlockNumber, ) -> Result { if table.immutable { @@ -4202,10 +4173,7 @@ impl<'a, S> ClampRangeQuery<'a, S> { } } -impl<'a, S> QueryFragment for ClampRangeQuery<'a, S> -where - S: AsRef + diesel::serialize::ToSql, -{ +impl<'a> QueryFragment for ClampRangeQuery<'a> { fn walk_ast(&self, mut out: AstPass) -> QueryResult<()> { // update table // set block_range = int4range(lower(block_range), $block) @@ -4218,7 +4186,7 @@ where self.br_column.clamp(&mut out)?; out.push_sql("\n where "); - self.table.primary_key().is_in(self.entity_ids, &mut out)?; + self.table.primary_key().is_in(&self.entity_ids, &mut out)?; out.push_sql(" and ("); self.br_column.latest(&mut out); out.push_sql(")"); @@ -4227,16 +4195,13 @@ where } } -impl<'a, S> QueryId for ClampRangeQuery<'a, S> -where - S: AsRef + diesel::serialize::ToSql, -{ +impl<'a> QueryId for ClampRangeQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } -impl<'a, S, Conn> RunQueryDsl for ClampRangeQuery<'a, S> {} +impl<'a, Conn> RunQueryDsl for ClampRangeQuery<'a> {} /// Helper struct for returning the id's touched by the RevertRemove and /// RevertExtend queries diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index eed3dbff0a9..29c0a4c1ebf 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,6 +1,5 @@ use std::collections::BTreeSet; use std::ops::Deref; -use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Mutex, RwLock, TryLockError as RwLockError}; use std::time::{Duration, Instant}; @@ -9,8 +8,7 @@ use std::{collections::BTreeMap, sync::Arc}; use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::{Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore}; use graph::constraint_violation; -use graph::data::store::scalar::Bytes; -use graph::data::store::Value; 
+use graph::data::store::IdList; use graph::data::subgraph::schema; use graph::data_source::CausalityRegion; use graph::prelude::{ @@ -238,12 +236,13 @@ impl SyncStore { keys: BTreeSet, block: BlockNumber, ) -> Result, StoreError> { - let mut by_type: BTreeMap<(EntityType, CausalityRegion), Vec> = BTreeMap::new(); + let mut by_type: BTreeMap<(EntityType, CausalityRegion), IdList> = BTreeMap::new(); for key in keys { + let id_type = key.entity_type.id_type()?; by_type .entry((key.entity_type, key.causality_region)) - .or_default() - .push(key.entity_id.into()); + .or_insert_with(|| IdList::new(id_type)) + .push(key.entity_id)?; } retry::forever(&self.logger, "get_many", || { @@ -1121,12 +1120,7 @@ impl Queue { fn is_related(derived_query: &DerivedEntityQuery, entity: &Entity) -> bool { entity .get(&derived_query.entity_field) - .map(|v| match v { - Value::String(s) => s.as_str() == derived_query.value.as_str(), - Value::Bytes(b) => Bytes::from_str(derived_query.value.as_str()) - .map_or(false, |bytes_value| &bytes_value == b), - _ => false, - }) + .map(|v| &derived_query.value == v) .unwrap_or(false) } diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index 53f6854e525..d4d459205d0 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -1298,6 +1298,10 @@ async fn mixed_mutability() { #[tokio::test] async fn derived_interface_bytes() { + fn b(s: &str) -> Value { + Value::Bytes(s.parse().unwrap()) + } + let subgraph_id = "DerivedInterfaceBytes"; let document = r#" type Pool { id: Bytes!, @@ -1322,9 +1326,9 @@ async fn derived_interface_bytes() { let query = "query { pools { trades { id } } }"; let entities = vec![ - ("Pool", entity! { schema => id: "0xf001" }), - ("Sell", entity! { schema => id: "0xc0", pool: "0xf001"}), - ("Buy", entity! { schema => id: "0xb0", pool: "0xf001"}), + ("Pool", entity! { schema => id: b("0xf001") }), + ("Sell", entity! 
{ schema => id: b("0xc0"), pool: "0xf001"}), + ("Buy", entity! { schema => id: b("0xb0"), pool: "0xf001"}), ]; let res = insert_and_query(subgraph_id, document, entities, query) diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 278e4baee9f..c62a6a27bb3 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -3,6 +3,7 @@ use graph::components::store::{ DeploymentCursorTracker, DerivedEntityQuery, GetScope, LoadRelatedRequest, ReadStore, StoredDynamicDataSource, WritableStore, }; +use graph::data::store::Id; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; use graph::data_source::CausalityRegion; use graph::schema::{EntityKey, EntityType, InputSchema}; @@ -178,7 +179,7 @@ impl WritableStore for MockStore { } fn make_band_key(id: &'static str) -> EntityKey { - SCHEMA.entity_type("Band").unwrap().key(id) + SCHEMA.entity_type("Band").unwrap().parse_key(id).unwrap() } fn sort_by_entity_key(mut mods: Vec) -> Vec { @@ -417,12 +418,14 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator // 1 account 3 wallets let test_entity_1 = create_account_entity("1", "Johnton", "tonofjohn@email.com", 67_i32); - let wallet_entity_1 = create_wallet_operation("1", "1", 67_i32); - let wallet_entity_2 = create_wallet_operation("2", "1", 92_i32); - let wallet_entity_3 = create_wallet_operation("3", "1", 192_i32); + let id_one = WALLET_TYPE.parse_id("1").unwrap(); + let wallet_entity_1 = create_wallet_operation("1", &id_one, 67_i32); + let wallet_entity_2 = create_wallet_operation("2", &id_one, 92_i32); + let wallet_entity_3 = create_wallet_operation("3", &id_one, 192_i32); // 1 account 1 wallet let test_entity_2 = create_account_entity("2", "Cindini", "dinici@email.com", 42_i32); - let wallet_entity_4 = create_wallet_operation("4", "2", 32_i32); + let id_two = WALLET_TYPE.parse_id("2").unwrap(); + let wallet_entity_4 = 
create_wallet_operation("4", &id_two, 32_i32); // 1 account 0 wallets let test_entity_3 = create_account_entity("3", "Shaqueeena", "queensha@email.com", 28_i32); transact_entity_operations( @@ -449,18 +452,19 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age }; EntityOperation::Set { - key: ACCOUNT_TYPE.key(id), + key: ACCOUNT_TYPE.parse_key(id).unwrap(), data: test_entity, } } -fn create_wallet_entity(id: &str, account_id: &str, balance: i32) -> Entity { +fn create_wallet_entity(id: &str, account_id: &Id, balance: i32) -> Entity { + let account_id = Value::from(account_id.clone()); entity! { LOAD_RELATED_SUBGRAPH => id: id, account: account_id, balance: balance } } -fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOperation { +fn create_wallet_operation(id: &str, account_id: &Id, balance: i32) -> EntityOperation { let test_wallet = create_wallet_entity(id, account_id, balance); EntityOperation::Set { - key: WALLET_TYPE.key(id), + key: WALLET_TYPE.parse_key(id).unwrap(), data: test_wallet, } } @@ -468,17 +472,17 @@ fn create_wallet_operation(id: &str, account_id: &str, balance: i32) -> EntityOp #[test] fn check_for_account_with_multiple_wallets() { run_store_test(|mut cache, _store, _deployment, _writable| async move { - let account_id = "1"; + let account_id = ACCOUNT_TYPE.parse_id("1").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("1", account_id, 67_i32); - let wallet_2 = create_wallet_entity("2", account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let wallet_1 = create_wallet_entity("1", &account_id, 67_i32); + let wallet_2 
= create_wallet_entity("2", &account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -488,15 +492,15 @@ fn check_for_account_with_multiple_wallets() { #[test] fn check_for_account_with_single_wallet() { run_store_test(|mut cache, _store, _deployment, _writable| async move { - let account_id = "2"; + let account_id = ACCOUNT_TYPE.parse_id("2").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("4", account_id, 32_i32); + let wallet_1 = create_wallet_entity("4", &account_id, 32_i32); let expeted_vec = vec![wallet_1]; assert_eq!(result, expeted_vec); @@ -506,11 +510,11 @@ fn check_for_account_with_single_wallet() { #[test] fn check_for_account_with_no_wallet() { run_store_test(|mut cache, _store, _deployment, _writable| async move { - let account_id = "3"; + let account_id = ACCOUNT_TYPE.parse_id("3").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); @@ -523,11 +527,11 @@ fn check_for_account_with_no_wallet() { #[test] fn check_for_account_that_doesnt_exist() { run_store_test(|mut cache, _store, _deployment, _writable| async move { - let account_id = "4"; + let account_id = ACCOUNT_TYPE.parse_id("4").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); @@ -540,11 
+544,11 @@ fn check_for_account_that_doesnt_exist() { #[test] fn check_for_non_existent_field() { run_store_test(|mut cache, _store, _deployment, _writable| async move { - let account_id = "1"; + let account_id = ACCOUNT_TYPE.parse_id("1").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "friends".into(), - entity_id: account_id.into(), + entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap_err(); @@ -560,10 +564,10 @@ fn check_for_non_existent_field() { #[test] fn check_for_insert_async_store() { run_store_test(|mut cache, store, deployment, _writable| async move { - let account_id = "2"; + let account_id = ACCOUNT_TYPE.parse_id("2").unwrap(); // insert a new wallet - let wallet_entity_5 = create_wallet_operation("5", account_id, 79_i32); - let wallet_entity_6 = create_wallet_operation("6", account_id, 200_i32); + let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32); + let wallet_entity_6 = create_wallet_operation("6", &account_id, 200_i32); transact_entity_operations( &store, @@ -576,13 +580,13 @@ fn check_for_insert_async_store() { let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("4", account_id, 32_i32); - let wallet_2 = create_wallet_entity("5", account_id, 79_i32); - let wallet_3 = create_wallet_entity("6", account_id, 200_i32); + let wallet_1 = create_wallet_entity("4", &account_id, 32_i32); + let wallet_2 = create_wallet_entity("5", &account_id, 79_i32); + let wallet_3 = create_wallet_entity("6", &account_id, 200_i32); let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -591,10 +595,10 @@ fn check_for_insert_async_store() { #[test] fn 
check_for_insert_async_not_related() { run_store_test(|mut cache, store, deployment, _writable| async move { - let account_id = "2"; + let account_id = ACCOUNT_TYPE.parse_id("2").unwrap(); // insert a new wallet - let wallet_entity_5 = create_wallet_operation("5", account_id, 79_i32); - let wallet_entity_6 = create_wallet_operation("6", account_id, 200_i32); + let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32); + let wallet_entity_6 = create_wallet_operation("6", &account_id, 200_i32); transact_entity_operations( &store, @@ -604,17 +608,17 @@ fn check_for_insert_async_not_related() { ) .await .unwrap(); - let account_id = "1"; + let account_id = ACCOUNT_TYPE.parse_id("1").unwrap(); let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("1", account_id, 67_i32); - let wallet_2 = create_wallet_entity("2", account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let wallet_1 = create_wallet_entity("1", &account_id, 67_i32); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -624,9 +628,9 @@ fn check_for_insert_async_not_related() { #[test] fn check_for_update_async_related() { run_store_test(|mut cache, store, deployment, writable| async move { - let account_id = "1"; - let entity_key = WALLET_TYPE.key("1"); - let wallet_entity_update = create_wallet_operation("1", account_id, 79_i32); + let entity_key = WALLET_TYPE.parse_key("1").unwrap(); + let account_id = entity_key.entity_id.clone(); + let wallet_entity_update = create_wallet_operation("1", &account_id, 79_i32); let new_data = match 
wallet_entity_update { EntityOperation::Set { ref data, .. } => data.clone(), @@ -646,12 +650,12 @@ fn check_for_update_async_related() { let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_2 = create_wallet_entity("2", account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); let expeted_vec = vec![new_data, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -661,8 +665,8 @@ fn check_for_update_async_related() { #[test] fn check_for_delete_async_related() { run_store_test(|mut cache, store, deployment, _writable| async move { - let account_id = "1"; - let del_key = WALLET_TYPE.key("1"); + let account_id = ACCOUNT_TYPE.parse_id("1").unwrap(); + let del_key = WALLET_TYPE.parse_key("1").unwrap(); // delete wallet transact_entity_operations( &store, @@ -676,12 +680,12 @@ fn check_for_delete_async_related() { let request = LoadRelatedRequest { entity_type: ACCOUNT_TYPE.clone(), entity_field: "wallets".into(), - entity_id: account_id.into(), + entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_2 = create_wallet_entity("2", account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", account_id, 192_i32); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); let expeted_vec = vec![wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -692,12 +696,14 @@ fn check_for_delete_async_related() { fn scoped_get() { run_store_test(|mut cache, _store, _deployment, _writable| async move { // Key for an existing entity that is 
in the store - let key1 = WALLET_TYPE.key("1"); - let wallet1 = create_wallet_entity("1", "1", 67); + let account1 = ACCOUNT_TYPE.parse_id("1").unwrap(); + let key1 = WALLET_TYPE.parse_key("1").unwrap(); + let wallet1 = create_wallet_entity("1", &account1, 67); // Create a new entity that is not in the store - let wallet5 = create_wallet_entity("5", "5", 100); - let key5 = WALLET_TYPE.key("5"); + let account5 = ACCOUNT_TYPE.parse_id("5").unwrap(); + let wallet5 = create_wallet_entity("5", &account5, 100); + let key5 = WALLET_TYPE.parse_key("5").unwrap(); cache.set(key5.clone(), wallet5.clone()).unwrap(); // For the new entity, we can retrieve it with either scope @@ -740,7 +746,7 @@ fn no_internal_keys() { // the schema entity.validate(schema, key).expect("the entity is valid"); } - let key = WALLET_TYPE.key("1"); + let key = WALLET_TYPE.parse_key("1").unwrap(); let schema = cache.schema.cheap_clone(); diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 5a352263866..63ce1dea779 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -1,6 +1,6 @@ use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; -use graph::prelude::SubscriptionResult; +use graph::prelude::{SubscriptionResult, Value}; use graph::schema::InputSchema; use graphql_parser::Pos; use std::iter::FromIterator; @@ -36,24 +36,75 @@ use test_store::{ GENESIS_PTR, LOAD_MANAGER, LOGGER, METRICS_REGISTRY, STORE, SUBSCRIPTION_MANAGER, }; +/// Ids for the various entities that we create in `insert_entities` and +/// access through `IdType` to check results in the tests const NETWORK_NAME: &str = "fake_network"; const SONGS_STRING: [&str; 5] = ["s0", "s1", "s2", "s3", "s4"]; const SONGS_BYTES: [&str; 5] = ["0xf0", "0xf1", "0xf2", "0xf3", "0xf4"]; const MEDIA_STRING: [&str; 7] = ["md0", "md1", "md2", "md3", "md4", "md5", "md6"]; const MEDIA_BYTES: [&str; 7] = ["0xf0", "0xf1", "0xf2", "0xf3", "0xf4", 
"0xf5", "0xf6"]; +lazy_static! { + /// The id of the sole publisher in the test data + static ref PUB1: IdVal = IdType::Bytes.parse("0xb1"); +} + +/// A convenience wrapper for `Value` and `r::Value` that clones a lot, +/// which is fine in tests, in order to keep test notation concise +#[derive(Debug)] +struct IdVal(Value); + +impl From<&IdVal> for Value { + fn from(id: &IdVal) -> Self { + id.0.clone() + } +} + +impl graph::data::graphql::IntoValue for &IdVal { + fn into_value(self) -> r::Value { + self.0.clone().into() + } +} + +impl std::fmt::Display for IdVal { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + #[derive(Clone, Copy, Debug)] enum IdType { String, - #[allow(dead_code)] Bytes, } impl IdType { - fn songs(&self) -> &[&str] { + fn parse(&self, s: &str) -> IdVal { + let value = match self { + IdType::String => Value::String(s.to_string()), + IdType::Bytes => Value::Bytes(s.parse().unwrap()), + }; + IdVal(value) + } + + fn songs(&self) -> &[&IdVal] { + lazy_static! { + static ref SONGS_STRING_VAL: Vec = SONGS_STRING + .iter() + .map(|s| IdType::String.parse(s)) + .collect::>(); + static ref SONGS_BYTES_VAL: Vec = SONGS_BYTES + .iter() + .map(|s| IdType::Bytes.parse(s)) + .collect::>(); + static ref SONGS_STRING_REF: Vec<&'static IdVal> = + SONGS_STRING_VAL.iter().collect::>(); + static ref SONGS_BYTES_REF: Vec<&'static IdVal> = + SONGS_BYTES_VAL.iter().collect::>(); + } match self { - IdType::String => SONGS_STRING.as_slice(), - IdType::Bytes => SONGS_BYTES.as_slice(), + IdType::String => SONGS_STRING_REF.as_slice(), + IdType::Bytes => SONGS_BYTES_REF.as_slice(), } } @@ -341,6 +392,7 @@ async fn insert_test_entities( let s = id_type.songs(); let md = id_type.medias(); let is = &manifest.schema; + let pub1 = &*PUB1; let entities0 = vec![ ( "Musician", @@ -349,7 +401,7 @@ async fn insert_test_entities( entity! 
{ is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100 }, ], ), - ("Publisher", vec![entity! { is => id: "0xb1" }]), + ("Publisher", vec![entity! { is => id: pub1 }]), ( "Band", vec![ @@ -360,10 +412,10 @@ async fn insert_test_entities( ( "Song", vec![ - entity! { is => id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, - entity! { is => id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, - entity! { is => id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, - entity! { is => id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, + entity! { is => id: s[1], sid: "s1", title: "Cheesy Tune", publisher: pub1, writtenBy: "m1", media: vec![md[1], md[2]] }, + entity! { is => id: s[2], sid: "s2", title: "Rock Tune", publisher: pub1, writtenBy: "m2", media: vec![md[3], md[4]] }, + entity! { is => id: s[3], sid: "s3", title: "Pop Tune", publisher: pub1, writtenBy: "m1", media: vec![md[5]] }, + entity! { is => id: s[4], sid: "s4", title: "Folk Tune", publisher: pub1, writtenBy: "m3", media: vec![md[6]] }, ], ), ( @@ -1522,8 +1574,8 @@ fn mixed_parent_child_id() { run_query(QUERY, |result, _| { let exp = object! { songs: vec![ - object! { publisher: object! { id: "0xb1" } }, - object! { publisher: object! { id: "0xb1" } } + object! { publisher: object! { id: &*PUB1 } }, + object! { publisher: object! 
{ id: &*PUB1 } } ] }; let data = extract_data!(result).unwrap(); diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index a8550994060..23c1edf138f 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -1,5 +1,4 @@ use graph::blockchain::block_stream::FirehoseCursor; -use graph::data::value::Word; use graph::schema::{EntityType, InputSchema}; use graph_store_postgres::command_support::OnSync; use lazy_static::lazy_static; @@ -10,7 +9,7 @@ use graph::components::store::{ DeploymentLocator, EntityOrder, EntityQuery, PruneReporter, PruneRequest, PruningStrategy, VersionStats, }; -use graph::data::store::scalar; +use graph::data::store::{scalar, Id}; use graph::data::subgraph::schema::*; use graph::data::subgraph::*; use graph::semver::Version; @@ -258,7 +257,7 @@ fn create_test_entity( let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(entity_type).unwrap(); EntityOperation::Set { - key: entity_type.key(id), + key: entity_type.parse_key(id).unwrap(), data: test_entity, } } @@ -283,7 +282,7 @@ async fn create_grafted_subgraph( fn find_entities( store: &DieselSubgraphStore, deployment: &DeploymentLocator, -) -> (Vec, Vec) { +) -> (Vec, Vec) { let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(USER).unwrap(); let query = EntityQuery::new( deployment.hash.clone(), @@ -312,7 +311,8 @@ async fn check_graft( ) -> Result<(), StoreError> { let (entities, ids) = find_entities(store.as_ref(), &deployment); - assert_eq!(vec!["3", "1", "2"], ids); + let ids_str = ids.iter().map(|id| id.to_string()).collect::>(); + assert_eq!(vec!["3", "1", "2"], ids_str); // Make sure we caught Shaqueeena at block 1, before the change in // email address @@ -322,7 +322,7 @@ async fn check_graft( // Make our own entries for block 2 shaq.set("email", "shaq@gmail.com").unwrap(); let op = EntityOperation::Set { - key: USER_TYPE.key("3"), + key: USER_TYPE.parse_key("3").unwrap(), data: shaq, }; 
transact_and_wait(&store, &deployment, BLOCKS[2].clone(), vec![op]) @@ -401,7 +401,8 @@ fn graft() { .expect("grafting onto block 0 works"); let (entities, ids) = find_entities(store.as_ref(), &deployment); - assert_eq!(vec!["1"], ids); + let ids_str = ids.iter().map(|id| id.to_string()).collect::>(); + assert_eq!(vec!["1"], ids_str); let shaq = entities.first().unwrap().clone(); assert_eq!(Some(&Value::from("tonofjohn@email.com")), shaq.get("email")); Ok(()) @@ -554,15 +555,17 @@ fn prune() { block: BlockNumber, exp: Vec<&str>, ) { + let user_type = TEST_SUBGRAPH_SCHEMA.entity_type("User").unwrap(); let query = EntityQuery::new( src.hash.clone(), block, - EntityCollection::All(vec![( - TEST_SUBGRAPH_SCHEMA.entity_type("User").unwrap(), - AttributeNames::All, - )]), + EntityCollection::All(vec![(user_type.clone(), AttributeNames::All)]), ); + let exp = exp + .into_iter() + .map(|id| user_type.parse_id(id).unwrap()) + .collect::>(); let act: Vec<_> = store .find(query) .unwrap() diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 92f16f7a2f5..9f07e834652 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -1,8 +1,7 @@ //! 
Test mapping of GraphQL schema to a relational schema use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; -use graph::data::store::scalar; -use graph::data::value::Word; +use graph::data::store::{scalar, Id}; use graph::entity; use graph::prelude::{ o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, @@ -539,14 +538,22 @@ fn find() { // Happy path: find existing entity let entity = layout - .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) + .find( + conn, + &SCALAR_TYPE.parse_key("one").unwrap(), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&SCALAR_ENTITY), entity); // Find non-existing entity let entity = layout - .find(conn, &SCALAR_TYPE.key("noone"), BLOCK_NUMBER_MAX) + .find( + conn, + &SCALAR_TYPE.parse_key("noone").unwrap(), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[noone]"); assert!(entity.is_none()); }); @@ -564,7 +571,11 @@ fn insert_null_fulltext_fields() { // Find entity with null string values let entity = layout - .find(conn, &NULLABLE_STRINGS_TYPE.key("one"), BLOCK_NUMBER_MAX) + .find( + conn, + &NULLABLE_STRINGS_TYPE.parse_key("one").unwrap(), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read NullableStrings[one]") .unwrap(); assert_entity_eq!(scrub(&EMPTY_NULLABLESTRINGS_ENTITY), entity); @@ -591,7 +602,11 @@ fn update() { .expect("Failed to update"); let actual = layout - .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) + .find( + conn, + &SCALAR_TYPE.parse_key("one").unwrap(), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&entity), actual); @@ -631,7 +646,7 @@ fn update_many() { let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let keys: Vec = ["one", "two", "three"] .iter() - .map(|id| SCALAR_TYPE.key(*id)) + .map(|id| SCALAR_TYPE.parse_key(*id).unwrap()) .collect(); let entities_vec = vec![one, two, three]; @@ -646,7 +661,7 @@ fn 
update_many() { .iter() .map(|&id| { layout - .find(conn, &SCALAR_TYPE.key(id), BLOCK_NUMBER_MAX) + .find(conn, &SCALAR_TYPE.parse_key(id).unwrap(), BLOCK_NUMBER_MAX) .unwrap_or_else(|_| panic!("Failed to read Scalar[{}]", id)) .unwrap() }) @@ -707,7 +722,11 @@ fn serialize_bigdecimal() { .expect("Failed to update"); let actual = layout - .find(conn, &SCALAR_TYPE.key("one"), BLOCK_NUMBER_MAX) + .find( + conn, + &SCALAR_TYPE.parse_key("one").unwrap(), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(entity, actual); @@ -740,7 +759,7 @@ fn delete() { insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]); // Delete where nothing is getting deleted - let key = SCALAR_TYPE.key("no such entity"); + let key = SCALAR_TYPE.parse_key("no such entity").unwrap(); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let mut entity_keys = vec![key]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); @@ -753,7 +772,7 @@ fn delete() { // Delete entity two entity_keys .get_mut(0) - .map(|key| key.entity_id = Word::from("two")) + .map(|key| key.entity_id = SCALAR_TYPE.parse_id("two").unwrap()) .expect("Failed to update key"); let group = row_group_delete(&entity_type, 1, entity_keys); @@ -781,7 +800,7 @@ fn insert_many_and_delete_many() { // Delete entities with ids equal to "two" and "three" let entity_keys: Vec<_> = vec!["two", "three"] .into_iter() - .map(|key| SCALAR_TYPE.key(key)) + .map(|key| SCALAR_TYPE.parse_key(key).unwrap()) .collect(); let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys); let num_removed = layout @@ -849,7 +868,8 @@ fn conflicting_entity() { fn check(conn: &PgConnection, layout: &Layout, id: Value, cat: &str, dog: &str, ferret: &str) { let conflicting = |types: Vec<&EntityType>| { let types = types.into_iter().cloned().collect(); - layout.conflicting_entity(conn, &id.to_string(), types) + let id = Id::try_from(id.clone()).unwrap(); + layout.conflicting_entity(conn, &id, 
types) }; let cat_type = layout.input_schema.entity_type(cat).unwrap(); @@ -896,7 +916,7 @@ fn revert_block() { let assert_fred = |name: &str| { let fred = layout - .find(conn, &CAT_TYPE.key(id), BLOCK_NUMBER_MAX) + .find(conn, &CAT_TYPE.parse_key(id).unwrap(), BLOCK_NUMBER_MAX) .unwrap() .expect("there's a fred"); assert_eq!(name, fred.get("name").unwrap().as_str().unwrap()) diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index 5d25ed80def..a0573ff284e 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -3,7 +3,6 @@ use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; use graph::components::store::write::RowGroup; use graph::data::store::scalar; -use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::entity; use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry, StoreError}; @@ -15,6 +14,7 @@ use std::str::FromStr; use std::{collections::BTreeMap, sync::Arc}; use graph::data::store::scalar::{BigDecimal, BigInt}; +use graph::data::store::IdList; use graph::prelude::{ o, slog, web3::types::H256, AttributeNames, ChildMultiplicity, DeploymentHash, Entity, EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, StopwatchMetrics, @@ -216,7 +216,7 @@ fn bad_id() { layout: &Layout, id: &str, ) -> Result, StoreError> { - let key = THING_TYPE.key(id); + let key = THING_TYPE.parse_key(id)?; layout.find(conn, &key, BLOCK_NUMBER_MAX) } @@ -256,7 +256,7 @@ fn bad_id() { fn find() { run_test(|conn, layout| { fn find_entity(conn: &PgConnection, layout: &Layout, id: &str) -> Option { - let key = THING_TYPE.key(id); + let key = THING_TYPE.parse_key(id).unwrap(); layout .find(conn, &key, BLOCK_NUMBER_MAX) .expect(&format!("Failed to read Thing[{}]", id)) @@ -288,18 +288,22 @@ fn find_many() { insert_thing(conn, layout, ID2, NAME2); let mut id_map = 
BTreeMap::default(); - id_map.insert( - (THING_TYPE.clone(), CausalityRegion::ONCHAIN), - vec![ID.to_string(), ID2.to_string(), "badd".to_string()], - ); + let ids = IdList::try_from_iter( + &*THING_TYPE, + vec![ID, ID2, "badd"] + .into_iter() + .map(|id| THING_TYPE.parse_id(id).unwrap()), + ) + .unwrap(); + id_map.insert((THING_TYPE.clone(), CausalityRegion::ONCHAIN), ids); let entities = layout .find_many(conn, &id_map, BLOCK_NUMBER_MAX) .expect("Failed to read many things"); assert_eq!(2, entities.len()); - let id_key = THING_TYPE.key(ID); - let id2_key = THING_TYPE.key(ID2); + let id_key = THING_TYPE.parse_key(ID).unwrap(); + let id2_key = THING_TYPE.parse_key(ID2).unwrap(); assert!(entities.contains_key(&id_key), "Missing ID"); assert!(entities.contains_key(&id2_key), "Missing ID2"); }); @@ -343,7 +347,7 @@ fn delete() { insert_entity(conn, layout, "Thing", two); // Delete where nothing is getting deleted - let key = THING_TYPE.key("ffff"); + let key = THING_TYPE.parse_key("ffff").unwrap(); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.clone()]; let group = row_group_delete(&entity_type, 1, entity_keys.clone()); @@ -355,7 +359,7 @@ fn delete() { // Delete entity two entity_keys .get_mut(0) - .map(|key| key.entity_id = Word::from(TWO_ID)) + .map(|key| key.entity_id = entity_type.parse_id(TWO_ID).unwrap()) .expect("Failed to update entity types"); let group = row_group_delete(&entity_type, 1, entity_keys); let count = layout @@ -422,7 +426,7 @@ fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Ent #[test] fn query() { - fn fetch(conn: &PgConnection, layout: &Layout, coll: EntityCollection) -> Vec { + fn fetch(conn: &PgConnection, layout: &Layout, coll: EntityCollection) -> Vec { let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap(); let query = EntityQuery::new(id, BLOCK_NUMBER_MAX, coll).first(10); layout @@ -430,7 +434,7 @@ fn query() { .map(|(entities, _)| entities) 
.expect("the query succeeds") .into_iter() - .map(|e| e.id()) + .map(|e| e.id().to_string()) .collect::>() } @@ -455,7 +459,7 @@ fn query() { // things(where: { children_contains: [CHILD1] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![CHILD1.to_owned()], + ids: THING_TYPE.parse_ids(vec![CHILD1]).unwrap(), link: EntityLink::Direct( WindowAttribute::List("children".to_string()), ChildMultiplicity::Many, @@ -469,7 +473,9 @@ fn query() { // things(where: { children_contains: [GRANDCHILD1, GRANDCHILD2] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![GRANDCHILD1.to_owned(), GRANDCHILD2.to_owned()], + ids: THING_TYPE + .parse_ids(vec![GRANDCHILD1, GRANDCHILD2]) + .unwrap(), link: EntityLink::Direct( WindowAttribute::List("children".to_string()), ChildMultiplicity::Single, @@ -483,7 +489,7 @@ fn query() { // things(where: { parent: [ROOT] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![ROOT.to_owned()], + ids: THING_TYPE.parse_ids(vec![ROOT]).unwrap(), link: EntityLink::Direct( WindowAttribute::Scalar("parent".to_string()), ChildMultiplicity::Many, @@ -497,7 +503,7 @@ fn query() { // things(where: { parent: [CHILD1, CHILD2] }) { id } let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], + ids: THING_TYPE.parse_ids(vec![CHILD1, CHILD2]).unwrap(), link: EntityLink::Direct( WindowAttribute::Scalar("parent".to_string()), ChildMultiplicity::Single, @@ -512,10 +518,10 @@ fn query() { // This is the inner 'children' query let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![ROOT.to_owned()], + ids: THING_TYPE.parse_ids(vec![ROOT]).unwrap(), link: EntityLink::Parent( THING_TYPE.clone(), - ParentLink::List(vec![vec![CHILD1.to_owned(), CHILD2.to_owned()]]), + 
ParentLink::List(vec![THING_TYPE.parse_ids(vec![CHILD1, CHILD2]).unwrap()]), ), column_names: AttributeNames::All, }]); @@ -527,10 +533,10 @@ fn query() { // This is the inner 'parent' query let coll = EntityCollection::Window(vec![EntityWindow { child_type: THING_TYPE.clone(), - ids: vec![CHILD1.to_owned(), CHILD2.to_owned()], + ids: THING_TYPE.parse_ids(vec![CHILD1, CHILD2]).unwrap(), link: EntityLink::Parent( THING_TYPE.clone(), - ParentLink::Scalar(vec![ROOT.to_owned(), ROOT.to_owned()]), + ParentLink::Scalar(THING_TYPE.parse_ids(vec![ROOT, ROOT]).unwrap()), ), column_names: AttributeNames::All, }]); diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index 113a7eeb90c..2e3bb5f9430 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -282,7 +282,7 @@ fn create_test_entity( }; EntityOperation::Set { - key: entity_type.key(id), + key: entity_type.parse_key(id).unwrap(), data: test_entity, } } @@ -305,7 +305,7 @@ fn get_entity_count(store: Arc, subgraph_id: &DeploymentHash) -> u6 #[test] fn delete_entity() { run_test(|store, writable, deployment| async move { - let entity_key = USER_TYPE.key("3"); + let entity_key = USER_TYPE.parse_key("3").unwrap(); // Check that there is an entity to remove. writable.get(&entity_key).unwrap().unwrap(); @@ -334,7 +334,7 @@ fn get_entity_1() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = USER_TYPE.key("1"); + let key = USER_TYPE.parse_key("1").unwrap(); let result = writable.get(&key).unwrap(); let bin_name = Value::Bytes("Johnton".as_bytes().into()); @@ -360,7 +360,7 @@ fn get_entity_1() { fn get_entity_3() { run_test(|_, writable, _| async move { let schema = ReadStore::input_schema(&writable); - let key = USER_TYPE.key("3"); + let key = USER_TYPE.parse_key("3").unwrap(); let result = writable.get(&key).unwrap(); let expected_entity = entity! 
{ schema => @@ -383,7 +383,7 @@ fn get_entity_3() { #[test] fn insert_entity() { run_test(|store, writable, deployment| async move { - let entity_key = USER_TYPE.key("7"); + let entity_key = USER_TYPE.parse_key("7").unwrap(); let test_entity = create_test_entity( "7", &*USER_TYPE, @@ -413,7 +413,7 @@ fn insert_entity() { #[test] fn update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = USER_TYPE.key("1"); + let entity_key = USER_TYPE.parse_key("1").unwrap(); let op = create_test_entity( "1", @@ -459,7 +459,7 @@ fn update_existing() { #[test] fn partially_update_existing() { run_test(|store, writable, deployment| async move { - let entity_key = USER_TYPE.key("1"); + let entity_key = USER_TYPE.parse_key("1").unwrap(); let schema = writable.input_schema(); let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -1024,7 +1024,7 @@ fn revert_block_with_delete() { .desc("name"); // Delete entity with id=2 - let del_key = USER_TYPE.key("2"); + let del_key = USER_TYPE.parse_key("2").unwrap(); // Process deletion transact_and_wait( @@ -1069,7 +1069,7 @@ fn revert_block_with_delete() { #[test] fn revert_block_with_partial_update() { run_test(|store, writable, deployment| async move { - let entity_key = USER_TYPE.key("1"); + let entity_key = USER_TYPE.parse_key("1").unwrap(); let schema = writable.input_schema(); let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; @@ -1165,7 +1165,7 @@ fn revert_block_with_dynamic_data_source_operations() { let schema = writable.input_schema(); // Create operations to add a user - let user_key = USER_TYPE.key("1"); + let user_key = USER_TYPE.parse_key("1").unwrap(); let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; // Get the original user for comparisons @@ -1295,7 +1295,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { added_entities .iter() .map(|(id, data)| EntityOperation::Set { - key: USER_TYPE.key(id.as_str()), + key: USER_TYPE.parse_key(id.as_str()).unwrap(), data: data.clone(), }) .collect(), @@ -1306,13 +1306,13 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { // Update an entity in the store let updated_entity = entity! { schema => id: "1", name: "Johnny" }; let update_op = EntityOperation::Set { - key: USER_TYPE.key("1"), + key: USER_TYPE.parse_key("1").unwrap(), data: updated_entity.clone(), }; // Delete an entity in the store let delete_op = EntityOperation::Remove { - key: USER_TYPE.key("2"), + key: USER_TYPE.parse_key("2").unwrap(), }; // Commit update & delete ops @@ -1501,7 +1501,7 @@ fn handle_large_string_with_index() { ) -> EntityModification { let data = entity! { schema => id: id, name: name }; - let key = USER_TYPE.key(id); + let key = USER_TYPE.parse_key(id).unwrap(); EntityModification::insert(key, data, block) } @@ -1558,8 +1558,8 @@ fn handle_large_string_with_index() { .iter() .map(|e| e.id()) .collect(); - - assert_eq!(vec![ONE], ids); + let exp = USER_TYPE.parse_ids(vec![ONE]).unwrap().as_ids(); + assert_eq!(exp, ids); // Make sure we check the full string and not just a prefix let mut prefix = long_text.clone(); @@ -1578,7 +1578,8 @@ fn handle_large_string_with_index() { .collect(); // Users with name 'Cindini' and 'Johnton' - assert_eq!(vec!["2", "1"], ids); + let exp = USER_TYPE.parse_ids(vec!["2", "1"]).unwrap().as_ids(); + assert_eq!(exp, ids); }) } @@ -1596,7 +1597,7 @@ fn handle_large_bytea_with_index() { ) -> EntityModification { let data = entity! 
{ schema => id: id, bin_name: scalar::Bytes::from(name) }; - let key = USER_TYPE.key(id); + let key = USER_TYPE.parse_key(id).unwrap(); EntityModification::insert(key, data, block) } @@ -1659,7 +1660,8 @@ fn handle_large_bytea_with_index() { .map(|e| e.id()) .collect(); - assert_eq!(vec![ONE], ids); + let exp = USER_TYPE.parse_ids(vec![ONE]).unwrap().as_ids(); + assert_eq!(exp, ids); // Make sure we check the full string and not just a prefix let prefix = scalar::Bytes::from(&long_bytea.as_slice()[..64]); @@ -1677,7 +1679,8 @@ fn handle_large_bytea_with_index() { .collect(); // Users with name 'Cindini' and 'Johnton' - assert_eq!(vec!["2", "1"], ids); + let exp = USER_TYPE.parse_ids(vec!["2", "1"]).unwrap().as_ids(); + assert_eq!(exp, ids); }) } @@ -1707,10 +1710,9 @@ impl WindowQuery { .map(|(child_type, column_names)| { let attribute = WindowAttribute::Scalar("favorite_color".to_owned()); let link = EntityLink::Direct(attribute, ChildMultiplicity::Many); - let ids = vec!["red", "green", "yellow", "blue"] - .into_iter() - .map(String::from) - .collect(); + let ids = child_type + .parse_ids(vec!["red", "green", "yellow", "blue"]) + .unwrap(); EntityWindow { child_type, ids, @@ -1801,7 +1803,7 @@ fn window() { let entity = entity! 
{ TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color }; EntityOperation::Set { - key: entity_type.key(id), + key: entity_type.parse_key(id).unwrap(), data: entity, } } diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 14c8ec327e4..4d4abfc211c 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -107,7 +107,7 @@ fn block_pointer(number: u8) -> BlockPtr { } fn count_key(id: &str) -> EntityKey { - COUNTER_TYPE.key(id) + COUNTER_TYPE.parse_key(id).unwrap() } async fn insert_count(store: &Arc, deployment: &DeploymentLocator, count: u8) { @@ -198,7 +198,6 @@ fn count_get_derived(writable: &dyn WritableStore) -> i32 { entity_type: key.entity_type.clone(), entity_field: Word::from("id"), value: key.entity_id.clone(), - id_is_bytes: false, causality_region: CausalityRegion::ONCHAIN, }; let map = writable.get_derived(&query).unwrap(); From cf0601fe541f2f5ab165714179ebd88338d14731 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 26 Sep 2023 11:42:51 -0700 Subject: [PATCH 0453/2104] graph, store: Ignore unknown entity types in offchain mappings --- graph/src/data_source/offchain.rs | 16 ++++++++++++---- store/postgres/src/deployment.rs | 5 ++++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index da7a5d91975..7246dc92fed 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -16,7 +16,7 @@ use anyhow::{anyhow, Context, Error}; use itertools::Itertools; use lazy_static::lazy_static; use serde::Deserialize; -use slog::{info, Logger}; +use slog::{info, warn, Logger}; use std::{ collections::HashMap, fmt, @@ -414,11 +414,19 @@ impl UnresolvedMapping { logger: &Logger, ) -> Result { info!(logger, "Resolve offchain mapping"; "link" => &self.file.link); - let entities = self + // It is possible for a manifest to mention entity 
types that do not + // exist in the schema. Rather than fail the subgraph, which could + // fail existing subgraphs, filter them out and just log a warning. + let (entities, errs) = self .entities .iter() - .map(|s| schema.entity_type(s)) - .collect::>()?; + .map(|s| schema.entity_type(s).map_err(|_| s)) + .partition::, _>(Result::is_ok); + if !errs.is_empty() { + let errs = errs.into_iter().map(Result::unwrap_err).join(", "); + warn!(logger, "Ignoring unknown entity types in mapping"; "entities" => errs, "link" => &self.file.link); + } + let entities = entities.into_iter().map(Result::unwrap).collect::>(); Ok(Mapping { language: self.language, api_version: semver::Version::parse(&self.api_version)?, diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 2e1f0c7db0e..4c71620f3d9 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -919,8 +919,11 @@ pub(crate) fn entities_with_causality_region( .get_result::>(conn) .map_err(|e| e.into()) .map(|ents| { + // It is possible to have entity types in + // `entities_with_causality_region` that are not mentioned in + // the schema. 
ents.into_iter() - .map(|ent| schema.entity_type(&ent).unwrap()) + .filter_map(|ent| schema.entity_type(&ent).ok()) .collect() }) } From cc67e46659a330721c95f3603f0b5851ac99fdea Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 26 Sep 2023 15:09:14 -0700 Subject: [PATCH 0454/2104] store: Simplify some logic in deserialize_with_layout --- store/postgres/src/relational_queries.rs | 35 ++++++++++++------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 49cae1e2541..67d1cfd3ab1 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -475,25 +475,24 @@ impl EntityData { let parent_id = map .remove(PARENT_ID) .and_then(|json| { - if T::WITH_INTERNAL_KEYS { - match &parent_type { - None => { - // A query that does not have parents - // somehow returned parent ids. We have no - // idea how to deserialize that - Some(Err(graph::constraint_violation!( - "query unexpectedly produces parent ids" - ))) - } - Some(parent_type) => Some( - parent_type - .id_type() - .map_err(StoreError::from) - .and_then(|id_type| id_type.parse_id(json)), - ), + if !T::WITH_INTERNAL_KEYS { + return None; + } + match &parent_type { + None => { + // A query that does not have parents + // somehow returned parent ids. 
We have no + // idea how to deserialize that + Some(Err(graph::constraint_violation!( + "query unexpectedly produces parent ids" + ))) } - } else { - None + Some(parent_type) => Some( + parent_type + .id_type() + .map_err(StoreError::from) + .and_then(|id_type| id_type.parse_id(json)), + ), } }) .transpose()?; From 022dc366eccdfbfad2a14d27269a08b50d6d26fc Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 26 Sep 2023 15:11:29 -0700 Subject: [PATCH 0455/2104] graph: Use ID constant instead of "id" in a few places --- graph/src/data/store/id.rs | 4 ++-- graph/src/data/store/mod.rs | 4 ++-- graph/src/schema/mod.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/graph/src/data/store/id.rs b/graph/src/data/store/id.rs index 9881410b571..13a9539a715 100644 --- a/graph/src/data/store/id.rs +++ b/graph/src/data/store/id.rs @@ -19,7 +19,7 @@ use crate::{ schema::EntityType, }; -use super::{scalar, Value}; +use super::{scalar, Value, ID}; /// The types that can be used for the `id` of an entity #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -49,7 +49,7 @@ impl<'a> TryFrom<&s::ObjectType> for IdType { type Error = Error; fn try_from(obj_type: &s::ObjectType) -> Result { - let base_type = obj_type.field("id").unwrap().field_type.get_base_type(); + let base_type = obj_type.field(&*ID).unwrap().field_type.get_base_type(); match base_type { "ID" | "String" => Ok(IdType::String), diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 55e4cea632e..395898b37dd 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -867,7 +867,7 @@ impl Entity { s::Type::NamedType(name) => ValueType::from_str(name).unwrap_or_else(|_| { match schema.get_named_type(name) { Some(t::Object(obj_type)) => { - let id = obj_type.field("id").expect("all object types have an id"); + let id = obj_type.field(&*ID).expect("all object types have an id"); scalar_value_type(schema, &id.field_type) } Some(t::Interface(intf)) => { @@ 
-887,7 +887,7 @@ impl Entity { } Some(obj_type) => { let id = - obj_type.field("id").expect("all object types have an id"); + obj_type.field(&*ID).expect("all object types have an id"); scalar_value_type(schema, &id.field_type) } } diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index f5106c6d783..d53acb44a04 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -1,6 +1,6 @@ use crate::data::graphql::ext::{DirectiveExt, DirectiveFinder, DocumentExt, TypeExt, ValueExt}; use crate::data::graphql::ObjectTypeExt; -use crate::data::store::ValueType; +use crate::data::store::{ValueType, ID}; use crate::data::subgraph::DeploymentHash; use crate::prelude::{ anyhow, @@ -786,7 +786,7 @@ impl Schema { let id_types: HashSet<&str> = HashSet::from_iter( obj_types .iter() - .filter_map(|obj_type| obj_type.field("id")) + .filter_map(|obj_type| obj_type.field(&*ID)) .map(|f| f.field_type.get_base_type()) .map(|name| if name == "ID" { "String" } else { name }), ); From 53dbe9356ff4515cee08f9a0b92c70161dee11fa Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Wed, 27 Sep 2023 15:48:03 +0100 Subject: [PATCH 0456/2104] Filipe/substreams triggers (#4887) * substreams triggers * unify firehose and substreams mapper * wire up substreams near * cleanup * fix test * add trigger-filter and tests --- Cargo.lock | 132 +- Cargo.toml | 8 +- chain/arweave/build.rs | 2 +- .../proto/{type.proto => arweave.proto} | 0 chain/arweave/src/chain.rs | 59 +- .../common/proto/near-filter-substreams.proto | 15 + chain/cosmos/build.rs | 4 +- .../cosmos/proto/{type.proto => cosmos.proto} | 0 chain/cosmos/src/chain.rs | 62 +- chain/ethereum/build.rs | 2 +- .../proto/{codec.proto => ethereum.proto} | 0 chain/ethereum/src/chain.rs | 70 +- chain/near/Cargo.toml | 2 + chain/near/build.rs | 6 +- chain/near/proto/{codec.proto => near.proto} | 0 chain/near/proto/substreams-triggers.proto | 12 + chain/near/src/adapter.rs | 148 ++- chain/near/src/chain.rs | 164 ++- 
chain/near/src/codec.rs | 4 + chain/near/src/protobuf/receipts.v1.rs | 10 + chain/substreams/examples/substreams.rs | 5 +- chain/substreams/src/block_ingestor.rs | 20 +- chain/substreams/src/block_stream.rs | 1 + chain/substreams/src/chain.rs | 3 +- chain/substreams/src/data_source.rs | 55 +- chain/substreams/src/mapper.rs | 163 +-- graph/src/blockchain/block_stream.rs | 86 +- graph/src/blockchain/builder.rs | 6 +- graph/src/blockchain/firehose_block_stream.rs | 16 +- graph/src/env/mod.rs | 8 + graph/src/substreams/mod.rs | 17 + node/src/main.rs | 9 +- .../substreams-head-tracker}/Cargo.lock | 0 .../substreams-head-tracker}/Cargo.toml | 0 .../substreams-head-tracker}/Makefile | 0 .../rust-toolchain.toml | 0 .../substreams-head-tracker}/src/lib.rs | 0 .../substreams-head-tracker-v1.0.0.spkg | Bin .../substreams-head-tracker}/substreams.yaml | 0 .../substreams-trigger-filter/Cargo.lock | 498 +++++++ .../substreams-trigger-filter/Cargo.toml | 22 + substreams/substreams-trigger-filter/Makefile | 35 + substreams/substreams-trigger-filter/build.rs | 18 + .../substreams-trigger-filter/bun.lockb | Bin 0 -> 197771 bytes .../substreams-trigger-filter/package.json | 1 + .../proto/near.proto | 521 ++++++++ .../proto/receipts.proto | 15 + .../rust-toolchain.toml | 2 + .../substreams-trigger-filter/schema.graphql | 4 + .../substreams-trigger-filter/src/lib.rs | 157 +++ .../substreams-trigger-filter/src/pb/mod.rs | 8 + .../src/pb/receipts.v1.rs | 16 + .../src/pb/sf.near.type.v1.rs | 1181 +++++++++++++++++ .../substreams-trigger-filter/subgraph.yaml | 16 + .../substreams-trigger-filter-v0.1.0.spkg | Bin 0 -> 497306 bytes .../substreams-trigger-filter/substreams.yaml | 36 + substreams/trigger-filters/Cargo.toml | 7 + substreams/trigger-filters/src/lib.rs | 80 ++ tests/integration-tests/package.json | 1 - tests/integration-tests/yarn.lock | 910 +------------ tests/tests/integration_tests.rs | 2 +- 61 files changed, 3496 insertions(+), 1123 deletions(-) rename 
chain/arweave/proto/{type.proto => arweave.proto} (100%) create mode 100644 chain/common/proto/near-filter-substreams.proto rename chain/cosmos/proto/{type.proto => cosmos.proto} (100%) rename chain/ethereum/proto/{codec.proto => ethereum.proto} (100%) rename chain/near/proto/{codec.proto => near.proto} (100%) create mode 100644 chain/near/proto/substreams-triggers.proto create mode 100644 chain/near/src/protobuf/receipts.v1.rs rename {substreams-head-tracker => substreams/substreams-head-tracker}/Cargo.lock (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/Cargo.toml (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/Makefile (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/rust-toolchain.toml (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/src/lib.rs (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/substreams-head-tracker-v1.0.0.spkg (100%) rename {substreams-head-tracker => substreams/substreams-head-tracker}/substreams.yaml (100%) create mode 100755 substreams/substreams-trigger-filter/Cargo.lock create mode 100755 substreams/substreams-trigger-filter/Cargo.toml create mode 100755 substreams/substreams-trigger-filter/Makefile create mode 100644 substreams/substreams-trigger-filter/build.rs create mode 100755 substreams/substreams-trigger-filter/bun.lockb create mode 100644 substreams/substreams-trigger-filter/package.json create mode 100644 substreams/substreams-trigger-filter/proto/near.proto create mode 100755 substreams/substreams-trigger-filter/proto/receipts.proto create mode 100755 substreams/substreams-trigger-filter/rust-toolchain.toml create mode 100644 substreams/substreams-trigger-filter/schema.graphql create mode 100755 substreams/substreams-trigger-filter/src/lib.rs create mode 100755 substreams/substreams-trigger-filter/src/pb/mod.rs create mode 100644 
substreams/substreams-trigger-filter/src/pb/receipts.v1.rs create mode 100644 substreams/substreams-trigger-filter/src/pb/sf.near.type.v1.rs create mode 100644 substreams/substreams-trigger-filter/subgraph.yaml create mode 100644 substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg create mode 100755 substreams/substreams-trigger-filter/substreams.yaml create mode 100644 substreams/trigger-filters/Cargo.toml create mode 100644 substreams/trigger-filters/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 671db9a331c..48250d72590 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -282,12 +282,23 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1374191e2dd25f9ae02e3aa95041ed5d747fc77b3c102b49fe2dd9a8117a6244" dependencies = [ - "num-bigint", + "num-bigint 0.2.6", "num-integer", "num-traits", "serde", ] +[[package]] +name = "bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint 0.4.4", + "num-integer", + "num-traits", +] + [[package]] name = "bincode" version = "1.3.3" @@ -929,12 +940,12 @@ version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" dependencies = [ - "bigdecimal", + "bigdecimal 0.1.2", "bitflags 1.3.1", "byteorder", "chrono", "diesel_derives", - "num-bigint", + "num-bigint 0.2.6", "num-integer", "num-traits", "pq-sys", @@ -1503,7 +1514,7 @@ dependencies = [ "async-stream", "async-trait", "atomic_refcell", - "bigdecimal", + "bigdecimal 0.1.2", "bytes", "chrono", "cid", @@ -1516,13 +1527,13 @@ dependencies = [ "futures 0.3.16", "graphql-parser", "hex", - "hex-literal", + "hex-literal 0.4.1", "http", "isatty", "itertools 0.11.0", "lazy_static", "maplit", - "num-bigint", + "num-bigint 0.2.6", "num-traits", "num_cpus", "parking_lot 0.12.1", 
@@ -1634,6 +1645,7 @@ dependencies = [ name = "graph-chain-near" version = "0.32.0" dependencies = [ + "anyhow", "base64 0.20.0", "diesel", "graph", @@ -1643,6 +1655,7 @@ dependencies = [ "prost-types", "serde", "tonic-build", + "trigger-filters", ] [[package]] @@ -2056,6 +2069,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hex-literal" version = "0.4.1" @@ -2880,6 +2899,17 @@ dependencies = [ "serde", ] +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -2990,6 +3020,15 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +[[package]] +name = "pad" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" +dependencies = [ + "unicode-width", +] + [[package]] name = "parity-scale-codec" version = "3.0.0" @@ -3949,7 +3988,7 @@ dependencies = [ "indexmap 2.0.0", "serde", "serde_json", - "time 0.3.17", + "time", ] [[package]] @@ -4243,10 +4282,80 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "substreams" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af972e374502cdfc9998132f5343848d1c58f27a295dc061a89804371f408a46" +dependencies = [ + "anyhow", + "bigdecimal 0.3.1", + "hex", + "hex-literal 
0.3.4", + "num-bigint 0.4.4", + "num-traits", + "pad", + "prost", + "prost-build", + "prost-types", + "substreams-macro", + "thiserror", +] + +[[package]] +name = "substreams-entity-change" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e" +dependencies = [ + "base64 0.13.1", + "prost", + "prost-types", + "substreams", +] + [[package]] name = "substreams-head-tracker" version = "1.0.0" +[[package]] +name = "substreams-macro" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6521ccd011a4c3f52cd3c31fc7400733e4feba2094e0e0e6354adca25b2b3f37" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.107", + "thiserror", +] + +[[package]] +name = "substreams-near-core" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9922f437e6cb86b62cfd8bdede93937def710616ac2825ffff06b8770bbd06df" +dependencies = [ + "bs58", + "prost", + "prost-build", + "prost-types", +] + +[[package]] +name = "substreams-trigger-filter" +version = "0.32.0" +dependencies = [ + "anyhow", + "hex", + "prost", + "substreams", + "substreams-entity-change", + "substreams-near-core", + "tonic-build", + "trigger-filters", +] + [[package]] name = "subtle" version = "2.4.1" @@ -4368,7 +4477,7 @@ dependencies = [ "graph-store-postgres", "graphql-parser", "hex", - "hex-literal", + "hex-literal 0.4.1", "lazy_static", "pretty_assertions", "prost-types", @@ -4871,6 +4980,13 @@ dependencies = [ "tracing", ] +[[package]] +name = "trigger-filters" +version = "0.32.0" +dependencies = [ + "anyhow", +] + [[package]] name = "try-lock" version = "0.2.3" diff --git a/Cargo.toml b/Cargo.toml index 24c9d54df16..6d6142b5e83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ members = [ "runtime/*", "server/*", "store/*", - "substreams-head-tracker", + "substreams/*", "graph", "tests", ] @@ -33,3 +33,9 @@ 
incremental = false [profile.dev] incremental = false + +[profile.release] +lto = true +opt-level = 's' +strip = "debuginfo" + diff --git a/chain/arweave/build.rs b/chain/arweave/build.rs index e2ede2acef2..ea8153e7bd1 100644 --- a/chain/arweave/build.rs +++ b/chain/arweave/build.rs @@ -2,6 +2,6 @@ fn main() { println!("cargo:rerun-if-changed=proto"); tonic_build::configure() .out_dir("src/protobuf") - .compile(&["proto/type.proto"], &["proto"]) + .compile(&["proto/arweave.proto"], &["proto"]) .expect("Failed to compile Firehose Arweave proto(s)"); } diff --git a/chain/arweave/proto/type.proto b/chain/arweave/proto/arweave.proto similarity index 100% rename from chain/arweave/proto/type.proto rename to chain/arweave/proto/arweave.proto diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index d371bbe7c9c..7b10f0bd847 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -8,8 +8,10 @@ use graph::blockchain::{ use graph::cheap_clone::CheapClone; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; +use graph::env::EnvVars; use graph::firehose::FirehoseEndpoint; use graph::prelude::MetricsRegistry; +use graph::substreams::Clock; use graph::{ blockchain::{ block_stream::{ @@ -33,7 +35,7 @@ use crate::{ codec, data_source::{DataSource, UnresolvedDataSource}, }; -use graph::blockchain::block_stream::{BlockStream, FirehoseCursor}; +use graph::blockchain::block_stream::{BlockStream, FirehoseCursor, SubstreamsMapper}; pub struct Chain { logger_factory: LoggerFactory, @@ -50,7 +52,7 @@ impl std::fmt::Debug for Chain { } impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self) -> Chain { + fn build(self, _config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -127,7 +129,7 @@ impl Blockchain for Chain { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper {}); + let 
firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -135,8 +137,6 @@ impl Blockchain for Chain { store.block_ptr(), store.firehose_cursor(), firehose_mapper, - adapter, - filter, start_blocks, logger, self.metrics_registry.clone(), @@ -252,16 +252,54 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper {} +pub struct FirehoseMapper { + adapter: Arc>, + filter: Arc, +} + +#[async_trait] +impl SubstreamsMapper for FirehoseMapper { + fn decode_block( + &self, + output: Option<&prost_types::Any>, + ) -> Result, Error> { + let block = match output { + Some(block) => codec::Block::decode(block.value.as_ref())?, + None => anyhow::bail!("Arweave mapper is expected to always have a block"), + }; + + Ok(Some(block)) + } + + async fn block_with_triggers( + &self, + logger: &Logger, + block: codec::Block, + ) -> Result, Error> { + self.adapter + .triggers_in_block(logger, block, self.filter.as_ref()) + .await + } + async fn decode_triggers( + &self, + _logger: &Logger, + _clock: &Clock, + _block: &prost_types::Any, + ) -> Result, Error> { + unimplemented!() + } +} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { + fn trigger_filter(&self) -> &TriggerFilter { + self.filter.as_ref() + } + async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, - adapter: &Arc>, - filter: &TriggerFilter, ) -> Result, FirehoseError> { let step = ForkStep::from_i32(response.step).unwrap_or_else(|| { panic!( @@ -282,12 +320,13 @@ impl FirehoseMapperTrait for FirehoseMapper { // // Check about adding basic information about the block in the bstream::BlockResponseV2 or maybe // define a slimmed down stuct that would decode only a few fields and ignore all the rest. - let block = codec::Block::decode(any_block.value.as_ref())?; + // unwrap: Input cannot be None so output will be error or block. 
+ let block = self.decode_block(Some(&any_block))?.unwrap(); use ForkStep::*; match step { StepNew => Ok(BlockStreamEvent::ProcessBlock( - adapter.triggers_in_block(logger, block, filter).await?, + self.block_with_triggers(&logger, block).await?, FirehoseCursor::from(response.cursor.clone()), )), diff --git a/chain/common/proto/near-filter-substreams.proto b/chain/common/proto/near-filter-substreams.proto new file mode 100644 index 00000000000..d7e4a822573 --- /dev/null +++ b/chain/common/proto/near-filter-substreams.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +import "near.proto"; + +package receipts.v1; + +message BlockAndReceipts { + sf.near.codec.v1.Block block = 1; + repeated sf.near.codec.v1.ExecutionOutcomeWithId outcome = 2; + repeated sf.near.codec.v1.Receipt receipt = 3; +} + + + + diff --git a/chain/cosmos/build.rs b/chain/cosmos/build.rs index fc07b4907e0..c618d3b466d 100644 --- a/chain/cosmos/build.rs +++ b/chain/cosmos/build.rs @@ -1,4 +1,4 @@ -const PROTO_FILE: &str = "proto/type.proto"; +const PROTO_FILE: &str = "proto/cosmos.proto"; fn main() { println!("cargo:rerun-if-changed=proto"); @@ -49,6 +49,6 @@ fn main() { } builder - .compile(&["proto/type.proto"], &["proto"]) + .compile(&[PROTO_FILE], &["proto"]) .expect("Failed to compile Firehose Cosmos proto(s)"); } diff --git a/chain/cosmos/proto/type.proto b/chain/cosmos/proto/cosmos.proto similarity index 100% rename from chain/cosmos/proto/type.proto rename to chain/cosmos/proto/cosmos.proto diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index 6ebd291a269..c3e9c1f29bc 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,9 +1,11 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::BlockIngestor; +use graph::env::EnvVars; use graph::prelude::MetricsRegistry; +use graph::substreams::Clock; use std::sync::Arc; -use graph::blockchain::block_stream::FirehoseCursor; +use 
graph::blockchain::block_stream::{FirehoseCursor, SubstreamsMapper}; use graph::blockchain::client::ChainClient; use graph::blockchain::{BasicBlockchainBuilder, BlockchainBuilder, NoopRuntimeAdapter}; use graph::cheap_clone::CheapClone; @@ -46,7 +48,7 @@ impl std::fmt::Debug for Chain { } impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self) -> Chain { + fn build(self, _config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -122,7 +124,7 @@ impl Blockchain for Chain { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper {}); + let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -130,8 +132,6 @@ impl Blockchain for Chain { store.block_ptr(), store.firehose_cursor(), firehose_mapper, - adapter, - filter, start_blocks, logger, self.metrics_registry.clone(), @@ -325,16 +325,55 @@ fn build_tx_context(tx: &codec::TxResult) -> codec::TransactionContext { } } -pub struct FirehoseMapper {} +pub struct FirehoseMapper { + adapter: Arc>, + filter: Arc, +} + +#[async_trait] +impl SubstreamsMapper for FirehoseMapper { + fn decode_block( + &self, + output: Option<&prost_types::Any>, + ) -> Result, Error> { + let block = match output { + Some(block) => crate::Block::decode(block.value.as_ref())?, + None => anyhow::bail!("cosmos mapper is expected to always have a block"), + }; + + Ok(Some(block)) + } + + async fn block_with_triggers( + &self, + logger: &Logger, + block: crate::Block, + ) -> Result, Error> { + self.adapter + .triggers_in_block(logger, block, self.filter.as_ref()) + .await + } + + async fn decode_triggers( + &self, + _logger: &Logger, + _clock: &Clock, + _block: &prost_types::Any, + ) -> Result, Error> { + unimplemented!() + } +} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { + fn trigger_filter(&self) -> &TriggerFilter { + self.filter.as_ref() + } + async fn 
to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, - adapter: &Arc>, - filter: &TriggerFilter, ) -> Result, FirehoseError> { let step = ForkStep::from_i32(response.step).unwrap_or_else(|| { panic!( @@ -355,16 +394,17 @@ impl FirehoseMapperTrait for FirehoseMapper { // // Check about adding basic information about the block in the bstream::BlockResponseV2 or maybe // define a slimmed down struct that would decode only a few fields and ignore all the rest. - let sp = codec::Block::decode(any_block.value.as_ref())?; + // unwrap: Input cannot be None so output will be error or block. + let block = self.decode_block(Some(&any_block))?.unwrap(); match step { ForkStep::StepNew => Ok(BlockStreamEvent::ProcessBlock( - adapter.triggers_in_block(logger, sp, filter).await?, + self.block_with_triggers(logger, block).await?, FirehoseCursor::from(response.cursor.clone()), )), ForkStep::StepUndo => { - let parent_ptr = sp + let parent_ptr = block .parent_ptr() .map_err(FirehoseError::from)? 
.expect("Genesis block should never be reverted"); diff --git a/chain/ethereum/build.rs b/chain/ethereum/build.rs index 0efb360140d..8ccae67aa92 100644 --- a/chain/ethereum/build.rs +++ b/chain/ethereum/build.rs @@ -3,6 +3,6 @@ fn main() { tonic_build::configure() .out_dir("src/protobuf") - .compile(&["proto/codec.proto"], &["proto"]) + .compile(&["proto/ethereum.proto"], &["proto"]) .expect("Failed to compile Firehose Ethereum proto(s)"); } diff --git a/chain/ethereum/proto/codec.proto b/chain/ethereum/proto/ethereum.proto similarity index 100% rename from chain/ethereum/proto/codec.proto rename to chain/ethereum/proto/ethereum.proto diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 12a322c6422..43bfab20f48 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -11,6 +11,7 @@ use graph::prelude::{ EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, MetricsRegistry, }; use graph::schema::InputSchema; +use graph::substreams::Clock; use graph::{ blockchain::{ block_stream::{ @@ -53,7 +54,9 @@ use crate::{ }, SubgraphEthRpcMetrics, TriggerFilter, ENV_VARS, }; -use graph::blockchain::block_stream::{BlockStream, BlockStreamBuilder, FirehoseCursor}; +use graph::blockchain::block_stream::{ + BlockStream, BlockStreamBuilder, FirehoseCursor, SubstreamsMapper, +}; /// Celo Mainnet: 42220, Testnet Alfajores: 44787, Testnet Baklava: 62320 const CELO_CHAIN_IDS: [u64; 3] = [42220, 44787, 62320]; @@ -87,7 +90,7 @@ impl BlockStreamBuilder for EthereumStreamBuilder { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper {}); + let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -95,8 +98,6 @@ impl BlockStreamBuilder for EthereumStreamBuilder { subgraph_current_block, block_cursor, firehose_mapper, - adapter, - filter, start_blocks, logger, chain.registry.clone(), @@ -727,16 
+728,59 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper {} +pub struct FirehoseMapper { + adapter: Arc>, + filter: Arc, +} + +#[async_trait] +impl SubstreamsMapper for FirehoseMapper { + fn decode_block( + &self, + output: Option<&prost_types::Any>, + ) -> Result, Error> { + let block = match output { + Some(block) => codec::Block::decode(block.value.as_ref())?, + None => anyhow::bail!("ethereum mapper is expected to always have a block"), + }; + + // See comment(437a9f17-67cc-478f-80a3-804fe554b227) ethereum_block.calls is always Some even if calls + // is empty + let ethereum_block: EthereumBlockWithCalls = (&block).try_into()?; + + Ok(Some(BlockFinality::NonFinal(ethereum_block))) + } + + async fn block_with_triggers( + &self, + logger: &Logger, + block: BlockFinality, + ) -> Result, Error> { + self.adapter + .triggers_in_block(logger, block, &self.filter) + .await + } + + async fn decode_triggers( + &self, + _logger: &Logger, + _clock: &Clock, + _block: &prost_types::Any, + ) -> Result, Error> { + unimplemented!() + } +} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { + fn trigger_filter(&self) -> &TriggerFilter { + self.filter.as_ref() + } + async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, - adapter: &Arc>, - filter: &TriggerFilter, ) -> Result, FirehoseError> { let step = ForkStep::from_i32(response.step).unwrap_or_else(|| { panic!( @@ -761,15 +805,9 @@ impl FirehoseMapperTrait for FirehoseMapper { use firehose::ForkStep::*; match step { StepNew => { - // See comment(437a9f17-67cc-478f-80a3-804fe554b227) ethereum_block.calls is always Some even if calls - // is empty - let ethereum_block: EthereumBlockWithCalls = (&block).try_into()?; - - // triggers in block never actually calls the ethereum traces api. - // TODO: Split the trigger parsing from call retrieving. 
- let block_with_triggers = adapter - .triggers_in_block(logger, BlockFinality::NonFinal(ethereum_block), filter) - .await?; + // unwrap: Input cannot be None so output will be error or block. + let block = self.decode_block(Some(&any_block))?.unwrap(); + let block_with_triggers = self.block_with_triggers(logger, block).await?; Ok(BlockStreamEvent::ProcessBlock( block_with_triggers, diff --git a/chain/near/Cargo.toml b/chain/near/Cargo.toml index d41b901159e..9f43c90a44e 100644 --- a/chain/near/Cargo.toml +++ b/chain/near/Cargo.toml @@ -12,9 +12,11 @@ graph = { path = "../../graph" } prost = { workspace = true } prost-types = { workspace = true } serde = "1.0" +anyhow = "1" graph-runtime-wasm = { path = "../../runtime/wasm" } graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] diesel = { version = "1.4.7", features = ["postgres", "serde_json", "numeric", "r2d2"] } +trigger-filters.path = "../../substreams/trigger-filters" diff --git a/chain/near/build.rs b/chain/near/build.rs index 73c33efb26f..611f861baf2 100644 --- a/chain/near/build.rs +++ b/chain/near/build.rs @@ -2,6 +2,10 @@ fn main() { println!("cargo:rerun-if-changed=proto"); tonic_build::configure() .out_dir("src/protobuf") - .compile(&["proto/codec.proto"], &["proto"]) + .extern_path(".sf.near.codec.v1", "crate::codec::pbcodec") + .compile( + &["proto/near.proto", "proto/substreams-triggers.proto"], + &["proto"], + ) .expect("Failed to compile Firehose NEAR proto(s)"); } diff --git a/chain/near/proto/codec.proto b/chain/near/proto/near.proto similarity index 100% rename from chain/near/proto/codec.proto rename to chain/near/proto/near.proto diff --git a/chain/near/proto/substreams-triggers.proto b/chain/near/proto/substreams-triggers.proto new file mode 100644 index 00000000000..947052a2566 --- /dev/null +++ b/chain/near/proto/substreams-triggers.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +import "near.proto"; + +package receipts.v1; + +message BlockAndReceipts { + 
sf.near.codec.v1.Block block = 1; + repeated sf.near.codec.v1.ExecutionOutcomeWithId outcome = 2; + repeated sf.near.codec.v1.Receipt receipt = 3; +} + diff --git a/chain/near/src/adapter.rs b/chain/near/src/adapter.rs index 89c95b20c28..4d6151aa5ca 100644 --- a/chain/near/src/adapter.rs +++ b/chain/near/src/adapter.rs @@ -4,6 +4,7 @@ use crate::data_source::PartialAccounts; use crate::{data_source::DataSource, Chain}; use graph::blockchain as bc; use graph::firehose::{BasicReceiptFilter, PrefixSuffixPair}; +use graph::itertools::Itertools; use graph::prelude::*; use prost::Message; use prost_types::Any; @@ -17,6 +18,31 @@ pub struct TriggerFilter { pub(crate) receipt_filter: NearReceiptFilter, } +impl TriggerFilter { + pub fn to_module_params(&self) -> String { + let matches = self.receipt_filter.accounts.iter().join(","); + let partial_matches = self + .receipt_filter + .partial_accounts + .iter() + .map(|(starts_with, ends_with)| match (starts_with, ends_with) { + (None, None) => unreachable!(), + (None, Some(e)) => format!(",{}", e), + (Some(s), None) => format!("{},", s), + (Some(s), Some(e)) => format!("{},{}", s, e), + }) + .join("\n"); + + format!( + "{},{}\n{}\n{}", + self.receipt_filter.accounts.len(), + self.receipt_filter.partial_accounts.len(), + matches, + partial_matches + ) + } +} + impl bc::TriggerFilter for TriggerFilter { fn extend<'a>(&mut self, data_sources: impl Iterator + Clone) { let TriggerFilter { @@ -225,13 +251,14 @@ mod test { use std::collections::HashSet; use super::NearBlockFilter; - use crate::adapter::{TriggerFilter, BASIC_RECEIPT_FILTER_TYPE_URL}; + use crate::adapter::{NearReceiptFilter, TriggerFilter, BASIC_RECEIPT_FILTER_TYPE_URL}; use graph::{ blockchain::TriggerFilter as _, firehose::{BasicReceiptFilter, PrefixSuffixPair}, }; use prost::Message; use prost_types::Any; + use trigger_filters::NearFilter; #[test] fn near_trigger_empty_filter() { @@ -244,6 +271,7 @@ mod test { partial_accounts: HashSet::new(), }, }; + 
assert_eq!(filter.to_module_params(), "0,0\n\n"); assert_eq!(filter.to_firehose_filter(), vec![]); } @@ -337,6 +365,124 @@ mod test { ); } + #[test] + fn test_near_filter_params_serialization() -> anyhow::Result<()> { + struct Case<'a> { + name: &'a str, + input: NearReceiptFilter, + expected: NearFilter<'a>, + } + + let cases = vec![ + Case { + name: "empty", + input: NearReceiptFilter::default(), + expected: NearFilter::default(), + }, + Case { + name: "only full matches", + input: super::NearReceiptFilter { + accounts: HashSet::from_iter(vec!["acc1".into()]), + partial_accounts: HashSet::new(), + }, + expected: NearFilter { + accounts: HashSet::from_iter(vec!["acc1"]), + partial_accounts: HashSet::default(), + }, + }, + Case { + name: "only partial matches", + input: super::NearReceiptFilter { + accounts: HashSet::new(), + partial_accounts: HashSet::from_iter(vec![(Some("acc1".into()), None)]), + }, + expected: NearFilter { + accounts: HashSet::default(), + partial_accounts: HashSet::from_iter(vec![(Some("acc1"), None)]), + }, + }, + Case { + name: "both 1len matches", + input: super::NearReceiptFilter { + accounts: HashSet::from_iter(vec!["acc1".into()]), + partial_accounts: HashSet::from_iter(vec![(Some("s1".into()), None)]), + }, + expected: NearFilter { + accounts: HashSet::from_iter(vec!["acc1"]), + partial_accounts: HashSet::from_iter(vec![(Some("s1"), None)]), + }, + }, + Case { + name: "more partials matches", + input: super::NearReceiptFilter { + accounts: HashSet::from_iter(vec!["acc1".into()]), + partial_accounts: HashSet::from_iter(vec![ + (Some("s1".into()), None), + (None, Some("s3".into())), + (Some("s2".into()), Some("s2".into())), + ]), + }, + expected: NearFilter { + accounts: HashSet::from_iter(vec!["acc1"]), + partial_accounts: HashSet::from_iter(vec![ + (Some("s1"), None), + (None, Some("s3")), + (Some("s2"), Some("s2")), + ]), + }, + }, + Case { + name: "both matches", + input: NearReceiptFilter { + accounts: HashSet::from_iter(vec![ + 
"acc1".into(), + "=12-30786jhasdgmasd".into(), + "^&%^&^$".into(), + "acc3".into(), + ]), + partial_accounts: HashSet::from_iter(vec![ + (Some("1.2.2.3.45.5".into()), None), + (None, Some("kjysdfoiua6sd".into())), + (Some("120938pokasd".into()), Some("102938poai[sd]".into())), + ]), + }, + expected: NearFilter { + accounts: HashSet::from_iter(vec![ + "acc1", + "=12-30786jhasdgmasd", + "^&%^&^$", + "acc3", + ]), + partial_accounts: HashSet::from_iter(vec![ + (Some("1.2.2.3.45.5"), None), + (None, Some("kjysdfoiua6sd")), + (Some("120938pokasd"), Some("102938poai[sd]")), + ]), + }, + }, + ]; + + for case in cases.into_iter() { + let tf = TriggerFilter { + block_filter: NearBlockFilter::default(), + receipt_filter: case.input, + }; + let param = tf.to_module_params(); + let filter = NearFilter::try_from(param.as_str()).expect(&format!( + "case: {}, the filter to parse params correctly", + case.name + )); + + assert_eq!( + filter, case.expected, + "case {},param:\n{}", + case.name, param + ); + } + + Ok(()) + } + fn decode_filter(firehose_filter: Vec) -> BasicReceiptFilter { let firehose_filter = firehose_filter[0].clone(); assert_eq!( diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 765a88f495f..3fdf5b1bee7 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -1,15 +1,18 @@ -use graph::anyhow; +use anyhow::anyhow; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; +use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; +use graph::env::EnvVars; use graph::firehose::FirehoseEndpoint; use graph::prelude::{MetricsRegistry, TryFutureExt}; use graph::schema::InputSchema; +use 
graph::substreams::{Clock, Package}; use graph::{ anyhow::Result, blockchain::{ @@ -29,13 +32,21 @@ use prost::Message; use std::sync::Arc; use crate::adapter::TriggerFilter; +use crate::codec::substreams_triggers::BlockAndReceipts; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::trigger::{self, NearTrigger}; use crate::{ codec, data_source::{DataSource, UnresolvedDataSource}, }; -use graph::blockchain::block_stream::{BlockStream, BlockStreamBuilder, FirehoseCursor}; +use graph::blockchain::block_stream::{ + BlockStream, BlockStreamBuilder, FirehoseCursor, SubstreamsMapper, +}; + +const NEAR_FILTER_MODULE_NAME: &str = "near_filter"; +const SUBSTREAMS_TRIGGER_FILTER_BYTES: &[u8; 497306] = include_bytes!( + "../../../substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg" +); pub struct NearStreamBuilder {} @@ -43,14 +54,56 @@ pub struct NearStreamBuilder {} impl BlockStreamBuilder for NearStreamBuilder { async fn build_substreams( &self, - _chain: &Chain, + chain: &Chain, _schema: InputSchema, - _deployment: DeploymentLocator, - _block_cursor: FirehoseCursor, - _subgraph_current_block: Option, - _filter: Arc<::TriggerFilter>, + deployment: DeploymentLocator, + block_cursor: FirehoseCursor, + subgraph_current_block: Option, + filter: Arc<::TriggerFilter>, ) -> Result>> { - unimplemented!() + let mapper = Arc::new(FirehoseMapper { + adapter: Arc::new(TriggersAdapter {}), + filter, + }); + let mut package = + Package::decode(SUBSTREAMS_TRIGGER_FILTER_BYTES.to_vec().as_ref()).unwrap(); + match package.modules.as_mut() { + Some(modules) => modules + .modules + .iter_mut() + .find(|module| module.name == NEAR_FILTER_MODULE_NAME) + .map(|module| { + graph::substreams::patch_module_params( + mapper.filter.to_module_params(), + module, + ); + module + }), + None => None, + }; + + let logger = chain + .logger_factory + .subgraph_logger(&deployment) + .new(o!("component" => "SubstreamsBlockStream")); + let start_block 
= subgraph_current_block + .as_ref() + .map(|b| b.number) + .unwrap_or_default(); + + Ok(Box::new(SubstreamsBlockStream::new( + deployment.hash, + chain.chain_client(), + subgraph_current_block, + block_cursor.as_ref().clone(), + mapper, + package.modules.clone(), + NEAR_FILTER_MODULE_NAME.to_string(), + vec![start_block], + vec![], + logger, + chain.metrics_registry.clone(), + ))) } async fn build_firehose( @@ -76,7 +129,7 @@ impl BlockStreamBuilder for NearStreamBuilder { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper {}); + let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -84,8 +137,6 @@ impl BlockStreamBuilder for NearStreamBuilder { subgraph_current_block, block_cursor, firehose_mapper, - adapter, - filter, start_blocks, logger, chain.metrics_registry.clone(), @@ -112,6 +163,7 @@ pub struct Chain { chain_store: Arc, metrics_registry: Arc, block_stream_builder: Arc>, + prefer_substreams: bool, } impl std::fmt::Debug for Chain { @@ -121,7 +173,7 @@ impl std::fmt::Debug for Chain { } impl BlockchainBuilder for BasicBlockchainBuilder { - fn build(self) -> Chain { + fn build(self, config: &Arc) -> Chain { Chain { logger_factory: self.logger_factory, name: self.name, @@ -129,6 +181,7 @@ impl BlockchainBuilder for BasicBlockchainBuilder { client: Arc::new(ChainClient::new_firehose(self.firehose_endpoints)), metrics_registry: self.metrics_registry, block_stream_builder: Arc::new(NearStreamBuilder {}), + prefer_substreams: config.prefer_substreams_block_streams, } } } @@ -174,6 +227,20 @@ impl Blockchain for Chain { filter: Arc, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { + if self.prefer_substreams { + return self + .block_stream_builder + .build_substreams( + self, + store.input_schema(), + deployment, + store.firehose_cursor(), + store.block_ptr(), + filter, + ) + .await; + } + 
self.block_stream_builder .build_firehose( self, @@ -334,16 +401,78 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper {} +pub struct FirehoseMapper { + adapter: Arc>, + filter: Arc, +} + +#[async_trait] +impl SubstreamsMapper for FirehoseMapper { + fn decode_block( + &self, + output: Option<&prost_types::Any>, + ) -> Result, Error> { + let block = match output { + Some(block) => codec::Block::decode(block.value.as_ref())?, + None => anyhow::bail!("near mapper is expected to always have a block"), + }; + + Ok(Some(block)) + } + + async fn block_with_triggers( + &self, + logger: &Logger, + block: codec::Block, + ) -> Result, Error> { + self.adapter + .triggers_in_block(logger, block, self.filter.as_ref()) + .await + } + + async fn decode_triggers( + &self, + _logger: &Logger, + _clock: &Clock, + message: &prost_types::Any, + ) -> Result, Error> { + let BlockAndReceipts { + block, + outcome, + receipt, + } = BlockAndReceipts::decode(message.value.as_ref())?; + let block = block.ok_or_else(|| anyhow!("near block is mandatory on substreams"))?; + let arc_block = Arc::new(block.clone()); + + let trigger_data = outcome + .into_iter() + .zip(receipt.into_iter()) + .map(|(outcome, receipt)| { + NearTrigger::Receipt(Arc::new(trigger::ReceiptWithOutcome { + outcome, + receipt, + block: arc_block.clone(), + })) + }) + .collect(); + + Ok(BlockWithTriggers { + block, + trigger_data, + }) + } +} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { + fn trigger_filter(&self) -> &TriggerFilter { + self.filter.as_ref() + } + async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, - adapter: &Arc>, - filter: &TriggerFilter, ) -> Result, FirehoseError> { let step = ForkStep::from_i32(response.step).unwrap_or_else(|| { panic!( @@ -364,12 +493,13 @@ impl FirehoseMapperTrait for FirehoseMapper { // // Check about adding basic information about the block in the bstream::BlockResponseV2 or maybe // define a slimmed down 
stuct that would decode only a few fields and ignore all the rest. - let block = codec::Block::decode(any_block.value.as_ref())?; + // unwrap: Input cannot be None so output will be error or block. + let block = self.decode_block(Some(&any_block))?.unwrap(); use ForkStep::*; match step { StepNew => Ok(BlockStreamEvent::ProcessBlock( - adapter.triggers_in_block(logger, block, filter).await?, + self.block_with_triggers(logger, block).await?, FirehoseCursor::from(response.cursor.clone()), )), diff --git a/chain/near/src/codec.rs b/chain/near/src/codec.rs index 854e9dc1341..cd4fba9f906 100644 --- a/chain/near/src/codec.rs +++ b/chain/near/src/codec.rs @@ -2,6 +2,10 @@ #[path = "protobuf/sf.near.codec.v1.rs"] pub mod pbcodec; +#[rustfmt::skip] +#[path = "protobuf/receipts.v1.rs"] +pub mod substreams_triggers; + use graph::{ blockchain::Block as BlockchainBlock, blockchain::BlockPtr, diff --git a/chain/near/src/protobuf/receipts.v1.rs b/chain/near/src/protobuf/receipts.v1.rs new file mode 100644 index 00000000000..5b648d84e90 --- /dev/null +++ b/chain/near/src/protobuf/receipts.v1.rs @@ -0,0 +1,10 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockAndReceipts { + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub outcome: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub receipt: ::prost::alloc::vec::Vec, +} diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index b7caaf08dce..f0a18a610ed 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -68,7 +68,10 @@ async fn main() -> Result<(), Error> { client, None, None, - Arc::new(Mapper { schema: None }), + Arc::new(Mapper { + schema: None, + skip_empty_blocks: false, + }), package.modules.clone(), module_name.to_string(), vec![12369621], diff --git 
a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs index c1a047d7714..83f31702c3f 100644 --- a/chain/substreams/src/block_ingestor.rs +++ b/chain/substreams/src/block_ingestor.rs @@ -18,8 +18,9 @@ use graph::{ }; use prost::Message; -const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = - include_bytes!("../../../substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg"); +const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = include_bytes!( + "../../../substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg" +); pub struct SubstreamsBlockIngestor { chain_store: Arc, @@ -72,7 +73,7 @@ impl SubstreamsBlockIngestor { let mut latest_cursor = cursor; while let Some(message) = stream.next().await { - let (block_ptr, cursor) = match message { + let (block, cursor) = match message { Ok(BlockStreamEvent::ProcessBlock(triggers, cursor)) => { (Arc::new(triggers.block), cursor) } @@ -89,7 +90,7 @@ impl SubstreamsBlockIngestor { } }; - let res = self.process_new_block(block_ptr, cursor.to_string()).await; + let res = self.process_new_block(block, cursor.to_string()).await; if let Err(e) = res { error!(self.logger, "Process block failed: {:#}", e); break; @@ -107,14 +108,14 @@ impl SubstreamsBlockIngestor { async fn process_new_block( &self, - block_ptr: Arc, + block: Arc, cursor: String, ) -> Result<(), Error> { - trace!(self.logger, "Received new block to ingest {:?}", block_ptr); + trace!(self.logger, "Received new block to ingest {:?}", block); self.chain_store .clone() - .set_chain_head(block_ptr, cursor) + .set_chain_head(block, cursor) .await .context("Updating chain head")?; @@ -125,7 +126,10 @@ impl SubstreamsBlockIngestor { #[async_trait] impl BlockIngestor for SubstreamsBlockIngestor { async fn run(self: Box) { - let mapper = Arc::new(Mapper { schema: None }); + let mapper = Arc::new(Mapper { + schema: None, + skip_empty_blocks: false, + }); let mut latest_cursor = self.fetch_head_cursor().await; let mut backoff = 
ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index ef073789e28..919d19ef4c8 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -42,6 +42,7 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { ) -> Result>> { let mapper = Arc::new(Mapper { schema: Some(schema), + skip_empty_blocks: true, }); let logger = chain diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 1fdd26f96cc..5baf2ebab77 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -6,6 +6,7 @@ use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, EmptyNodeCapabilities, NoopRuntimeAdapter, }; use graph::components::store::DeploymentCursorTracker; +use graph::env::EnvVars; use graph::firehose::FirehoseEndpoints; use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; use graph::schema::EntityKey; @@ -193,7 +194,7 @@ impl Blockchain for Chain { } impl blockchain::BlockchainBuilder for BasicBlockchainBuilder { - fn build(self) -> super::Chain { + fn build(self, _config: &Arc) -> Chain { let BasicBlockchainBuilder { logger_factory, name: _, diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index 90912eeee4f..b78b0c90463 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -7,10 +7,6 @@ use graph::{ components::link_resolver::LinkResolver, prelude::{async_trait, BlockNumber, DataSourceTemplateInfo, Link}, slog::Logger, - substreams::{ - module::input::{Input, Params}, - Module, - }, }; use prost::Message; @@ -161,35 +157,6 @@ pub struct UnresolvedDataSource { pub mapping: UnresolvedMapping, } -/// Replace all the existing params with the provided ones. 
-fn patch_module_params(params: Option>, module: &mut Module) { - let params = match params { - Some(params) => params, - None => return, - }; - - let mut inputs: Vec = module - .inputs - .iter() - .flat_map(|input| match input.input { - None => None, - Some(Input::Params(_)) => None, - Some(_) => Some(input.clone()), - }) - .collect(); - - inputs.append( - &mut params - .into_iter() - .map(|value| graph::substreams::module::Input { - input: Some(Input::Params(Params { value })), - }) - .collect(), - ); - - module.inputs = inputs; -} - #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] /// Text api_version, before parsing and validation. @@ -216,7 +183,9 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { .iter_mut() .find(|module| module.name == self.source.package.module_name) .map(|module| { - patch_module_params(self.source.package.params, module); + if let Some(params) = self.source.package.params { + graph::substreams::patch_module_params(params.join("\n"), module); + } module }), None => None, @@ -415,19 +384,11 @@ mod test { let mut package = gen_package(); let mut modules = package.modules.unwrap(); modules.modules.get_mut(0).map(|module| { - module.inputs = vec![ - graph::substreams::module::Input { - input: Some(Input::Params(Params { value: "x".into() })), - }, - graph::substreams::module::Input { - input: Some(Input::Params(Params { value: "y".into() })), - }, - graph::substreams::module::Input { - input: Some(Input::Params(Params { - value: "123".into(), - })), - }, - ] + module.inputs = vec![graph::substreams::module::Input { + input: Some(Input::Params(Params { + value: "x\ny\n123".into(), + })), + }] }); package.modules = Some(modules); diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 764fd6bd5d7..1d371580eb6 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -1,23 +1,21 @@ use std::collections::HashMap; use 
std::str::FromStr; -use crate::codec::entity_change; -use crate::{codec, Block, Chain, EntityChanges, ParsedChanges, TriggerData}; -use graph::blockchain::block_stream::{ - BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, -}; +use crate::codec::{entity_change, EntityChanges}; +use anyhow::{anyhow, Error}; +use graph::blockchain::block_stream::{BlockWithTriggers, SubstreamsError, SubstreamsMapper}; use graph::data::store::scalar::Bytes; use graph::data::store::IdType; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::prelude::BigDecimal; -use graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, BlockPtr, Logger, Value}; +use graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, Logger, Value}; use graph::schema::InputSchema; -use graph::slog::o; use graph::substreams::Clock; -use graph::substreams_rpc::response::Message as SubstreamsMessage; use prost::Message; +use crate::{Block, Chain, ParsedChanges, TriggerData}; + // Mapper will transform the proto content coming from substreams in the graph-out format // into the internal Block representation. If schema is passed then additional transformation // into from the substreams block representation is performed into the Entity model used by @@ -25,100 +23,73 @@ use prost::Message; // be used for block ingestion where entity content is empty and gets discarded. pub struct Mapper { pub schema: Option, + // Block ingestors need the block to be returned so they can populate the cache + // block streams, however, can shave some time by just skipping. 
+ pub skip_empty_blocks: bool, } #[async_trait] impl SubstreamsMapper for Mapper { - async fn to_block_stream_event( - &self, - logger: &mut Logger, - message: Option, - ) -> Result>, SubstreamsError> { - match message { - Some(SubstreamsMessage::Session(session_init)) => { - *logger = logger.new(o!("trace_id" => session_init.trace_id)); - return Ok(None); - } - Some(SubstreamsMessage::BlockUndoSignal(undo)) => { - let valid_block = match undo.last_valid_block { - Some(clock) => clock, - None => return Err(SubstreamsError::InvalidUndoError), - }; - let valid_ptr = BlockPtr { - hash: valid_block.id.trim_start_matches("0x").try_into()?, - number: valid_block.number as i32, - }; - return Ok(Some(BlockStreamEvent::Revert( - valid_ptr, - FirehoseCursor::from(undo.last_valid_cursor.clone()), - ))); + fn decode_block(&self, output: Option<&prost_types::Any>) -> Result, Error> { + let changes: EntityChanges = match output { + Some(msg) => { + Message::decode(msg.value.as_slice()).map_err(SubstreamsError::DecodingError)? 
} + None => EntityChanges { + entity_changes: [].to_vec(), + }, + }; - Some(SubstreamsMessage::BlockScopedData(block_scoped_data)) => { - let module_output = match &block_scoped_data.output { - Some(out) => out, - None => return Ok(None), - }; - - let clock = match block_scoped_data.clock { - Some(clock) => clock, - None => return Err(SubstreamsError::MissingClockError), - }; - - let cursor = &block_scoped_data.cursor; - - let Clock { - id: hash, - number, - timestamp: _, - } = clock; - - let hash: BlockHash = hash.as_str().try_into()?; - let number: BlockNumber = number as BlockNumber; - - let changes: EntityChanges = match module_output.map_output.as_ref() { - Some(msg) => Message::decode(msg.value.as_slice()) - .map_err(SubstreamsError::DecodingError)?, - None => EntityChanges { - entity_changes: [].to_vec(), - }, - }; - - let parsed_changes = match self.schema.as_ref() { - Some(schema) => parse_changes(&changes, schema)?, - None => vec![], - }; - let mut triggers = vec![]; - if changes.entity_changes.len() >= 1 { - triggers.push(TriggerData {}); - } + let parsed_changes = match self.schema.as_ref() { + Some(schema) => parse_changes(&changes, schema)?, + None if self.skip_empty_blocks => return Ok(None), + None => vec![], + }; - // Even though the trigger processor for substreams doesn't care about TriggerData - // there are a bunch of places in the runner that check if trigger data - // empty and skip processing if so. This will prolly breakdown - // close to head so we will need to improve things. - - // TODO(filipe): Fix once either trigger data can be empty - // or we move the changes into trigger data. 
- Ok(Some(BlockStreamEvent::ProcessBlock( - BlockWithTriggers::new( - Block { - hash, - number, - changes, - parsed_changes, - }, - triggers, - logger, - ), - FirehoseCursor::from(cursor.clone()), - ))) - } + let hash = BlockHash::zero(); + let number = BlockNumber::MIN; + let block = Block { + hash, + number, + changes, + parsed_changes, + }; - // ignoring Progress messages and SessionInit - // We are only interested in Data and Undo signals - _ => Ok(None), + Ok(Some(block)) + } + + async fn block_with_triggers( + &self, + logger: &Logger, + block: Block, + ) -> Result, Error> { + let mut triggers = vec![]; + if block.changes.entity_changes.len() >= 1 { + triggers.push(TriggerData {}); } + + Ok(BlockWithTriggers::new(block, triggers, logger)) + } + + async fn decode_triggers( + &self, + logger: &Logger, + clock: &Clock, + block: &prost_types::Any, + ) -> Result, Error> { + let block_number: BlockNumber = clock.number.try_into()?; + let block_hash = clock.id.as_bytes().to_vec().try_into()?; + + let block = self + .decode_block(Some(block))? 
+ .ok_or_else(|| anyhow!("expected block to not be empty"))?; + self.block_with_triggers(logger, block).await.map(|bt| { + let mut block = bt; + + block.block.number = block_number; + block.block.hash = block_hash; + block + }) } } @@ -160,8 +131,8 @@ fn parse_changes( let changes = match entity_change.operation() { entity_change::Operation::Create | entity_change::Operation::Update => { for field in entity_change.fields.iter() { - let new_value: &codec::value::Typed = match &field.new_value { - Some(codec::Value { + let new_value: &crate::codec::value::Typed = match &field.new_value { + Some(crate::codec::Value { typed: Some(new_value), }) => &new_value, _ => continue, @@ -188,7 +159,7 @@ fn parse_changes( } fn decode_value(value: &crate::codec::value::Typed) -> anyhow::Result { - use codec::value::Typed; + use crate::codec::value::Typed; match value { Typed::Int32(new_value) => Ok(Value::Int(*new_value)), diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 18e0fd78ef1..6db29f33f8a 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -1,3 +1,6 @@ +use crate::substreams::Clock; +use crate::substreams_rpc::response::Message as SubstreamsMessage; +use crate::substreams_rpc::BlockScopedData; use anyhow::Error; use async_stream::stream; use futures03::Stream; @@ -293,12 +296,12 @@ pub trait TriggersAdapter: Send + Sync { #[async_trait] pub trait FirehoseMapper: Send + Sync { + fn trigger_filter(&self) -> &C::TriggerFilter; + async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, - adapter: &Arc>, - filter: &C::TriggerFilter, ) -> Result, FirehoseError>; /// Returns the [BlockPtr] value for this given block number. 
This is the block pointer @@ -334,13 +337,84 @@ pub trait FirehoseMapper: Send + Sync { #[async_trait] pub trait SubstreamsMapper: Send + Sync { + fn decode_block(&self, output: Option<&prost_types::Any>) -> Result, Error>; + + async fn block_with_triggers( + &self, + logger: &Logger, + block: C::Block, + ) -> Result, Error>; + + async fn decode_triggers( + &self, + logger: &Logger, + clock: &Clock, + block: &prost_types::Any, + ) -> Result, Error>; + async fn to_block_stream_event( &self, logger: &mut Logger, - response: Option, - // adapter: &Arc>, - // filter: &C::TriggerFilter, - ) -> Result>, SubstreamsError>; + message: Option, + ) -> Result>, SubstreamsError> { + match message { + Some(SubstreamsMessage::Session(session_init)) => { + *logger = logger.new(o!("trace_id" => session_init.trace_id)); + return Ok(None); + } + Some(SubstreamsMessage::BlockUndoSignal(undo)) => { + let valid_block = match undo.last_valid_block { + Some(clock) => clock, + None => return Err(SubstreamsError::InvalidUndoError), + }; + let valid_ptr = BlockPtr { + hash: valid_block.id.trim_start_matches("0x").try_into()?, + number: valid_block.number as i32, + }; + return Ok(Some(BlockStreamEvent::Revert( + valid_ptr, + FirehoseCursor::from(undo.last_valid_cursor.clone()), + ))); + } + + Some(SubstreamsMessage::BlockScopedData(block_scoped_data)) => { + let BlockScopedData { + output, + clock, + cursor, + final_block_height: _, + debug_map_outputs: _, + debug_store_outputs: _, + } = block_scoped_data; + + let module_output = match output { + Some(out) => out, + None => return Ok(None), + }; + + let clock = match clock { + Some(clock) => clock, + None => return Err(SubstreamsError::MissingClockError), + }; + + let map_output = match module_output.map_output { + Some(mo) => mo, + None => return Ok(None), + }; + + let block = self.decode_triggers(&logger, &clock, &map_output).await?; + + Ok(Some(BlockStreamEvent::ProcessBlock( + block, + FirehoseCursor::from(cursor.clone()), + ))) + } + + 
// ignoring Progress messages and SessionInit + // We are only interested in Data and Undo signals + _ => Ok(None), + } + } } #[derive(Error, Debug)] diff --git a/graph/src/blockchain/builder.rs b/graph/src/blockchain/builder.rs index dd91610552e..3ea1464a2a3 100644 --- a/graph/src/blockchain/builder.rs +++ b/graph/src/blockchain/builder.rs @@ -1,7 +1,7 @@ use super::Blockchain; use crate::{ - components::store::ChainStore, firehose::FirehoseEndpoints, prelude::LoggerFactory, - prelude::MetricsRegistry, + components::store::ChainStore, env::EnvVars, firehose::FirehoseEndpoints, + prelude::LoggerFactory, prelude::MetricsRegistry, }; use std::sync::Arc; @@ -20,5 +20,5 @@ pub trait BlockchainBuilder where C: Blockchain, { - fn build(self) -> C; + fn build(self, config: &Arc) -> C; } diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index a25f268a358..29febc67108 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -2,7 +2,7 @@ use super::block_stream::{ BlockStream, BlockStreamEvent, FirehoseMapper, FIREHOSE_BUFFER_STREAM_SIZE, }; use super::client::ChainClient; -use super::{Blockchain, TriggersAdapter}; +use super::Blockchain; use crate::blockchain::block_stream::FirehoseCursor; use crate::blockchain::TriggerFilter; use crate::prelude::*; @@ -113,8 +113,6 @@ where subgraph_current_block: Option, cursor: FirehoseCursor, mapper: Arc, - adapter: Arc>, - filter: Arc, start_blocks: Vec, logger: Logger, registry: Arc, @@ -140,8 +138,6 @@ where cursor, deployment, mapper, - adapter, - filter, manifest_start_block_num, subgraph_current_block, logger, @@ -156,8 +152,6 @@ fn stream_blocks>( mut latest_cursor: FirehoseCursor, deployment: DeploymentHash, mapper: Arc, - adapter: Arc>, - filter: Arc, manifest_start_block_num: BlockNumber, subgraph_current_block: Option, logger: Logger, @@ -242,7 +236,7 @@ fn stream_blocks>( }; if endpoint.filters_enabled { - 
request.transforms = filter.as_ref().clone().to_firehose_filter(); + request.transforms = mapper.trigger_filter().clone().to_firehose_filter(); } let mut connect_start = Instant::now(); @@ -267,8 +261,6 @@ fn stream_blocks>( manifest_start_block_num, subgraph_current_block.as_ref(), mapper.as_ref(), - &adapter, - &filter, &logger, ).await { Ok(BlockResponse::Proceed(event, cursor)) => { @@ -356,14 +348,12 @@ async fn process_firehose_response>( manifest_start_block_num: BlockNumber, subgraph_current_block: Option<&BlockPtr>, mapper: &F, - adapter: &Arc>, - filter: &C::TriggerFilter, logger: &Logger, ) -> Result, Error> { let response = result.context("An error occurred while streaming blocks")?; let event = mapper - .to_block_stream_event(logger, &response, adapter, filter) + .to_block_stream_event(logger, &response) .await .context("Mapping block to BlockStreamEvent failed")?; diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index 30d1dadfdc4..a3aba7bd53e 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -175,6 +175,8 @@ pub struct EnvVars { /// Set by the env var `GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS` which should point /// to a file with subgraph-specific settings pub subgraph_settings: Option, + /// Whether to prefer substreams blocks streams over firehose when available. 
+ pub prefer_substreams_block_streams: bool, } impl EnvVars { @@ -233,6 +235,7 @@ impl EnvVars { static_filters_threshold: inner.static_filters_threshold, reorg_threshold: inner.reorg_threshold, subgraph_settings: inner.subgraph_settings, + prefer_substreams_block_streams: inner.prefer_substreams_block_streams, }) } @@ -353,6 +356,11 @@ struct Inner { reorg_threshold: BlockNumber, #[envconfig(from = "GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS")] subgraph_settings: Option, + #[envconfig( + from = "GRAPH_EXPERIMENTAL_PREFER_SUBSTREAMS_BLOCK_STREAMS", + default = "false" + )] + prefer_substreams_block_streams: bool, } #[derive(Clone, Debug)] diff --git a/graph/src/substreams/mod.rs b/graph/src/substreams/mod.rs index 38e96fd598d..a09801b91ee 100644 --- a/graph/src/substreams/mod.rs +++ b/graph/src/substreams/mod.rs @@ -1,3 +1,20 @@ mod codec; pub use codec::*; + +use self::module::input::{Input, Params}; + +/// Replace all the existing params with the provided ones. +pub fn patch_module_params(params: String, module: &mut Module) { + let mut inputs = vec![crate::substreams::module::Input { + input: Some(Input::Params(Params { value: params })), + }]; + + inputs.extend(module.inputs.iter().flat_map(|input| match input.input { + None => None, + Some(Input::Params(_)) => None, + Some(_) => Some(input.clone()), + })); + + module.inputs = inputs; +} diff --git a/node/src/main.rs b/node/src/main.rs index c01da62117c..0251e91cdc3 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -1,9 +1,9 @@ use clap::Parser as _; use ethereum::chain::{EthereumAdapterSelector, EthereumBlockRefetcher, EthereumStreamBuilder}; -use ethereum::codec::HeaderOnlyBlock; use ethereum::{BlockIngestor, EthereumNetworks, RuntimeAdapter}; use git_testament::{git_testament, render_testament}; use graph::blockchain::client::ChainClient; +use graph_chain_ethereum::codec::HeaderOnlyBlock; use graph::blockchain::{ BasicBlockchainBuilder, Blockchain, BlockchainBuilder, BlockchainKind, BlockchainMap, @@ -401,6 
+401,7 @@ async fn main() { let network_store = store_builder.network_store(network_identifiers); let arweave_chains = networks_as_chains::( + &env_vars, &mut blockchain_map, &logger, &arweave_networks, @@ -443,6 +444,7 @@ async fn main() { ); let near_chains = networks_as_chains::( + &env_vars, &mut blockchain_map, &logger, &near_networks, @@ -453,6 +455,7 @@ async fn main() { ); let cosmos_chains = networks_as_chains::( + &env_vars, &mut blockchain_map, &logger, &cosmos_networks, @@ -463,6 +466,7 @@ async fn main() { ); let substreams_chains = networks_as_chains::( + &env_vars, &mut blockchain_map, &logger, &substreams_networks, @@ -710,6 +714,7 @@ async fn main() { /// Return the hashmap of chains and also add them to `blockchain_map`. fn networks_as_chains( + config: &Arc, blockchain_map: &mut BlockchainMap, logger: &Logger, firehose_networks: &FirehoseNetworks, @@ -751,7 +756,7 @@ where firehose_endpoints: endpoints.clone(), metrics_registry: metrics_registry.clone(), } - .build(), + .build(config), ), ) }) diff --git a/substreams-head-tracker/Cargo.lock b/substreams/substreams-head-tracker/Cargo.lock similarity index 100% rename from substreams-head-tracker/Cargo.lock rename to substreams/substreams-head-tracker/Cargo.lock diff --git a/substreams-head-tracker/Cargo.toml b/substreams/substreams-head-tracker/Cargo.toml similarity index 100% rename from substreams-head-tracker/Cargo.toml rename to substreams/substreams-head-tracker/Cargo.toml diff --git a/substreams-head-tracker/Makefile b/substreams/substreams-head-tracker/Makefile similarity index 100% rename from substreams-head-tracker/Makefile rename to substreams/substreams-head-tracker/Makefile diff --git a/substreams-head-tracker/rust-toolchain.toml b/substreams/substreams-head-tracker/rust-toolchain.toml similarity index 100% rename from substreams-head-tracker/rust-toolchain.toml rename to substreams/substreams-head-tracker/rust-toolchain.toml diff --git a/substreams-head-tracker/src/lib.rs 
b/substreams/substreams-head-tracker/src/lib.rs similarity index 100% rename from substreams-head-tracker/src/lib.rs rename to substreams/substreams-head-tracker/src/lib.rs diff --git a/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg b/substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg similarity index 100% rename from substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg rename to substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg diff --git a/substreams-head-tracker/substreams.yaml b/substreams/substreams-head-tracker/substreams.yaml similarity index 100% rename from substreams-head-tracker/substreams.yaml rename to substreams/substreams-head-tracker/substreams.yaml diff --git a/substreams/substreams-trigger-filter/Cargo.lock b/substreams/substreams-trigger-filter/Cargo.lock new file mode 100755 index 00000000000..5a22905c7f5 --- /dev/null +++ b/substreams/substreams-trigger-filter/Cargo.lock @@ -0,0 +1,498 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "anyhow" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bigdecimal" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "bytes" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "fastrand" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +dependencies = [ + "instant", +] + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "indexmap" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.138" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "pad" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "petgraph" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "prettyplease" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" +dependencies = [ + "bytes", + "heck", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" +dependencies = [ + "bytes", + "prost", +] + +[[package]] +name = "quote" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "substreams" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea94f238b54b075ad17894537bdcc20d5fc65cdc199bf1594c9ecfdc6454840" +dependencies = [ + "anyhow", + "bigdecimal", + "hex", + "hex-literal", + "num-bigint", + "num-traits", + "pad", + "prost", + "prost-build", + "prost-types", + "substreams-macro", + "thiserror", +] + +[[package]] +name = "substreams-entity-change" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e" +dependencies = [ + "base64", + "prost", + "prost-types", + "substreams", +] + +[[package]] +name = "substreams-filter" +version = "0.0.1" +dependencies = [ + "hex", + "prost", + "substreams", + "substreams-entity-change", + "substreams-near-core", + "tonic-build", +] + +[[package]] +name = "substreams-macro" +version = "0.5.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9df3ebfeefa8958b1de17f7e9e80f9b1d9a78cbe9114716a872a52b60b8343" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "thiserror", +] + +[[package]] +name = "substreams-near-core" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9922f437e6cb86b62cfd8bdede93937def710616ac2825ffff06b8770bbd06df" +dependencies = [ + "bs58", + "prost", + "prost-build", + "prost-types", +] + +[[package]] +name = "syn" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "thiserror" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tonic-build" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn", +] + +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "which" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +dependencies = [ + "either", + "libc", + "once_cell", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/substreams/substreams-trigger-filter/Cargo.toml b/substreams/substreams-trigger-filter/Cargo.toml new file mode 100755 index 00000000000..c510de2d2e0 --- /dev/null +++ b/substreams/substreams-trigger-filter/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "substreams-trigger-filter" +version.workspace = true +edition.workspace = true + +[lib] +name = "substreams" +crate-type = ["cdylib"] + +[dependencies] +hex = { version = "0.4", default-features = false } +prost.workspace = true +substreams = "0.5" +substreams-entity-change = "1.3" +substreams-near-core = "0.10.1" +anyhow = "1" + +trigger-filters.path = "../trigger-filters" + +[build-dependencies] +tonic-build = { version = "0.8.4", features = ["prost"] } + diff --git 
a/substreams/substreams-trigger-filter/Makefile b/substreams/substreams-trigger-filter/Makefile new file mode 100755 index 00000000000..365b6f05178 --- /dev/null +++ b/substreams/substreams-trigger-filter/Makefile @@ -0,0 +1,35 @@ +ENDPOINT ?= mainnet.near.streamingfast.io:443 +START_BLOCK ?= 96764162 +STOP_BLOCK ?= +100 + +.PHONY: build +build: + cargo build --target wasm32-unknown-unknown --release + +.PHONY: run +run: build + substreams run -e $(ENDPOINT) substreams.yaml near_filter -s $(START_BLOCK) -t $(STOP_BLOCK) $(ARGS) + +.PHONY: gui +gui: build + substreams gui -e $(ENDPOINT) substreams.yaml map_block -s $(START_BLOCK) -t $(STOP_BLOCK) + +# .PHONY: protogen +# protogen: +# substreams protogen ./substreams.yaml --exclude-paths="sf/substreams,google" + +.PHONY: pack +pack: build + substreams pack substreams.yaml + +.PHONY: deploy_local +deploy_local: pack + mkdir build 2> /dev/null || true + bun x graph build --ipfs http://localhost:5001 subgraph.yaml + bun x graph create map_block --node http://127.0.0.1:8020 + bun x graph deploy --node http://127.0.0.1:8020 --ipfs http://127.0.0.1:5001 --version-label v0.0.1 map_block subgraph.yaml + +.PHONY: undeploy_local +undeploy_local: + graphman --config "$(GRAPH_CONFIG)" drop --force uniswap_v3 + diff --git a/substreams/substreams-trigger-filter/build.rs b/substreams/substreams-trigger-filter/build.rs new file mode 100644 index 00000000000..f29e8ecf091 --- /dev/null +++ b/substreams/substreams-trigger-filter/build.rs @@ -0,0 +1,18 @@ +fn main() { + println!("cargo:rerun-if-changed=proto"); + tonic_build::configure() + .protoc_arg("--experimental_allow_proto3_optional") + .extern_path( + ".sf.near.codec.v1", + "::substreams_near_core::pb::sf::near::type::v1", + ) + // .extern_path( + // ".sf.ethereum.type.v2", + // "graph_chain_ethereum::codec::pbcodec", + // ) + // .extern_path(".sf.arweave.type.v1", "graph_chain_arweave::codec::pbcodec") + // .extern_path(".sf.cosmos.type.v1", "graph_chain_cosmos::codec") + 
.out_dir("src/pb") + .compile(&["proto/receipts.proto"], &["proto"]) + .expect("Failed to compile Substreams entity proto(s)"); +} diff --git a/substreams/substreams-trigger-filter/bun.lockb b/substreams/substreams-trigger-filter/bun.lockb new file mode 100755 index 0000000000000000000000000000000000000000..7f816d7b37de6928a634997cbe0c61ec24a55907 GIT binary patch literal 197771 zcmeFad0b8F7x;gw6HSzmG@wK!O;pmLL6cO*22?bvG>Q-Q7CR@Lp>@>lya5_c=bF?rQo8ak2XTp^^GQ zk%`^>6C$PH(hZLam=GKq9;6!_6&v9fubXHfCCcORuD#0Ys`@lDY5v(Uxi>5JpS0lj z-7zub{Ee;!1#{n z5Rjp9K@)>Os*3P%7vdKef|mVbqhkFc1OKwqo6#HXKOr!<504iY;@9Nh0O%ZY zdl~r+B>L$MokRO>j8p?2`>PE8qKpVMZsHA~^O&Cy6cFGyfyYy1?D)qSH^s}}FD^)T zBE*pChcqZfy&~`jR$;0%(Ng2_Th0>Oo^AkiL-b1X3R63;G=&5#2Ptv7r&Me!)E6`0yxyEI0;@z(i_P zWIzz+lVbg%qmy~OeNf&Me{dS(t^`hETx39EoI)dm!$Zf1#BYOksJ|66FC)P?#w8{W zwefgH;1uSUf<$gMNQ|G2ihwf($;^+Y_BxQq`c!C#<W*- z4Uk8_mP1f5ZqFf)yi->}In5iIFSI@$Kt1XSjq{6*^-JbmgFO0a2@?Ag0P`a}C@$_J z=tDn#s0s8%ghmAE2Kw{fKpx|AOkL0~zqsVcfC-`T_n7wN-$lq{d`m&1J#3~M8yzq{ zC|)-@DmFeYC?YW^mdC5r5ZFofg9Q~A&x;L;NeBX&kPsS(fl=%!@K1)3(Fu{s0a1ZL z(Q)BTaie%Q^bq(}2NL7*03^mGG%hYaHpnmHD&#Rfr$OR)#Kna~PXG<8v<3PXgT(&A zve6BT^N-AA%JDNA0>7jE;^KIXT7ozQ!n(%GFD}v$>%-!rVT$p1&?#8yJl$4C3c#sy6UuQ5(xe#voB(8DBV-csBh z`v~geLt>*6#)t6Q_7&(ggYq`O1tkYH5xPv?9;{>k0)xU~R`GZmP>y~mGm_>7AMzOg z&(H?v$2JoI7X=dMVLl@h`U&!jAdl@QgM=lQ+6g3b>tQ#8EhIG<@@OZ~FE%tX9`6^r zp}1f|qjh70#>2fMHdYtsOaK0Y{bYujpkEP8f0H4PcKewN%7Y@Ib!g=H<4itI~T>KEI$9NwYAn+qvH!vs| z7C@w5WE9TyA}B{cqM>AxbwPHk`K(^y0WI28nT;2okoMR87VoYmhjvjt&&;(=csdrN{DiKpx{<01|o7 z8-Y7f9+UU67xX7QDiXuM8{;5oHy9*x=8V)~q$0@nP~I9Oj@xJ8Q5Jy2{r(J{>}$KFrL&8X=sP>YcLk{Gr&J879D%X)0nb3kG{y05}Y3&4mS^b(BI4v;cXAMF<(qB|idc|DU~ z4ie*=4HE68gG4=li_OP-IL(XlzjIM#y75o=q@?!7&9T9#ZkfLF-_z zqJaOmarn3RH{HiT-&DpA)v*FU+k-^^%0Xg0ia@HOAGpoy22DIVPTs*L9u*5+i~~Q=t|ROX*#7VHZ8enR zewYIi<6;I9^#uZl>%%V^?(hi#L4FfMqcCE{!2-YN{9_4=1G$LM$WXko(S4Zm6wlx| z=wy7X-*Y&OqaR^#x4@X+fjsh+Akpp 
ze(s@j4(->DeFT10fo@zMpl3p8pkHioTu@?AWPBW)6#~)Hbg+f~@gJzXuZI@dEt8e$u{9_lsLlj`5-Wi=JEmzK>CR+7BZ`1A=tpW0PZm$MsMF68%5K z=!5x|gh$-aOhH_BKsm0L0+1LloQ(bu`#3mdz!K!aJ$rmmA80>5GAb5!#_-t!E;LIJ zhfyGL+=Agy0#g>w*$Hu>!O6V0P>%iDF-PFXcaYehK}>sC3r!Q>5A>nk#b6)x&_0|A zdGvoO;}_lM0{mj(808lp8x#mf#X#P?c>+JEzs~ao{`mrj_F@=#EIqbP)m+$$Ad0*BKsdd=g+w=Aa`ByjA=QkG39OT() zXpzok>t`Z&i&hNVA6r}za4p~JmCeH+-fGqQzM8&j&-eJY@8B@A=Zs0(?_OR~+n-cC zuwe1*Sg)uqZ;o8q<521TcE@408s0kx|H7_E%g*+GUOX!1Mz5!Z?Ww-^jbqn%6Lcpn z+f+1pc%srh*~;MM2Y;m3b+5`gzP4aWlH!X-y}|Pn&eVNKPx0foOS9HUixhDzk*~~~ zU>TBCKdasHSDh5^UmI`c6KXMg?&HZPRs<$B_y)?o2r()(lU#Lt8K$W=5enGrmmDA4lT6VHi2brl|Whz!BDOjBy{r+U|oKEig zx%z{jyCobR8>;QpU0KC%^}8S+E1BwyFU1QYI_?eoaLKE5(EPjm&2RnC?#{FFIcwlK zLhD<%8T%jc7WgdGAD!K^#Iir%B76OSvlShlPrYh4@VZ~^qDv|Mmj=joYP7LAJJx99 z%l`I@E~hVCw&HYqOZ~yhTWveYEYfIwW#aOcBiCLj%OA8mpr?EJ;Mt{{dMHGQkAHNu zA?G@Sc=+v2^VeO@DO5C0{cDX0H>FUaZKg>=n317JD%(D-(8mqG| zg@mVfYV)c7`p8}8DOr0KE$-4`#e3cAnJE#SEGA}7dvUq;?9H}CQnPk?`z%iz)ZXK& zoJs$TQ4Yi8bOv@vR7qKpyyfep@K5swj}7+BeycRf&Hi-7tCEzlE-~kJ+U6~r=qGh* z<*kdFQxd;-6@BD8?Csm=QAgj3ue|q0t1Np;+fJg_CwdOM=IT>h_ORoI-Y04vUAiXz ze2Gcqvh6ZgT};L}z4mfW*PQm#NHQk1)5P@Ph*fnL+9-S-{eJF)srJdX%MTv6Z}WPG z<=PGj$pcR^Di7 zlHWb|vFb_FU?SA{H%#9g8OICkZ>=>C<+i#Va zeZcYgd5$5q%7yy+B00If6@DL2o-X*%aArf~kfdH~j4$tK>@YD!StI|!%8Nxoeb(z9 z$?CjgX4lIjGXI({z8Nnwb*JVV=Bdg)-7ha=t15rbu+(t9;hpPu)1$sz_}OvLQ;#-_ zG#)-u~;n&|F*c(41YPIZ>Dhn0`Cv3&C}<%I6`*ZyPE z6mxFfb_;ndubrN6ys)UaQYK|x^ox~-GGF6lM0eke9kX#~nVW?Av2r=ZHk-cIJp9_( z_FEhEl8)B-@B1fr-uK|*sxuRxrkr~7bKjbt>W>PJZM4jYE%6Jz(ki4nZIYymS3+T+ z+(7@ztxBtQZG3HO(x%eC$mmgAp?KogAd2t%?@4P+XGD2W`OH}>mgPK)TGQ9PH1ygj z!>ON!EN*Lk$Js`+P3jqMovfL+GJ7@-7?UZPVRcQ}`O-PZ&gT|N-kq^?D~*fXsiMl0 z4Y>`L9frG@%qYsY+no8juZE7-h@V5nY}$0OzT;~!e7DYl?0L85$bMVDP4r~Wfg(Q} z{(Vu|x7R&R+&h!wu}Q>fgUF#L=VzLvU$|)Anm=gj{&^M^6!++*vM*%9GYU=SwY@vz zsd7!K|GvS_tqW87e;;jUwRA}M_x)LQo}+il*O#R~deI;rr>UXf24fJ!GqRUEK6P!b|sYN?UvEQX19B~HD0=`|ly?RSzqVoQ>;_tT~@idd{p`tFTZnj`I|DfDet*#Lo zp7LMYR8)w=(^pkx?c89>DkO)4{ur;m{jH~dl&>scb1)LenM*Okggi)C#vT3 zeydS4E 
zu8em56qZl>Rn9^2B|l62Uf%CIZI{@KxXdk!hEEjN-a2Z+0prY*u|rl{Hz>_r{_^^g zwxPGbth^QCKJUWN(y6c3q#8R1rM&%YHbSYd=gZ@(w1=3K7aFx6HdOVj!h+B7i;wkE zQnrpO+^@T*+Pb7fHDIyd!PjSv&V`5V?sVPjgY~Mjhc4d;ow(j_#g^;2t~(UJ+V+;7 z5Ls|yW@fd^@+>p!ai6DdDs$_%innyn3)6r&?M1Z`KkJ@lWqum?=E=wYnKP5#OjO)a zD(7kuVzxXu`D&i*u`SByvL+kUs~_&6?eu73ZSS$gMe=$&%3QKx#aH}6F*h*i`>@^X?@*Z z6nXXRJwVwmwmv}pm}-WA!NNA?Wuv>`qs$l_9is`|t3$0XGcj22m0?eJwywCJQZd$ZK#5-K03 z8Mr8T-?Tp&5O6`WE;i>*NNF3d>hgJOce^)Ct#o(OKe4WJ`NewpEXOGT`J?(==teuTC8`N_E`F z9F=JIO0VahvC|Sg`>1A5aVx(PceL^Pa0|%_iI+W{-OA5bxjAgAng7se!_w8S#k-BO z(fV-jU0kR4`-Y~3AG)(^&o=9h_m`O+m6$5^(o@wce{Ox(qQs@+jDAG!+tTwz_T>}j zrJN(w<|eF}*2de{Q~pcq>_h$w_&|R`|?@l9la&XK1VK{-a%?( zcjuCy5AJzCa6cNOS^avNbB7n3vt+BPT17t_I!VmcJ#XK=y<24ihHuD!{nh{O*!B`e zTh+YQ)zA2;Q6tl>v-I>HeQxiaP}C_oWux^1Cq=CZK|SsUF5Kgv!q;yI7ugxDZg>0d z^KDy{VR87|fYq-9N4wv(IA%9|cGdPSUd~c#kxSZk%hns|^i6N+@lJEzneJ>@aJDS( zi?X`dnqm1yPwD(KL(V%qJVRtRkS|&kCi9z4}$RDsKIK?8mid2Y2v@o$>aZL#T3o$hpBwX5P6Va{aW? z&H48}Uv=oQd57DrRHYR!v&LzT{8DAqR#T+FVN%I}+QLhtu1l56XFtnL?QX2qP_kR> z{AN`duSKVOxn8Ker9I1G_i^#*%S-%@eh)59JhEnM-rdD}hs}IuV0HRNjJ25Vy7*G< zWC@Gj`*-iJ8#{H9e$wC1J#@~Y{hID;Lwq{8FAbBKKmJ&jkFu41>HY=NJI#2Tc~8A- zZf8~X;d9)_z5ZFh!0+>vRz52%=Wd8h$_Rd@HzdJWYSqKr8Lc1pwUgbsDL(sBR^Z&d z9-((uzg<1^;K0odZ8sk4aI;nE$>&`y>yGUzmopCGd-WN4p;h7OUiwZyzK-co`!(TP zhI`GT5}KcK({|U2KKprdvsqrSkL~3g>gUS0rUymbUun^JkgR!=yLiHva8c_2V>d@* z|J17iOXX5+`uc3QfAYFi=~%ah%d{_____O)bamSv^_Nl&`W22_IBIjvS-Z98ljOXv zPU_fEzM#t@x6o9V;hPd)UR*FQXV(k&1NDX5+7_ldthrmbuG7J$*^%u=feK9fd zTDHq@mQGC#01|H+5emTm556<8Q}P5$z< z{j{P(Df9Q7@7h*L-Tsl~=|d|Ymv*+45;bX^ES3DByH7}2O>}gUyuvkT*L0A3#hHAUTw;oM=v3_ya#=5@ybj3EvR-UZAs5QARtMliK zc{j&RQ;qz2?aeC54%$K47e8*4Un#mMc)_dTXUc8{Z45KCD(EpKy`54`#Hkft&xchU z?Y-i*xx1os;v?s)25#9tvKsayt(Nrmz98dzZFI>y?OCn0SG%s?y~(_*lkcQO&V%O! 
zF5Y$McG>;7Q>Uvdmb?qO_~dZ(vU#8Gr>f>CI=-_srG0q+OG%4fH;*lom_4xd{q^fC z={}d0*lDZIdads52e$f}7#mfWpfm6OqCKLIslUhN%|DHGSyUlsEV^TFe7ByelZR&Y zX}xqQomc6cT7PWNnTRA`X}WJ`oNz2K4^G-DLN5+D$|Vbu#Mq1|!@+B2o-^|v0>1j< zoQT&3rG~(xZYj8^{CA3|T|9vBx+j(LB)Re%|Hl80qk6J)6L@RJ|JGE=K@x8SAK)vs zrvA5t-wV6}@M0WnvYU|oXYc{O3TuiVjse$1ygnF4UK~IyJTtHhUYqVXou_vGd$%g?wJQYP6zrpI>^t?NITq`P?rc#gF31m5%}5 z?GN~)z@z`S4^ zHwt(>f05s)gG(ZQHt@9mkb~vSDG`4dc$$Ay#zg_O?F?VG5|5m)@$&(m&aW-u^MI%Q z7kv^oewD!6u;MSQ|H|+sFW!GTfNdN@iVOCoxkT+o0B;UF+U4py>Lq?2@cn_ub%Q)t z9mJmn-W_=2U_0cLh*yyl#2+EJm2ytR4+q|o@gHGf_a86#(TF|6b7j#E*#cwb<}?)_)(J{@Kh$OeJ9}A_ixgR`kG7BZVK>te!#hdWy1P@33w;q$#0U) z*~U7uuLv*j&^~f_|Kpm7cLE;!Pj;#NcY3JZ6yWLk1$9t4SN=5cIDe4E3y#B_64}w{ z{OkP-g4B=SDItCw@a*#wm2>3_fH!5ukLx=GvU3M`I=}zUCcB<^B_$ru8tkK8u6;*6 z#7|=J>~j3vTq3?T{J2n;v5#X%<;}5JNA0YEw`RqU>l`M&0C>EAVBg_ULg@AP5_nsN zN1a?_NcLS+{&)O@UH@6YlS zJTOiD$Jmn}zf(ed5%6xn)3}l3%6|vmmf>6S{$zO9|J{FujsHC09l$>J2m8+T8HW1* z5qR3avG2n6-?W=x{o?r#S<(w1n@!X%v72E2P+nO3XMo4~&y^#m$led&ty#RV^T$H% z*Z$R#{!aqFKiEgVaR23+$p1aSEAaUIkMYMo&^Y`~3Gp@>zs_%BjE>*2SWkR1 z!y`*&!th6c$MZMFozDei+K{d9z~lPCz6+CxH|`>EO zy`F;mH^xsGABgt_-Wu$qEn#9n{Az~BJnlbS6Y=+e$MuJK>;u&(#Owqk(r|{O7uM*!K4VkLMRGYl;80z~lLaV$anF^50tP*Zzkrm7_n+ zC2BVjcw4Z~)h_8Iz7%*nhNm)W)10KX?%IO=7wzNuk!vD;3-Hz~|Ec_UdZ^t?;LU-@ z_HE&!alm|YiFl1(e-?jl;H|+v>TAjPtppy=|L807T*s08KMy>fzld**A2=l9MRk6i zpEzKDH6h*_c)Wk2-xz;k{Z9s-`j0wsAK;S6{ubcr{SO~ND*v4hYF7)qG2_3~?;ut^ z@tUxC;r@aBM*G6{KLB`ZhDY_J7e6w?tdZhc>cuxw`~F67CMjnKh9lD z!p7eYc=8{4u6rQ$e-`kx{~=G~h@YEF#9sm)$DjOfj$zjkKdg`7{D@p&d-r|DnL+{SW=eGGXn{10II3#rBT@kLORc-2pCP z=l^%$A%uTDKXcuKsQ-F!c){_ z;9&?-u`T-F20w5}#8&{1@k0ddbL|K5od8Cj)(`68l86ri*adj9EA0H)1w7rq(Jtxv zoeyOHA@I0y<`UVrw-LmT`Yr7FeH!ra-(T~O>pn{Mj{%JF7h#+gcK&?? 
z{!ilH%T{pyMStjOE^bsu{)Yf>$M}!uU+M?7`!`AaHsHsx`cLEVZyc2qFEdbZ{~}IU z`%b_+f_>D>_1=qZ$^H%Cy@01Y)^SPHULS(@C;L|r@OXaaI&aB0vR?!|KL1eMD2~+b z-z4!h!2iko?P@QuPxGGZGZfh$4?Ny~ke7x_SpS0^1m_R_zjgFq%E|sx;NcM}m3S)u z7woTMYIgy6Jbz*QX$<~q)XWFsC1LXZ32*;5p7b_jx~YoVr2%jI2mYS~o<09aFIW2_ zF!_mZN&KvUhavbY{=(Xy0X&|6TRMK1fyelfU7AN+``>;Lk7v!YPkOlWBY?;LV|%V^ z58INRZNR$$-wMivi2?DF@bHfIDNhn@G?%E|5a4P4V|!uaH;3U-ANo&vQCD+`+MNX+ z-yaY!-yFoMBVKf{;NOo(Fm&WoJ`hOXCy#HbRFm_z~ zK>mxv;=%aQ_=5^giFhmE^?;{&PvyT;LhUvJPv=Kugzf(w;MvcQ*|f0NWM z2zWcD|6J!D>Lh*(@Hl=g_5UI8@Lwv$jjIo2-*DKk_+c68`L}*kIq_3~w`J_({flcq zh(8BB-9ON_usnbGuje1~jf*7vPQY7(|Cn#d{WlYM^dJ4^8awol>|bI0Zz-=f;@9}0 zUb>Iq=jIaG9}E1S?B7d($N7u;TGIauz{4#(71y3HF(CgtjTAh85#L;&*>%K^W%0On zxb}hgO~B*&NBb?|Yk+0^4z7v(k74oHcVYW~0C-!*f37V0NcO)2kLOSH zozAdW|GPxIjmIzhE#cFF$MxIN_#XxSPv)3)}FBZMd0BG)np&z$~BRFl~FvNHSo0dgthMt zJno+qJ7M|7jD7M)SpTm9kLO?Fg|**y^snb1CayY&p9}n-JU`t6 z9`_&8C-CC;f2n=%F@pXJvvyEB@sYse_+uV*P#myLbBXv}z|;K$W5?AG;=cor&o4Cp zgtgz-Tk!ikeD=j>C}GbZfxu(`Y20ZYHP?5nBmY+dZw5TAe_{Qv0-inox=LZwLNk+_RHh#{)TY&$_ z;l;Hd##kFm##Ya;t67`~Rzn-&6Z3%oG4kt6?K1CQ~?JkDJj2W-<^BEC=9uYdn12LRW7jQAMf z>HLZ@r1Iu$V;!}t1|H9^G;gS!E8k{V ziR_oL>{D5D9Mw@<&2Yi}qowmF2zY$|BTVej2eQ8h_}&bUWx~#%`@kD8Jn9ve?;IgG zzaWeAALk&KME<)1Pv>_m6L$T~VC+-;gdP6_z+1EWFUiGuXhZ&sM*i|2QLgiVcr)M! 
zvFxic_F+vnrOu4IfT#X<0;v7(K&GDT>;WFLlAN6ug6hFxr!Th0hCv5x%0gv}D)Q7$c>;DYk(SPEF z_5U{T&J0g|;-Z7vYQzfmf1Eq$JC*-V2etDD9-sdxZdA^d&j%jkN9#`5_`L!i z`uG3nKRyF-NyPgDPtX6z(K`B_4&s*rkMYAi+NW}^{6&VxxZ}lj4Ks72YL^^arLzeYQWI^Pa^+E0*}vM zSczvhVb5<*fFBAx`i(k;?Z44P!TFi&l3$>z*+lK815fLZ>)c1h#Ge4(8vIAUx%$rL zMUw^RKP+p>`kM+o?tkb{OZ?vfyaUUBVdu{$;Bo)KzT@7-H3k$v-ARJqA5i?L98@)% zsNFo^M>F;j5Z3++;B9~>W5V`dd$QpEiFUF7!t&c$Jl;dO-bbnbwZNM({YTqW4j-FM z)UNlG|2=` z%;O#+EH9h#XXl?mz|;Bx^PCg;p9VbMKe6u^Kdy=RsA+=!?VZA|pB=zs z|8ee;PtEyFb=3AF@O>E`=K$A4yyo=(egB31;F5?B`Wr87|JMVL=MRdzG#BI0hU`E3 zn|)#5|8z^|@jSskn#D07KQP~1BKr$~cVl?$yRi1foBF|KEYP1D;|htbMB)g8LWVH}T%Z=jZ_0j{}~4{-N^UsiAfUfXDrp z_8s(rOCnx$rr`XB{iZPxhBpG9{3o5l@RNYI1^>Cmoph6}Gr;5d8T*Z6K;_i#-z2rG z2OjT#l;`RPweK_QfA62d#y<>rYp{>8C!PQ1GrOGH76Nb2#1DN}V>fE%Gx6_$$MK^) z*EK}#jWY%D$2^raWA?8qY8wkYo*#)9HvZdyw*{W-x+4e3{#)R2{cyGWcfR**!S`pl z?+c@Y>?Z*KC-09o0Z-457z3_8ko{_g$9aooT;oW*Y?fgBaNcm8JJ9s6xe^CFo*&4r zuwmQz3Qb90IKFyL|hVjjm%*!~{` z-WqshNhf^#ZKAePbN_e$<~oMN4+S3MPkCYepAS6lzhqa~`Fj?4T)$|aD~rMT%inCl z^Bb16gm(oV&(G*T@{*{CLn8Z=fwy6J84esZA^s5X^!$Z&T>C(LJ@EMa!*$;N-M+`X zU-wV6Ev)@zz~lNse_Fy{1RnPf^b5y;`i1%C619_>&*ND${cj0B0(kuW4cfwYuQZP_ z-&`X5^MH2(9y#gefWOrdFS>xoOJVG@o4g0T@=)H|`c~fM^~1)p>+m!2`M_iRas0WC z9hOFjgiwCnGO;Y zN*sH*jWqcI%cd!zL=L8RQ+W=PU(CoQAkp7kxRB3>3)`=S3;VSZF09|o4p~ z0g%X@fD7Ys4ld-&7+KE93Pzp>iS4eyg$X6Lzls@1C^3HxF0@+(7bdpE@|#Vi0*P_2 zh70$dXK*3^94;(>1s5iixHr7T3?#P1_V1cX1rqgtfD6lOnR1kvuVeBkQQsFPj}r6W znLJ92lPEyQiQyk1CHmir;af92O8h9nS29&z4ve&(tR{^(b*YPiFEc@#7RG z&z2asX-s`8Q;!lqrokWVZ#t9DVB`#@d=^MdY>7pgOgT#YIGf4;lf-*#9@B0)(~d2% zANf#@eyn5aQDVM;$+IPvZ-jEJ+Qig1C$Y(9rX5@2dOHN=7@wm|Jxcs|jLEYl+C2f~ zZ9rBs^(~R8=Q6`LC(*BK&<^DdraemhcoY7hp4&{mn#teAN=PWN{yvjuOFT<7Fy$!m zVT|2Y*oSa;6+5=2tLzl*q3JiADKL`9DdV*Bh96wnTj!nR1la zZYz^ViS^r$d$nK(O?_nSu_YFLhCkTu3nRaR#PWLhgZ=)=lw+r{od|Zmg%WvDhW{st z^F@NG|0jw5w1Il`PnMDGK%!oGhC_+<9hmZtOgUR(dj+PPEwQ{4!znVdGt&+w>Q!dS zRhV*=$f+`Ul$h@V64$Ldlkd*dqr`d*CQlNIn#!^6zxM`B|8}w|-v7Nf5Uj(EFfVZa 
z{rBELuulGaZvc}4k_K)VEtI$gX#)S>djrAyga6(e{P*7AzxM`${q?{122I;4r}q+g zU;gjCfgoQ0y*Cg%AO82=K(LSe_ufDdhyUIi2=<4Tyg$Gr?mxXRK#f@bfA@QXMp?R7 z3nKjg&0i6oKri!n(WI0=x!3TS$BK7$xiq?a-Ges;S2CyVn)j{8^iMwV)8jjen`Wl; z%ejAL`g(1ZDet>RYaJNrHgfNZcW)#|ojqV+S$x<4zZ)mLc+aH7-!b{dIp@og(E7zV;?fl@A zh@V||<#DY$ZcI76J8))f(f#||0@B<)+!VhSuB&!A5cWo=`}?kI)Y{nXS8LTh)On9a zm+qj5{Ka=#l=w4VExcT^D^)!@+h1+#gjVNnHbhBpdj9mq+1k5-n~r#$&l#+%;%$6N zRQike`ao|z$t{=OG~|tOahd2ET=3Ay#TgWlUR)ED_~xgcKG?SZa<54n-dUQBGEq3Q zc#%kL)MB3Rv#j;gdhUtd6|`FQ-kvileH&!1%v`Ys{BV>8l=$sW2Xx4=8lur{)010M^)}S?nsz!Y)Jddn_T_aBrK=zA-tYZ0)Kw)*L1cEp zn=Abe-5r~HZ(U%`@vU7XCh4av^xguBNH4y#ro=xyJ1O&N;{)}@qhCABxvnu&voxhV zcZDFX|c%{oF55E;Md#$Aeiq89im9i#6V+U0*)&+A8S{mRgQGl=46k>BVPT zO8o7suB>+}kGZu`d)JP%^)WW;6W;7vGH6u)!Iyho8@pvgamc$N%aZdy^vkZ`0v_67ny-Qlu0l|HO%@X&?98_--0}!u9H?*mYC%PPK_x z?wfm~+rU-zF4MiYM0cj=wv)$a=4E?t9LHDQ{iW!?x`RAEO`_BDOO?K#PH}VBbFFxB+;*b9Bmi5AT+3NzoGo{4$3D-2bY+L$d z!Ig-bJ9^UXW7PY1j!?bta&>vwixC^#Uu@o}ayh)b|K*-_*Ixv^FS=E4VOjqze^t_& z0I#?Y)Ak+%MKo@0NhwHvgLXrJvsCWjLfe`L3Pb8_`I9HK8F#-hx6$Itz`WrnMieV) z$(DzjP7Lfi@!H9&Q;g&53a43he7AG=6K;cb?TqIpzxCDb9-^e1Ex8a zA{Un~JHIS1;peMU&vaMapAxY;V55^_r)~EZik-ST$@B2ibFWP$-hXqLot1dzkPOd2 zkEIvC3#Y_STBbT6MDFsM)J4NT8@t#1$daF1Wk1z8$|k(xhnNZPx%`Fn&u99Nxx_mZ z&}~j!wu{q^daHttdrf5%gCZ^*-Ot`9@w);_{N(tkuEx8^y^n02Zd2jd(@)||)^(k| z+mw%ZSKf(Sq7XMM^32fg9gHu1F4vkoC2h^wG4FrAn|HhI*aZvB^6wN#<8Q$zzVf6L zB>!aP*RvyMD)$>u{BE)fe@^!HMY?zI+fCnFT;lENx9dS_VpTiiPg{d>7ZiTZzI!<@ zb)5c>32oFjYUVg=7Kl9CKZvCle*-~@Z@@20SUzQEX?kwsA>EHX!+-YGs=Z|Fo2Is0 zV){Xq4n_kUD$0MT__h(tuaM7Lr`qS|xujMb>{s?$(AYYq=tZVCC?bFHJAF!gqrL9a zp1Ac@(UrHnF=6%ILg~-^?Y_4Hg1x`aOBgPzCvBFiQP<~v*yRm5<4*MV?%t4{(^2br zufRiAqb};V9lR?86p`MJq!c9I*u=fpG^g_6??;$q&2!jx{jJ{3J{MwqbrNRgi=|h^ z|4^vg>Hp3u?c$WIK`Qb;kCc>*eR^Ikccc5Z+3UOOt^a(1rB{Ki*W^yo1JO=p>(1>z ze)>iFX4fqucXlisqoTVrZrGVocKL00Scx^PpKtc6?J}=E*AgeM%<}MC?&9B7q~?Yf zPwvhDYnI+lY`vNLZC~2nTYezGa%rP&&9J9Nty;hG8MAJt?Bd~*_(y8Xb^0%EEV?Eh zc}l#e^o6@m!i+>;^?3DlZJSB{wT?@MsImQ3Wb4(K*(35^eyv`Bc5Twx0X3a$Z!B-f 
zSzuwKJ4jvp+2nAUiAu>A`W!V>Sh&f&bG=fBQ(aZBWGmg0pD|#*n|9IEz`ZPgJG1o; zzv9gs(5KznqPbu8jDCFhn4aeGLD#$fv{H|#m5Y~sc;sMA^1ueEh8b7dOS;CiDbDb1 zldDxVzt+56vhO>ayGB1)dht6FO8lJfXF^k~{5|%Kv6w6M@lyZ9F5M=-GTl0UbEn7) zhA%HJ?tETaL+jkAn>#XADlY1K=Zy1o@rmW0Itu#}X630#tv(KlXg(;DQjmPNoBLAB z=OpAl?BB|`Z@-@+TPm_PCyX+ko$B=Y<%(qkZ&WWR?RkE@nzXcOr(Rc*W&{>=dSs%I zaqVT};@3aEWWE#pwo!17!ryyR;+L#nzT2kg@c?;KG1py>cg#+GH`msGUXHs z#?!qo1_Tbl-`G(c@V6wC_;L3|rcEBQT7AH-3e}v5?~C3HjGMgPhkqqqYyMEfwv!JI zTcq@AyJpm^vL3A(T1yPpy7uUz-|@FnKkp3Azi?`8V<0FZyNTZowS2`o{|B$r z;{qq!Cf>YNJk{2~U0Fik_3EJDG;{lHBMn%3)!2Hik83~aawp}oIj?Sg&Qh~8Qq`m1 zEnQGD^u!QJ6O$<`3%qA9O*>~_+UMD$i4r+$^%|3;YvzVH)*5+M#&t}xR%D-d@SabJ zpQWcBCb9SFgl!&~(;sx{v9)xJ&wK4VR#8uScU_qMYB27O8EN=x4A%bAl` z+28GSdW!Szfl2oQ#%KPxVIaMAqnfyzNcZ!*{0H~YRM6JgasH^&NKiz2HApE){^xhE zS9jYQ5dCdbHJpXdwoltTuE(qVNPCwAbqPIrwbFW@6Su^(`t)g<=jwq|4-Dpp z9l4h4z~>(vcx`1qOK%UhUb*+*wvNd?=RK&PSH((AS@ituP9~=nr&#)g?D(l`C|9SL8M*6C97}Icw%(a`o*!I2<}UPke^5*_ ze8ErgrKbzE<&TMsaKFExl2>mzpl*@1((;HKn{=*+jC-+8IVwwI#QI*j`EK)Twr~0& zHIb!Pldaca;o+rPdG(np4=t{psHtwJI>`Q}{?L(W&z}x!Z0lDy{!B=Qhv9gi;(W(B zM_+q%dTg_L(WK6?Ge0U8ozV!j)n=a`wAgwpU6myQ9A%<Y`wjlBA#oO28QlDnyoj~B(7vz&Mniy*5jYg|M>md{Tmma z`L0jC|DnFmm_ujWPTpMLvix4;*t^I26ih3eytnPP3nOo_^y2T>De-$OujnS~abTr$ zxYmX~IdMr=migwVq~4%XbvcGo=TH~xF?)k_^8MD9QPgT3$Qvh|v|RwXn>na;M^&fDR(V^V5o z$&-&pw(n)R~F_Za;#o^cIS!X4!SSo%c`E>LnDSei| zI5#NqyY3$Os+*(hmW1i6CxsL$P560pL^~zkQ}wdy8qazsy>+d9%)?G=^ITq@6q|4) zdrf`Su}1wyu(ZzQxC#y>QW=g|0AvW;}eQC;KYjPioB`=`g)-x~g`POU2B+mZB%-$eY}^PUDDTz#?7 zW1Q*i4|a7K4IT3y-H;j=ShG=w{d{4<*1P%k&4}f5&kmj~QoBjJY{;zVBaRh)(XKt~ zzctp>LiI`V4YU4FW?bwwed_UT+ZT>e%UiK#Teo56154sw@^v-3ynoB`w;x;Y4*6u$ z5lM~`mrnH)w+c@FChNC;R%ZO5MR6e!m)~#ypm}&z$>Z@yKi(gcyVgOnZucyS=kAkw z=pEO7F?fIQIFTiICZjl*vh}9=csuNR*uC#qw^Bdv>4kqA{a51uy!E!;>!bhqcE)NSjT3uq?`r;T$%S){Bdw*^-2J&|*{kR7uT?Y` zrx{6{emi>HB>8m@H@-8vUOVmKjk-;JC9h1uwLoz&|4S>Ziz~AtI$nR<5c4Kmylh;X zw;v-~eK;6=>C!i;cQSAFqB0!$bswH}nqHl_annhiuLt-wx9?1f+IPtzM0$JKOWRSK 
zvRHa8*m`AR2UO(Rn0(Xr8>ntCv&&9FB>8h-k9zaV?ixd-+hw18{rJFk&l@{ZZrr+5 z>n@q7EZQx9x^jHm8{(hKPuu!;^J3|>Wb0isG&Mf$@@S*&F=G1+bmJ_0jPq<7^Qo?B`B z*4I&bcFz@0mfxsJ{m9a5#n!8N@o`0k-s;=O7R-!DE{~D>;q~)--@?PeixrkFX`}N{ zZh5BV+IgJ{J5?L&4zoBsX13(SZFAEbdrCY1Fzf&D#n-_sz1D2Kw;W|7XU%Esab$y* z+2Pf@Ir_#+l`TB9hJ~4S5Z9ZxU3YLj->f3F+x>n~?KFSrw(gyh zy;=(29a0=@*m}35coemM9;Rxi5O?7Gicd%9Eq|X-{M9XHzudqfV&Z`Zmkd$Wjc_f< zQB5}46ma^vPVe9%>-+YdMYlu^bB>QLs%Po7W$SHwR5yBZ;xdQV!AGAR?|aU=^%qfH zkwul?(=03#dKFX!>d%O{akz7^%<`X)rtM509xUZE$#});8#~iyogdTqIer&Q??ATR zFRj%4@7(P90ODHf0Wuin(waIJC8DYH=dvhm_QvkJC~zI?DC@@tyLYAxG#Z(pjP zu2HpLI6rOl#>rcsow+b}0ZXqPTkkfNxUmz;YlH0H9B#kP*L2_F>RPFurpeu(NzeH@ zF>auD+N>X|=7zpJxv}V5O6E!1Eq7kmU38S%baLtr`~3?uwtZpgwP)*{;Jf5QX~~b? zjV|K~+6T-x?7LW|c;ED$TN2GACDX@`RJc2CpnPiKwW|-NUa3@mF)ghu<9Oh`+xObm zXJ3(=Q_>aBdNghhY`u=z-k%1kWaMuVEhsXap`G~h@%l-D@}F(*3^fQ&8UEz>3g2`& z(^yrNIldPR+n*9U5K;5SKx+6or}1AN%!)Qwz<0i+*O9GP>G}5Bkvqx_JoINN?wZhj z)_UE(Vn$z6S|3?EBxe2wTT>)g0@N@@SAW!7TfR1Q33>2+r7T~nu* zV(4{r%8VG7H8T@l?CbW_`0|*yv!5;OeJE6<-JF2O#${Clx9=LOu+Mkop_Q7v59^$J z_K`5SGD7-bWSz}U_W2CgG$p>Hn0cS{7ju7vn<{9Q-CbMZ@S%9N`*tnYJ+}j%_RtSF z6IXRg#X6!$*=zWYlP6Dq<+ncNs%07$K6^s`%&ktBF0j{y3n>N3e^;=0=8ad5{@vHS z4q9@ezt!kIVaaEWqGM$*C{%4XeJkE?`5K3hAGdVAzA<~?EW5ssRA;VKcPkxpa77#Q z`JJNeN(`7EgnFLP-nZFqi&gx{bWu(>mR>iu-jyTc&zFw8mAc$fMN48u#mm7b zy87Fnb9Xw*uh=>3^EK&Niz_t5y!>2m4bbzDEPm;=uev_*dtcdc4i5Q)>ouf4vGfjM z>lN{Mf7do~a@DJvIafmUtyefco;>1q?b0(_j9%v@-B$0ez52+&#inC5YUa$pw&u&y zAtMbsOxk&0eu#y^xA~JxL~#$JI1FX$U0gLgb^hLgOWIj?R*ANq3S#8L7i z+XrRKbhOqBabA6GNOt*G)ymJA=WdpczuMP4H|p9n&F7V8}2Wlu|1`pxa2<*UvgO_*U7l<;F|UQMt2d!H<>Y;Di8~Ps1xNDtP@>*x^@-1#} z=BYi6>M$_ymE)xihrep(J}kBloyuFI9KTO>?)Jr@WB5u|qCM2qBxlU%kN0-c>%rEW zb2qc$u*KH7`U+<|NgRBYa{R=0v(xXia(78*dVI2becxovwB>~>-wwMpCZ*%biVve4 zr+rm#{j`th9J7kbw!5a|yBE^y$<}N7M!RdM^UET&;Aqvbw@&*EE4vqtTv}^B_w@MF zcJ21H)jH<0GpEdL(8f24202z~`PkmGC(uB z&G4ExKJ#LXR^dH@^o}B>Ao+I}_$%*@vYMhSYhamK)&Klr6_q31*EEQXzJ6}u=B|~+ z-As>G6dGihcF@zP8hg{^C1ZO8m5^`iX9LzP;FEa9wPnlE)3v=(M_Z 
z{U27J>M$*%r2Kk8{l%c+=1#9|D$G*`%vF}PoU4Cl?)Zx6t6PiD=qhbFR0WDCzCNTB zB>!f=-evDaa$j^*aXit#pk}m8QGHdtp<1hDg+~_5b3C^9oWI0_WoJ9SFEFiomR0E; zGpwwkc*VGm#U0vt@+0nw2e9Ui zNtHzB8~+bgcNJ8}(gg|_*+GJPa0?PVNN{(8TX1)G65KtwYv3Tk-QC^YEx1c?$c0Z2>2}Io4jl7&u(7MQ-YoF3*lAeZ4N4h%n?3aMNRO+H^H7% z#w=~@2=9M~n|JGt4C5LTG_nP0GD1LIg=w>IwHLJlT@#>-BNXDptUGMpvCe{!q94}V zd1I_V51-IaI2~PWhHj43{Ggt=7Q@r z1-eh-&@52u!9RRJKYs?8_H!6!a>im5-h;TqynD*?R+bzxx?mL`%$Uz(ac=JMiihey z`YjC&Kj{apZ_+0gnjipNGoX7UeO3{#Bk=eB8Ty)*ef3%6;3pNFa82+#n>0*HV$^?G z1%jXQTNR1Wk#}94`My}U4my7QjDF33uxl^b)I$8WHiGMY>w$s;v|QQghi)H9X-y`x z;BfzMOKdRi*~Vl`)N8vZb1dBK>p_dP?iA*l<)A@g6T?uHiOOh@`aHi}&NH2Ms%zCr$wO?KcbH0A*cgXP$5E zD=b@oI}2;#(+JtZ;eM&^Mq#qk8vPdKDJA@|g_@q1v|jzYA%u_(EQ zEdSD2b-P|6aJ|-G^fiNUSL2P)g=2oG9h(gHK7y|9&uBzJN(%KCqgEkn(fs+Xy!_J1 zb{=?cR`#DFC}`fDSBI3XGq^sX)P-DD`!)g3$2LHBEj*qpp0w`$GH0nfReM>_p99h_ z*k*>0PjF`9-ae1dIyZx~qX%N)x8Fv@m|I(HtOVUOJgf5iF=)s_G1dRRT`O?Cwm?_8 zft1uxdS7T_Eb^+$lw@Kt?i)pz0r$Z7arPlDr%u%A6v-o&vL{cN6>2=LEWJ=vZMrev z_J6c`_sn^g;{x;m*AD0!L)eDPujFRJAm|0%YN*&XmltSHquJNhCK$k0;8>+$Xq;5b`tiaGOl?N-3w?FLzGmrTouutm% zMqe{%Ywq0;)e_c4szXSm4YQ74s8J^cM=!LPwHju~9F`LH#4=%?VyCTPh>EXPUr2D1 zT6I)06}A>KYtr^yHNO7ao)=uNBhXE=mRU8-M-@f$ll8qe7{ds#QQOpA?%E7q3ew*; z#w)`@a;Bi7FAcV&0L}8?Wr4;tJ{aw%8D*aA?7@{N|M#}%1-ox^85|&^FML+_jiao+ z_hCXM?|9QoF@1cJd3jN5d1Z|CZMVkTq!$T%RpX|R+cpWy$odtiCVf{H(K|^V@{xw0Y=RwWDTdJl`tH!i9-08UlS@f z4649Z6jeuvm0T?vn=zMPtvr2!()SN-0M`ZRVnKiZjB`q{xqi~23F;EnL61c)uZxDD zg;9@zxn;zZ6@*n-{N%2P3GxqIbi8Fr?*8Br&u-dgi0uBa(S8Z^W`XN<1-hmm{g3CL zXLC9y3BGP*7gO7POR6NSmhVUw*UDC{&~fNKA=%7bpl|T|Vg^rgQa17Rr(w@^%6!vz zmB|98AMBq2?suTex!=m+MK4D9p-`r;59{&oG=JJVz1BKOb8-zaxwI-=@<|3Q4_6ZX zLfY>ij)-AOweqj-!r+|!E=qpqFAU600^A=!7t^E`r!-Jb&cdNN6NFr(&o#&^e`P_e z@fhX$@G}bDov2cOAj_ujw&z%4m|g8k)5w>6dq>i{7*9gO7Ug8I8sNGC-MX)`(JByI zq8Cda^h=2GNmTD;T3Eh@M+Rh5c9Fb%2??JnmRtD4o2;&F199i9tsU7YsP~cy^9MK7 z?37T-0teu_1Ko4Xa(s1|w9%D;SPX$a#U}NggLfX0zhFwYtQ^ziniVakuBW&t`QvP` z`$t2?5-N;oiY-)|D%UJE7*g5X58v(u@OAS5y5H?N?4kFg3O9PNrCYx}5F?KShm2?j 
z@+QXQe~t0wAyzQN74_c;Hdk^-uU6Z%{pL{x%Cg4alwU9lTP~Jr0Gq?J$^sIQgt*gHpXd5u~tk;k%$l{2CnKx zezw>k)E^|ZmrzNR@}eXm*jVe4k^ijb7{cktvj6rj5A1paT`}lNmTsoUI-6>_*B@bG zW{rn>`5i>v*ypK>82CUZy z=u$>zP{~52P@j%@z)R8}DD33BUR--#@5DH7bSAgCl`6lti0+{F`riD3)2G64-8o!e zBRIqvBn;Ib78hW03_K6{0^N03vCuh(;<(P~F&sLPM%lPn5v225pG6@HqGZUFP8vuKK>eNwnM$*!OU4 zIN8f60*P7@o;jzF4ZDw+qltV37^i~~SDxdeg_|FnNpx5|uEQR_8Lo^vnlh)q4+gmY zK$nQsUmW?Dk*p*$96^np)=$;k>G3c$HAOO#D{QCduXCAx79Kl{%c2`9GHs~Rsxe>E zByYYZn=P1HO(Mrsr9A=M0HFJMa5Plg_yU{Aa9Ges$|iflZ_Z%Gx#s5`B_v}bd_|l@ z`u39|Q#S^z(f$b6)a_S>rh8V0lz4p64zu@VR6ocORd#ky!5 z!Awp^6YTe(9C0M>l_k`HPWoLV*b($?d`Fq7CYE=1`UH7-{|cLk21mF;&fq&WZ-D!U zx3dvAK&4n-qYzKE3J%pac@RdP>R)5GC|>`lv+znSRW2PjUXvyahmY!y(dGdvbS6aZu?FE+?Tw~IdFiAu?EnIKEavx zlTiOnhLT@?PEtuBhhtqqhuxP%)Ox-lYZbGXmE>l0=?2CB{!+=J_0v6DxDh#nJ9a7H zPsz&2D+HcrpB1%@@fB2?CL8ZLHif{IfC*)`aggh26R1-_2m`hq>ViWE9fPB)4u&ij-h>0 zoY6$Y?HH(G+Jqj8g*LA6^c}b;{VbKCAw_rK6O;TtAsi03B#A!-_3G^m1g`h(*$fVl z(I>rqnB>;Nay>zw#npkq)iX&la^XWlzb|p4_>pU4C%>pfKaK3$n$?U@yML7XsNv~8 zbdDV;YTuS!p-^ZAJP$>H(bo(zP`f(0z2t2g&TZT%8CE}@w^4j&8(sqQ7u}OooEd)2 zQy;~7=3Fu#!(X9qOrhBwm-wm5gSR+B90Bp=)d1J~6X-gUa2}FG5WxL_U)lZ` z)L;=JIk;V`-+~bCptT-HDJol-)ATOFH)T}%G}kPKXMckTMTy~*y|>9uN7iNa>N7RK zjRd-IFni^>jvJJy(RG-$+Xz&*5g{Q6%8vhF?JgxKTj&-{b)LsJfP!B99!-J{9HhT>d=&dk3E)-YM=MgSG6Q zJtMHC>c4FTjVlO~5cBTHNxu9i0Xdd~7!C7_5a7POD*^|oa8p8O`tCCg+5Xfa zw|1h}FEn*aKN|X&tbh6+D=BM;9Hf_?k+W5!^ua{@Ue4OsvkB}9=nTC-BH;M zv@em+#1&mhv*eE~m{VuUP+Qaz`VRm%7U;SW%RUgG5zO^%pif6gx@IA9Ya2M0uh}3D*PZ7xf?AZ?&tN zqwOdc{GUz^6Ie?#^y}E-*jSUQ0B!=%oqXE9PM4*>^wKie6dzezI~bkF7xGOflO(yC zFULarFmz$=5u=>paFSb<^u0QQ93xU4>e!pbT8K5UGjV31k7Ry^lS+?jj6mSA{YN~(KJLs+a`+1k5 z86x(=t+C5sFvn+y8DZ9ITlCg`fR=VYSAb09Z|@UOrSP;VO0)o5hjIb(Jr^N%{GeXqf$T0NV*&XP~0vVkDp z5gr_Lg;P=RxGHiB+IX>KlF&Rk=g52)Aow^=849P_74(IX3E-v!-8GHsosc=1(7A&} zVaxbobEb4DoTc(_4O5Nj6k1TA=%y5N(I#FZI0sOoeyx8Bb3;}i${BW+uPFS;olo5Q zTTcf(A2NWh+xT~uAJe3dAU;|&^PAi|yI~LKJ?alCYkxW2{#*I#xLg0Ge)(UWdGL3HJ zB#nQzXbUDC=MV1Re4xd5CM(Y=*(unnvZ>3DumFvfAUlE@?yF_XaoB|ex^lwl7o2}W 
zu@{qa?XN7azd~U57Z`obAg)52@sdqkZ-PPWI3WdTeiT+~G8KF)Z#&f|qdR4i%3nlc ztx`iD;mD)?($>D;78g;ebJhIaqOtHud?cyI-T}DTKsU;DmH}b%JkK}>4AQ?YR z{G+`L7)A`q##D2HS?0DZ)KB>W;Y@zl=9z>xDneg;V}o$p;y!a2OXtG)lK}6x9H4tz z6+cXWE3B1%J0w5DM9Hv%GcPB4!c458QmenjqoHjOuAE6kl^HD9?{OQxc!fh-a$Vh@ zPBX4sLK{wbzX}7W_ic{?4p47h7jcr1zO2%DJtzo2``E$b-1$_~t%Tf#b!1oc(~St> zkZOoCKNZ2BBU{0;zO)xZGC~A{TWFNWF;0=&dev75JifVL^fiMLalAWp!{gRrO#-^= zR4mQhb%PTY!kEb|R951e@f4R+Ge|D!Moft?lTev0h*2L?dpuCS`lS{cbulGe;hz%& z+&rM`?G}9#zJ92wrXj@nDD*24)wZiM!$}4KFX@n7H1`vz-Co zBd$3j841$wQq@s*NLG(cfSV6==M;xv4H;Imqh?WVzLQr@UTKQ-H*2qatcNac!Z|El zr6ozIVE5U)li4h}&O@ZwlO&p{&5&6in$@vwy6YsR0JsG}H%cg6@6P0iJ=HPPam~;5 zzcPz95R~0xS~v~8N$nw8c0<72FE;;{!bRm$FWau-=N!CDxf|E0@+wW)=0W2OC4l=I z=z4LLA!KQxR8(me7qdCkVB%-RBvFUxw@iah`S$1cRtaF^PZB+AFpMEL8RsCeECOBE zO|-+mfZBXvzRoX^7vM>(q$W}#!hnRE9iUg)R4-yOfuz(0NTt?t3w zdb05!H3^7;P)Yx+vtgn~B!z@CQhebmN(f0Rd#nMdw+QG?4-()NE_mNiMh5766O`SN zUBIx>6VwiCd!H#hc_tYwj99PC+0IbexN9I?mVHMDmcB8E;p)4V^dv8aE0_JHSE zG0?59-XGxpNAgqF2E>p~*?E2YHANiKc<}Yjp>S|@4f|gv#ILYJg|A0OmrJ&`6iq)@ zq76Di$ecO2*~l{Tg(`P|TLN@*gkg_&=Besv*{5^YuanB$8$!Juxa8_~-IBIG#V)zo zw0?n3jhaA9rRhxhVCM5@jm;MHXN*Jw6qqwMWTyyR&q{&rcgsK=R+|+Fm5f=FSLVfU0O~CR zy8p$S4HwJc2ZTQ<<-$R`iF0gOh$$||f7R1iVP2Ljtk*urDo#hq!sl}jS(CGsEo?z6 zPfA9eAmVXKu}W~?mj$@xKo?~m#qaBIg7(Qu#P9^~2ZjruYdE8gTgGqi@Y%%`@me+b zg)QrSKpAZK4VH%u0yW9V4S2=Q-WcIcG)L;Dw@k%Fp7mGp z35rElMYwq93LAlG zmHf%Tb#JGc)G^b10&w5@vQa} zu|CmYm26_BW#Ps9r*I3)!iyn9D5y;*PP>8rP1#M;_aNef;t$5gjY~R+4&OA0>;0(RPj;x1j7y5tE3Da})<0b6T} zZ68g_`Xk1Zg4qYu@kz6hexl%Ih>ERGb>p+Pm}sHD@U)*bgcpJP>bn1_uj2rNYRhg2 znP;>l;i@XyzT7z{Ict&`h!j3kxDow{m|ie&N)H>7*Gf0&W;cA=a_5XT(@PX?|hmZu4Kc%$*KNT`A}n0~*PJj9ng^NW}r6fv*s_-bOI` znn487ef>!qy<3dE6ZS3E>9z|THDAxkWVWsjHg%8;D0!R_$8thZ;)dbI z1jS3TcT1Lz4*qZN$0o$wCHHKb=a=NP1NtEPN4a>35s`;kCL0F$oKtM5%vJL z1?W=1ET>^B8SiU5nw5+glNqs>Oih__nR{lxEE^9TWJdOytyMe4S^B_Q zQf(}b5uaiaC6|7=LpYeX_ zrS()-G^@2sA6$9IByPJAeYo&KJXb^R*O-g6ddyRfH?J{g@rP z#U8$lGW-RGxoY%n?+?Ci9YFUUO5*}#YCSuf>q5>DaPTo$NS2P5tgS9H7%IUsiL5k~ 
zj0m%~lt8FwDp&F0Y{&&}9rE7ErvMi7As$qyAq9NScLH4^f6PdK8sS6Y4DGzKWj#wRs@nk%>l^bx=Hbinm? z0o|<&{1{rEmEG)oJqN3)Z}Ca)uH}>coF64;ZpkGJ@YbHSSHt4=YAN&2pdL{@sKTOL z!fZw0G^E4U@2{gWxZdUf*nOKD-~hGnnf$F*@z|v^R4NYqRh&kiZV-^EdUNuCwm5;% zCU@vOe1bW|ukkrMfqQKwer?|0D%tf<#?L44@?Ci+VO^+1b4v5@ zMYLZHaJ{`icPxw9?Pafbr2FF#%3<`y0B;Sc492o)(uEe%GPkgOSGi=eg1PqXz_TDe zRIKse(?VmGO1DqSYMsu-tsEOnp8;+k&-yMnoiqf^1T)J{L)q!xg13usSfo}M7l8saK%drBy zY#ZCGq)(o|^w^iPZ5sVX)DhmB8E!v;1RAa_DH0?&7Td&Sd^N(U$dpV%@q(Vd{_m0w zFK@pM1CPT1(8W(HWb3R(wTvT@zHnzbD&g}p^PRjozs8etmfvnY?Mm@vAFN84~ycHay&uQ{a6u1a#NaR(D;~xvR)( zl(t=OAl*u^vq*sJLRb+}P8TyJ6AyVw6slb#+FsisuqP$BdpDAY2MPuhX?$!e9 z4gneQ_j7PdyqDrI-xEBSVzSgdz<&X6x0h)sD?tW#0(tr&aphOs zY*B&Ak3p4i7o6+BAg#a;qon+rwvw@(IB-opw~j>P6ymodLd5c~5cs-{g3;FuN+sD+ zA>;Z)t{EPX5c)LsQty9RP1W~)CsS%Cb|0Dky{aI4-S2+!%G4jorbAH}B9GU%$`Bvu z-{CUV&F|Xs)dP=8GJ4QL1i5jGWg$e4AkhI#_+iTrLSB;e3dXnIJCx9-# zi+ejE99Da;u%CU~QV;|_vUFIC#vST-L7Q76VUc;7$TvgxfwPeID8H{}gElxCLM8;<*SdIKE4!2l)?VyW797)eFXv zD*l5^l#Q#|Y{T!6_sym2Tv?hsWdx34F-=A85a3P$T{I?*rU*7UFSTbA=~x=xNOD%w zbVGt4p_@PcGdiqIy8Sl(!*Z#|^r~@i*cz=VH8xnmCC$E{m&QX19Yr8@a0TE_1KrK@ z+_M$BfPP2jjACNhB8@7MEgFj74MOtXkzrdTN@p>!a;+PS!i=LDHK%>LnKtspng4yY zCs?+{Amc)*&0qnzGeEa8oAcDYuuryp#f5!u{|XicZ>lPGSak9{zS8K4grV)SS_pCs ztvm$DTShRH!6>0<{FChjRf|s&`gSlw>Vh1=odvoJ(NydF5aLDKv_1KkVUq|F-qwTO z$T^R-7^}FMa5L1aIGcD(jmbS|mpWVFS5Z2R-MvBQWvn%AYoml8e#mcJ@4~E^Ya|)?X2>(4<~nzD-1P06Za^v_ zo5zpC1yjAZGaGoGz3qj;0pi69B>EG-x^W^9ca}cKO)*GRuXbG@SH{)p$n$jUCUC!) 
z;!!BxOL9S0{qB(LVh&ef%-6da|FMg8K_?hdH|7-ry9;3SHG?Y5QTlfbnHPQbdv8vC zI55#f=;TvO0~&M+f;jEUkA{^VzTCFSO%CQH@(WMXQj{jwL`=4)I*atQHnZ`Fy&etz zpSuWj7ioXZA5g(?;xTuVcz>f8S0V4;V2(WnImLZ9-Ke`u-~X=Dv9jw)JsM;vP$?-jWSeO|Jt{t(?Q+ZSHN zjcg2@LP+p6H!7jk4{}vMGPr8jGD zDSjfy$OlFv@;rDO#~ys7TPU}Rw`J$2Aa}uOPuD6!Q~qWyOi8nZS~{kUx{SA1p6TQU zJa4|;Bj5mKUP_txm>cmwh|TNls~mQ4`5WmwSCBxbwx(o{lA3ferpt2NfJD3i6( zuL)M3R0;^~2+f=&+~&9vD_Md4omDXUnn5E$IjF{eFCZ}bU}C2)kOh{g*PUy^1{F{t z1RSw?F1;=nI(_1avt35MW%)S;nCQ88(WtR}X1-vc5;Q6oybp0((e&4N@ z1*QFBj;n}{OBgKe&IB#T{Wn-#ApeY)zH7Rn`Fp{y?7=tqQl50~(xNA+~nqlr2i zVCmrtU1xDooLKBzn(5u4aye6^C1Brv1L%TGX_&ffWPRPkHBKH4=OkuL)k0z5WuGF@ z>cuhIty4^mF6-W#)d?1?_ZYS>;?IRIBizZ8iBXLBjZ_ND49}<^(%668y+UAj8;rhY5E<S^arot~Sa(sX6NqWr*9_Pv_lPP&WvvvwMEAyUdhVTn9^o}L!=Y!dLp~bXV!1<* z_`+SeG-At0ntcv!a8v{whaI5nJW>?EJwL<1mEfYK`m2z3)A}+fyN(3dx7r47-PY1|&areisw%NhO)mztqus)_qD`u*~J9Iqvu#%x<7Qw`wm z0p0h6{$m6j3OJqD?bk*g$q$E->BXIFf7&Jb(L(M+y<)alM2;0wUht%}Oi`(ETqRYe zcb^_Z;_NuORNN({zP#PL;PKrDy8D`FkO-fJOHH~Dqh`~W87H*PMqCQwioQUbXZpOO z=bFFtlIlx#dOx_Cobywg?LVD7PDUv)UZPC{xg_<}hqpZg*gXKcG+NKw3r-7GH;vH| z_z*J^thIW$N={50*7{qIqsGh2@({K3?R>P5A&pIe`WUfGeql{Whdzp`M?`#K|jarwZhjOGJTiw_@BLROV2JN*zs5+8qk&6a7L3;0z5uC+AA9a{n0A- z(b~_;hCxx)%+7G{{!3b`znQ}Ng*sh!$Y={?bFG+On`d~bTQ8+gP!i| zjyaepa&t%eG4uQvnu&>-@lP2doNjQR@|vNvG`h+D6t1*I+{n#Zru`Ox#&B%F5=4rR z>Gp&`7X!F&YdknWEK3!KfpB!jmUhC@l9)OkGSXI#1LG3z*FquR6T)s@+*Wg+g(N)8 z1jo4*?%NpMr=hn>=J+DcWOFqTLWU7Pyh7k{I0d7x8T6=KLO|?W-Z>DQC-7MTX18@l zi!MMS1X{C^CC<)})TLpC&Egx>;|B&M&F^c$r#CM1u(wNcs_Cq{12vKB?QsD24Cwa3 z=uu7uSk6%*=j}U%P#mi}BaHmiX=gfoxBa_xSkpHhof6~m7pg~mz`vfT3aPIQri?cK zJ}4b#)~{i@LtYdC+;gBy@FHUVq`ZCmRnwVmh1AL~PxenJ-uW7xC>j3q%*OnGhIEH4 zOrC#boSpERZ59)L&+Kx51GMnY(u*w7%-sNZ|T#1$14?KD|65bBe2=txC?pdx`4@mO982kNn55au*bA;jFslhV;yE zF6-2T<_y1XgZx!u{BQ;vN##Z8T?;OzhOf`71)dMrKzHwq#Z6!F5tKzHk}~*=8=Qi~ zz8^H$k2G!Tyn)TpeZKv0_zvBFs*TUq-25qbPLhqz@uxKlA%W0JE&f7YC~zP3cJ>4Z zNSlveWGtS*SL`|rA5R$t>m6;pP_9z2ue?Hi5mvSbto}?=?=qpY6lyUwm5Hfvj!CMV 
zm*44Vf5&Ej)nVz|C(Zs>@7p^PaDXOQxeMf;ol)JiD6doG;}Yp_@tIj9wSF`%Nj55R zoKB{r?6kq?AXK>_RKvRZL7TFQNC)YyfLgOyJ{ z8DwR))~Gzr>)BW1Q)s?^whJ}+CsdGZ*(#)*Gec?NZm+6CAKE#;(kR@L!^b0VF!WXxDzOaS-o3=R$ub8a7cq-Io_^Ik2N2TErMPr(<-YXiwy z7a|_G?msd#qB82`7Lm$A=FJU)Zarw!r#XmFs{9n6bo8^il=sYm&$kCK`kFzi_qU^| zn}R7HL2?LEJN0h19*%-`KBrXu+z*^Hl{`5TsQF*4O!~79vOoB*l{0hH!`hqu3*HCg{m=b>Ik zcTN?X>(W%>yffaqVHWDjk$m;n%(0E)jLUwXge=-v%_8nA1g`fV7=6v4M73F+)TAFx zR$*+-GdCLW!6A0CW?5RC$jZz+`VSxKddtaXN>J}^Jrkc~e5H~mDJ+Rd+8Cey+W=_^ za|BD}0o>Pr;C}&%+JBFM#=u}u(at;Oi;+}U^2kcMSI$fa(x=%Oj$d%@@IsNqUeU)+qD9Z0~F9TB-IF$ zirDSE9wSwUTfIPJxcYA5`@9OL&@KeY{fp3Kbw*jV(~5rU+jmZX6pd4jRo9w1eINOX zj3NvQKJ8XYfD8UE?Au^~AX5k+kZse~&o<5vY01%cwcG>Kta&(~692Jd{Oe^}P*{BZ z8neZs zSQ^P?3Q0&vW8f1yr6w(v!k6}PR6h0LDzsn)N~Wvn%3>jWv@aA(F>#nqb6x=V9ngi+ zi+!AkTMsi|Izbgw*{0<-YAU_l;rAsSC=0RcuK8kPAN{1dXIABup+qtGEr(7__{YyN zG)xYmIHe|G`^rdw`}Un%aDYmw&=OHs#tYeQCqEM9K7EPfIoTb(nu6kHM)-xcVP^gC zYd6QaX$cs|jer|mh+i#+_r6a3hR|;hLu*{`$ou_Dk3kP&z z54$r=N|+hf)cgM9?MReTB{;QA;;R2p_M3UMUn*?Sh4^n=0uo7BhSVVJ(PWA}#WI5$ z^*?$m)U^7|AxvFTfC~?Fjd0f&ACqqBhwM9`70?=6+eOXxA|lkGN>)l>g-^VuLH|Ph zWO;PWnvjYwA%c^p3xy+e9xmB)9Ae6F@wO4j0PfrOf58Fb-EF3L{hP0BNutc8Fu#n0 zk^A->2hV6VR&0;MKL?o7t$vp#HO9NnUXu`(x-rElo#EnJ99eZ?58%s6mP0*<1GwPt z3BTDOTX#_&6m!MaYIq4hPaK!W_aB!pJ0)fjsgGjBLisc_&!8abpYAn%u8)nync$8% zSNv=lKnIhD^j%AI7=_zS0Qc>?#NYtUs^~A_2Ui`EI@{G+8OUJt#++GFn9lYr@}66v zTC9IFIf>S&GYDtI>%6z4F@Mq(Q9GONQBUQcdrnPd(e7vfxZv*`zuBM@6RZ0j3D`nj?XR=jE7=kN}h1Zk$<$^AyqOEDya_=x+VxsJiH%!DeIK%I(PH_ zTDc4VoFH&# z%;0D{3=444fG%Hf5JiXwK^dg<+0tM32P5k|{^`B}7L#_^Y6p9L!pgnfg} z&DbzNy_i5(%dw}Ep61|7rNc$b-WkY1Z0r58i_UD&k~zV4wcY~q<2!`bJFXCp6m3a9 zSf6`_A7!g8x&Q5`;jBDsZ`HQQ0$ePh`&~1YA);LzJFnfG_CX^eLGlR6*lUgNQzA2%Jv7m>*!+v$tB-MFl8p5wT_q}4L+!D0naTTfG$a+?UvlFck#Ng|8IUhm^PZR*hTr8v02d$V3b7e>&JNW7#m_&J*AXrIQTvh>L{8JPgex-KY&XBbSrXn=l6&N@a`lA z8HMbGS-EvL*$z~(#4P5^rq-P=8eJ#Umody6TBhdjRWhHK3=`Uen;?@%ZB7O4pU)k8 zfPGIwpxatwRzf7I@UA?e)J>_!y9g$boU-n>)j4vt34B^IP2H-bFwFi>bvPrmANOdg 
z_>s}R2^puI!d}9X4{Sx37TAD#iGZ#OFD`i@uIS9>MMP+~Q-pPBd-Jx1SI1}cgvg28 z=c;mk_ciQ(j6$9gPb@;wB$852`B6@kmdr&9oDVUMhxfk$E-}y*JjbkEFUOjo$AX1A zC#f=6)z!CB{0$ej*yDf1)Oo(TlHUmlb!}g;GKPm7^i0}M;e&BCzBGQxfT)L1F{_CO za7lpfndy|KNX?|U1@xzi&*z1?9lwWd^Bfi{cvf>yGTcZp`82+ z`{J`Q;-%CM1?DQ8$n_)+3cw`=x(n_DsJUi^40V2+P!GQh^RI2JXP@Uq{g}6uOt*N4 z9>SDt|1SOs--{AwLC~;nqi#wvTipLw^48k|uMcEEcl##%ujgE_bk1jZ zk^8-vK*NXDLD^N1b+@88b-*>I3f&*I18jZtqG?9`48DYx&hAxK+clG~5@x}SMOnsD zz~>M-&?SHBhemksJ!yd#II?LR13`m-`{!qqn3C%nw3?1PMz~qdADe`VN_z=6kvtG5 zyqdIw>6q}yAniC?ek|SmuqmKk@OK7Z{}Ti#=?MMDQ-406MVOhWgsjG;dzP^e;#!BI zatBInC?&!vh}fh+!X)6clm8Umq+oMv##8hygJh|C;jtXGNiBH`z@-4z+huoSB$szj z-t}^bKJ|AEW9sjtG=tetSi;jYL@)Jvm(@UxuR5s~A)!JULmhn*+tTIH9L#N`&Q>R! zURN|e@SH^nbYq${BuPVMOa%;Mqg1w$KRFfiEj=N0Gb%H z{o38hWYlPd^M971*@^k|vb~d^ECs0d?R$yf0CDAy#ifD-H*hbE=LQ#Tt{n`=XA_)+ z7%dE;?0=LMijm7o_P-33Xxz!K-7!%sE6lhVwoFI~d+k{ze;Ba{asas0K(`VnjMGno z;^oc-UG@tWhr8TyFh^}&t)`^jSU^5#$JvE@^HH+ZKa7CEu|>64-3u8~ZjdX4-DAX) zU#mDd5gFiq0=jo|dwmj*)m@EBBM&t8A9#$B@iNY{6FZN^=}w=AV6Ao z2%C0}UA##qK1m)olTv4x&f$AecoYD*G(gw(pZ|VcORek!<4f)He@;Jz=7>bD(YAzA zH0gP8|ItpVV#iBd>-^Zn zF%xCqHoxq*-n@@rpf?n~j;QRrvqVdiA z5cs@c1iC*gNd#?qNsrNQ6J;G*e-PLu?W+j)m98&bDU*F?_oh4MJ$eq)m+uK8#L7%B z(?!_JLpw+jL#zkcI;mHDj4cM#%LH^QZEm@9Tkbiy4t6>BnT)OyqsQ6ck(bQ0Ui z3IF_?%9$W(imHwAZxY)j(M9DTh`FUx?&Sd`?Tb9vupfm3+|NL_;t$oF)V@Fx)8jN~H!D}gdoKhT^wnmpqi<}qf!sho7HxyG*C;5V3 z0WLGpP5lcSdO!Soyk^zk@duOQ{6+D8yy(x>BXMFLOy~I4!US`EqlL3SI@t9Cn2{aC z-bk#plFWmw?!zE_7p&PwW`N5AbQOe>PSoV)FTd{jfAcpfdXiD}`o8TpPRtfC8vU2Y z#1cR}cYOAyy15)nq#w!6`OgT1g zB4vQf26WkbC#C%ni+*ikX3J(d?^k(cB{i%^y+)J%I}>*E!J7)GRW=mct^Rp=3rnO!{*zZFzQ||d zWmDKRjd|AT?io8On_GB8H*c@ z*RgEUIEH@4bJqN{LKAvl?o1s!ileB*DYe?0P^?n8_nhCz@e}nb&?eCYrQSx)b-3tj z1i0P6!2*fA?a812@=`ylvX1TQdx(3p89C zm!j;&y?ZO2m8+4ryvE3j+(jEVfX`!opbP6OYFgATXSNyil_aJTd&SOHEx22A%qmla z#1g^jgkf;!-E`b9BR1Wd0oat1G5OpMLIVu~yimieukg=CA8`Qn3IN@@vBdC`|JGQ^ zix{rEqbb-sdNV~T5VmR3&{&)V5MV8mj00VDZ~nrwZ}3!xviAFE;>dZ*z2jCdS(zm^ 
zvx_nSxPm}86m|S#9*VNC@X6#j_s}1!gYg2AOW%&(TC;;6hC=>~*!&jrj#$GN+#f~% zQ0yOeF4(-tr!M)8TFl4TIFI!K_rF3wH`>E;`HQ*1yh+Kb1v1x>?u~OPp%H$^#RL2& zT7`xmKgxARDD7nQW%N|_rFTm6ndYBT3i26{A|DNwm)XTlfc+g|pnG?uZFw9hrFT1!M_`#_rTU|P-K8}Bg;K@xK ze+DoPB0zVD#gwo0@7{&s*NA~iX*IuHOrGb=nW+ZXN^YaAtwEo3D|rLykr0%ViZ<(t zqeJ!ApRYjIqs4Mq@itm_U=%X|Tv4F=uY@1LS}skV+dR26K2j$9Idoq^o_(P9{nKr< z9V7(I`K|lEFPcNnN=4%pewzHZVgCm6l@(FNJXRf$*>%j?0j?O(z0veE>2C;lOy`qA zIacV#5t@KfsKS|2$+yeHtQ23(jkU$C%E>_zOZC2mjZ%xBN%D&n27Ns|YlCwv8%U3V6K2v=)*t6GP!Li?E@ZM!%`47Hk_MxGtD zgaN)4xe|`B$!>%P?gqWyVj|jlP0w3Ai&Jq z_$0WfG18V2nZu|FD~A<;NmmM-zh8mwufpX3tr$`_<;LTu5Ddxzv4)I>JCZ-lO*s7B zj7Em-bIP|&BxfqnWi5PXXwj!H6E+VLHL~t>yzk>#kJK8H0rg4)-J)opR$R}jy?I_Mr1(Z@@+T8Vn@H1<^{O2KzFplvxH1QPGdQeLfF>Kf?8ozuSKs;4oQJPeI>{a z#oq19il%lvRHjqTOylP}pZ&n;9qf#{<#z&fAv#9!=)iro9MBa-gW?-_3_H89O?|IF z{Em>u9CPtF>cxdqdGb>m3x6t=?mN+SHj@-CB)h#ngpC;Fq@ib7kIgHR^*(XH2S zdgXzx({bAhLgH#^1zVjs**0z-J4u9PxYDC9<71kAj3=i{_kaEC8)wX_nS$QpSzm_( zvcouweeF-?c)36@IuLXM09OI%hCp~xRP^9}cOVrj|HbL4_HQssn~RoT7~NBt4~A*C z**{CXUIVVaBHLsy4MK9#m-P?ixsIpnQuN7uQHdQ-C%{z%x|XWgY@#m2kU7ajt4fw^ zox%(yQ~wnZ)exf!9$_I~;Iu7`>dp8OQ(h=c-}&L}^gGGGd}fYosBv8sA@&aF83edW zK-Y&k?IK?qY2#mQLdo?@Td|NQ3p`;`iS)Og?R3N2-wBe_`K!$E_%Mny=fS#MU356S zzM2}ND!lUzlN47h9l-TW8R%M)gtgfo1dvv8iJ?i5^DL2^JFN4ghfbkG#P(VVpWZe_ zd?%^+KkU5;JXY=h?`xhi6QX3w5SeG9P-#-hT$Fj9$(Sg!l9Hh)k%TC7MKUL3$SjJG zc?c;~I^WB4+t1p+{j|^j^*iT(_TK+F%Ty)VX{B>8$g zh>9xm^QF|@133pHJUtVIrzoU!QofmS8o8G3r)YaeniC)3$)XKzU+>gNB5l?UU zDQ8c)f0&>B+1_aHNtR4lzTe(qNJJGMK$?SU^qGUY^4FWYLo;GWfYUQ7t+N+vv9(qp@S#`QK*9CbnR74Y9f= z4g&p8p4twUQd0$!pI6lXl5A<@+spDr&zg2u^9_|5o-jl44vP`BU<1)pP=mA3NsoggENQCEtB3j6U0+HE+qPRNt2s)gPVxnfHrV z>Xn1H!u`AV7sl~fWlk$?EXg+Sn=w|me2guG>=m63p|HYy$o>WA`uDGmzG(Y9c#Cro z-BXx3E-uoW*;jaTw|6fs6Ypwx;Nx7^oT^l{6K2MS3as+<*zen$V09l>yfw3&DWa6j zXK-CiyvBRxAaf@LGtGR=@z;-oM2YneOc750o*uG#OvDzSRUt<6f$sT(e3w^`ukUK; zn`M#2wzG3s-EB0%hU_QxTPIeimCYWXvy2^-*?Jp3&G9&4F;;51PXFxHyL!@vjNY)} zsfTf^jl}FnI)!2+nA|3w-QJ$R%@lnHW-|`wvAU|2=0tV#mSS;SG88=qhbRff=0lmL 
zOPHf>9%k+e&n$m7R%vxDK;`kyW2LJOW5SEl>Sv#ED%KITeMp~UDig8A=$c}6f3FGV z95feu%(1unmW_ZWyEe1;#nL;}!*|5@&*zsKvkB0iYZ@nh^oZ-=f%&($*VS|#TPycz zboLHQq|D?QcvoU{&9J)mpLc6<@&zTT9y#^&;>#M&M250Ff_;-2Squ#+Y^N*&Be!b& z>MHFD?r|lhz?W*v{r&nJSHdr56k&ADvAUOC{69CpWfN^qYFA~^ypz|a zwOcApN$2KP(>3bK$z6_b=U*zWvF}fC5 z-O8Mp161Y$M=2iV2Iuk{v%h(}%bxww9R^v!L!on_GjUqKd*6^hl=NL(FZPShhQDc6 z9jHz3Zm<+D=bMh?cgMc3w#4e1U)s8tdmwwiJ=G0s_5QiTKG~(ytEujh>DIB6sgIxf z%57`i7pGUs$J(hm^vIbZ)Z$!4dnCn7cIyj`!s0_Z=sU`r?ZFDGn>~EeM5q0v**B3( z+NXS#^|!DMH?rSj6Pzg2PHG|P%sb1}B&T#t=mMX6!%p{K@rNZ(vs463$&dYd*lsDY zp9=fAsWn!&RVZV8`cyNho7ICP-HMkP{a6ZgVmJ~>}{^QCQrjzNjTZ6F7YtPxtg}%`kvz) z{*Jzph11bnsjk0Mp`7kn2z@q@{E_E$GaX5p#|J4BMtjOGZPVZhjIJ$KHz$!v&4)Sp zq5~_P2qXj3|J&F5StA2Jcyb-FW z|MFpYhuF$%IC@B=p}PwEp4I`YTl&%2 zkCLbC%?KN-U3!cBIkq_}#c~m%dpis>br-r?#W*>sT_4vb5$e#SeW_fuH0!uc*LJWX zWm(FVd*Iluow68z9kIH)M6U;ZrH3+N7RPwrcO7?p-~C93rMAPwesYY9lSHTaWre1d zs>#x9YVi*OJ~>cEYm2d9FlsWtj6E7V|mX&q=L(NV}Hgvxu>!}uP)m-(|^K#ugw{& zE2*jWShx z%PCc${lzvjn5V{5yH!Kr+WClbgF)dSmBD}zfrH&@Stm-#9x;;k|GYwEazd3a`ObL) zff(%k=7!bvADGh@Oi9y;Vtz%_awDK}zjyZY-L`27?JNm<33sR$9w8Wv_i3aO_w-l} z6BYYX|GHSjlsIzW%@dmSH;>E{E@J$3$LgLr@bu}s#jO`ku>}rM35ediCHuW2(xmlN zP{RAtV=I}Y73qm*A0C=}*m`zlBuV*`llQ<&-Q9hGhp&`qMTUhGVxJ2=u(}~{uPIBFPhhb?A(=(N4zL@ zi#2_2wI-v0@z)cp8%*Y~`$=z;v0>TC+P0bQ>$Ky^C04uQnP}qO=uh=|YU+M5BpYyp!sz;9 zb!ETYGl`d&ndVpADaAGN+b93UxU5xezO-*o_krTf*r^h=>k{irH-26|-Q68XeQg|Zq!**>kJS~)VD9E(-Q~$e==19o z+i=+kOV}<}&ZfafFOpwGlzZ-?5F6FVzRTbgzt{WAnemy2|o;pLyD>J9$lsg4!wns7LPR1Tdno-%Yc8T;&-UE#8C9Lk+j7+2(xz1NX z+cRO!+Y`JT&0JcO?|meCt73mbY~u zbRXdy`rE0^^-dsGm*Nb$8pZ4xU80b#A!nY0B|fi|enjmbGcL}XShVg83aQ?~D^pF? 
zRp(RcDDqhQlQ2WY^Zby5Tf`Yj+4syVKgRkSgw;Lq@?&kAOCBxy+}Z>YNCW$(ai@7bVpp;ydYg+nABX>1iRS-&WV{ zHMEih!S+wg!i&6q4^&>=^DNRLJtx`V(1_=`r!^SeV65)qamv!T48MB;-`e7sK52he z>lk6K-6w@cL9w~Zgly8l8{hsI-{=$%VnSei<0e)2tE;&~v(_ZXua zg4JdIowY#kuV8oJL+F00!qv-d#CN(X(>eEvzgRCH2weG@Po-F0c%1+GF2R;2+CeXI zn$ho-@9v5*`bhom&3;!YfYA-b>h_BS9%>AV`OsU+8e!)tWFV^Zp=33!a*zWeeiX9QEAK zH_P2rY$e|Nc+xSP+OMc`-h7kZL!Zokyu?9@h)4zka3&g z9HAd<0{m^)^GW(-L(=b*M|Jr;D)Wq3j%&I6EJ3Jyoz{c5ajOc|Xq0e>BukvEfbQZ4$%oYqTJqaz;a0|Z_;FIY6n`G%{ zeZmuYuaytPnzCiP=9~OD+H-7*PCW8cn{}-ky-VF)bwes7OX>byBJBF%8djIWV97L) z=ac-oh%Lh{`6qq9cAWLIrh1s-_tKMyjU>NzX)?aaaczr6;arfkBw4TTrPmq*&_I%c4B7^L#W`_s&JCBfI_N!ir8x^>O-M$#W%okQA!+JbzzSWr6zDPDa8-R=<52 zAEt!-xxSuw(NVAPX_$UD5lg0wb*07O)FJ#^Y8UY!0#Ja-PgOX?&p*s{M7M$r<0L$%2rkiMWP@{-XPWSgeblr z(Qmd=<(|25_WrZuf`q%p$@6!V#D>{E7}`B3)BD@Vd8kGZ6Nj5v-QdxprS}30tWH_o zpUPmN8`p|7x1EB$+$zEc~FTAiG)Qz0cqVr|t)vE`pzJ zyA)RwG>ko>9EC;2h|FWS^s<vTyd@lf#F$Jx%bQNyX?UV0A637)+-X)0OWweoDJgU#3{KP+Xv7 zsDF#`>d>JTrQ8n^+Cqu4XZkcwH1@vPrX^D_@1%Kg@~DqKiPXojJDms5VRRF*x>x1) z2rO>hUt%L5B`m54z_6h_lw34I>S6cQ9Z1Jp845?fcsu z^A^tvd_FjA>9uyON&f!zu@tX@##iAOhZS2fL+rZb4p!H>DPyJl)Z6B68j<->r~ABn z-7d;jJg_!;8|_JR-TtDi zT;%Q>f_o!hJ+LW`=+lV9{(V+5R(Cq~N0ds!IJa=u+wGUXN;npX&mNWwoO$69=4c&R zQ$9Qu`s}dk@bFK6-)B5U(g)Xj&llVsy2{U;okLKq*{7V3@%JuPcjRPRfTp5f?PA<+ z<6=dVJ*RhiL^ZPA!gt`py|0pTb@2bZ5dmpP?@giqpuzpT} zr&KG3$5&u*_1m4fi368PZ@OKqJv}4f+vdGOkX7qN$^1o|HN>kn%4zp=kym{~&MQL| z?)mkOUopC=Sl!=}xy#2MUG~>Ju|TjbD&OWcm;7Zj)n+5-?sGAhr-rW_Rn}c?&q=&9 zH*!$b&c|0G@x}ISG3(OA|q`8{h7S_&{Bo71#7GSa>Fc+%^6orX`D$4jkihh^wphy+$#_gohWHuErJ zA-9Sii8N`ufYD9I>e8`Yix|+Rf1119sZc&**UOH)ByVS`lFr(+v3;mq_d)S*!Q#M@M~i8;c4~KC%5A?9uitZRj&!jb{EM^1d_sM1{+5n`}|ZANXAPc~Q%#Ce8mk z1&dK)?1Nhdp=MIAF#bNm>N-U77fJXC+8L)&Zt?1J$XIZXZBKb55*jDQwpGt^R^mE| z+iHOuZ3=5n80mSHo|xrVBn^E55B1)>Ij-*ZB|ZqFn~BxEL>wa%cUqF{g%HP1^JcEu zMy-S^Gh4}}qzIQT^a^Gczc<^)ZTBNZs9c2nu-A!`ov&j={b{IVx1GtDPPIHHA&$|_ z!sPsy z*Eh&+awsPbwC&U{4%3_2t8lbwm~AL|mBRaI?Gde&Kq9878>Jm-juvZP%k}=->2%e_ z!(YnlYsvW40k 
z*}m;xT$F7}UOhwd8sl#PR<~w=Dp2bAmXCIKFPy5nHArRF{g}@>E$D&ERk98OVk70f zKixeZJ#&23%lI^GtZs-p zoj-XduIlsG#RFTl(!UILKEvu7WKtQ?nw;i%X92$pe`e4z^#`FxYG3vGN0oEp4xQp6dL(y0=wB#1qbk|= zdrtk(GxCQSscqj=MZK>{@7; z6_MM|KfXE?HZ0fXrlH@~#=~7%@iHYQvUMmwO(@l=#=bPfaX5$2E#$_4y0>BjH)kaj2{KRKNkUidTnnWO+4l;nLd)dE}qOP_~?@| z=W4&DeCDjMqPCMu_6Qg#R_0`P((Gr?K28r1(F4!symub!8hejk6pKPrhZJa<#tYv3RgNK+imyXMV>? zmh;28;SZEcl?;t4&K~&wK6d&h({&Yv3R=f3{YTXIv_-DarITL9=+^#sx?OJhr$_7A zzc5=e*IIE0%=pJv2<_X^kQJzZtH?0u-K{LC7gZ-Z-}nlR-TNAT@O3cf)M!?k)h=ms zw}<>i$xImCI;`$#PRT&aoHA>r$SOU**P>k!PW*OpVeFB8)jcu<3WSx<_J4aV8>gsA4)gAI-Fqma8^^42c_`b&KRz2yxzKhJi+dA<2d{Hj*cH)3|g6dkP z>PB6C_PbJLDF<5P)7U%o?Q&~^BgHy$-=^gS=>A9%-O+H%IMP${8Ft>R$Lbc=ao_4G zd%2i-n?kKS)#O$q-_x(j0~9n)jdCfk_;tQ0?2&be*?Bz9Yvt^>3^7KH#T{H><dV`UDP>*syR}P9E)`4*k$H)@wN&ywXD-~I zae8&|g6q;pZSSEl?B7E+VRd6C3br4;9wef8xa>;01@#ZDu>A*%hGe}SeDb9f>5=^L zX7qvB-PGO2^NG51KA(5O4;Ng2c1=DYO1|oiP>u0TYK*^cvAV1pfwxn5v^y)-orx3q zD%%xIC&e3MJS2s4&Q6+8(J0>*q%*#^{Oh+L!*Ly9Bd1@NbqOqAj3xH$FejzhVd>_K z(QU@+RR)1^e96g4J!T{dtGgtj?@-C(Uq|ai9X-X ztrQdXy?q;2_i8_z?pdA1_46aH!q;-Y#ibGN@XC~3(0F+9yZnk3A<9jiOQ`taFf zWB1psfm_bFSvyMKHp?|)N<6sgXG8S;%6Kdx%Qs0y3c^m`pqFF4-KuffrH|gfy0S-# zB6R5Yg~^+dFEF|tSY2=9$F+P;x6~4LNSo2-Ely6oHMP4|bGg-%j(Hy~cj=6gTAhi; zg5~=w%J(Jm#SIRhvO4#*UP!dvC{Mi4`HQVDMz<5I>&d?@EdLXQlG<)3db|GJHi$%Lo$x;FV&(1ZSgiDLHR9b+VLmfPw+pL# zFp0p*NY6!?b}xZ!-~F(myL)CTC=<8LY+-HDJjOp;D>6{VEf)}Dt03-GPMpk?Kx3Ly zNOMm+OqnuW@5F(*WQ=Y%R#!5PM9w_(LYLI*#h8ThS zZ!cEYDJdv~>FyJbYXe%Z*N+fH&$3)QyLWw*mLn!}%#KfXFLn56knXMGUCPuW3O-{T zMFxi9^k-U@>-Of|;vos;|!T|vq8aTb;fqpq2GM!5@3>!vo^mIZ7A8H!>O+5tPhuPhIeF<#_Y z%6pM6Ez^#P!$+*H|Nhs4d>N;>S}US5uUGD(o0w2BdiHQceZ|9|iNSs7!Iu%$3zK5~ ziv^-A%^~L#<;b^4>{<`}{_7)m`}^}^<#ZU`L9A}KxA*6sd(L-u$wYrV*BlhHRCZY8 z)Mvg#e%_8;xdW&4#EH%k305k|b&MR3Adg@h&exZ4r8#8W{;A$Bzc#r^2BSNK)pcRB z&7I@7|15pNi2C#L(2R1i0PRximLKthukY`+U!!m14;2tP-6R&KR?bZ-X(5_3Dt6>0 z(=5+#YD!a*>wX6?x}UJR^t$GRVM|62b4Y3rDOa+JaJ}L8Jr>dxn8mrk@j%huT4Z90 zG~?X5wWIiV@$TVO33>ewo)d(-pPuq%^>E%Aj9r%uV|5=D#O~ZGO?hNM!K<_{VcI#~ 
z^P}6m3UmCSAA%qEJH=@*x4&VIH@@BWQYf#`Yg|8CUNu$2zJk?2Wtx6)@6h@x#@`XF zu76HtZmuv}95Xp#P+%Xq)#!v}!@5>i{llUAcho7xUrTh?5}V$?*(kYRlfNYSqB^_N z%*Ts1WWL;~I#o6wn`$t+qgY+mi4u~>q&Wel5f4e-j|tK>ZE1SLS#pf2x+5t1hr)!2 zHx-e(cRFX*&!3fKJclbN74LKhyRJTZ)#_wL^lDiY`#JAttS&=ajny1M?@YLG!Srv3 z*FR{@ZHmX{n`W#TCeTvH>cJoF--*O(ZU&37&T7Inpb z9x#U0Jt4;Iq@%}tXy=h(b~`D#p=T$~hMy_ad%$q1isZ+)p_F@fcNsD0DGo9##z~dy zvM`S330`I-YYS{xO9@w%ytN+_hjFZKG$X6c%wqTJpS-)b+m*#1xp+F-wo0#+ppjK_ z$#<-_-T$$DydW3vfKY?^aMre6)h~wV+%=2F4mj&PKG8j>{Sl)(fz`dzDHHN2sVN}( zu&|4B#4LqrsLf+`UBR~L`l{E#+e`a=F79!Dam|0HKjSm+x;JO+V|PZj9`NL7**Eky z&_jv`J6}vF%narx$IwD-=!+X)zNWQoy^#oP$QeQ!tk~rul)FLkJoftv?!zW zIq2n-nohJNNKtSNRg8R7O)h$OQ%e!!?-W*7ob3A-Rz`=5oZH)6Q%WE9zWaUS;&;V; zUU7H#%k=uu_uQaj=rF6DH!2i$;vo?4EQ!~xWhC>IP!B40Nmcdv@7^0tV|A(M=gJEU z(tljLIxop3_kEhPDlTrhz?xY?dZ6(#Z$o{)600xu8L9%B;;fVIjCS8So^=WayY;U* zM2cPJKI(^^=V!3G)Nfxgu#%jydueSxv1+bgMlVUrP7TY{ztV5#k{ZDMVTLz zS`|EXi85{)+)=45D|hDnpp>RZM$Cz^YaW<5%wl!#pAR^_?Y!X|`kT~(U#v+u$TLzU z90#;%mowfwY`eoj`sP04?G=l~_Sog6_>b(gP4R-4WVqY)Eh`HRty_GzV?RHh!|GPD zPq-eBqV#;frW^FE)`pw@=U0Q53T=e)vK29h7ySg=KWAu-FI;;W?LdEJAMJzRVKuVL z`#W|VT>V%lIvQ^0h4FVDt9!bye(CkIi>)7HxIGh2bgF6#76^x5=le8A?`(XU+>G?B zzy0je$DYd*-%bwKct`K&uq|7kk+dm3J^5{R3&|q(bEhv@-B-EK$#NHyZq%wdUK8B? 
zy}{+nwU&9UDy9(n@tb$A2*39%O?$T@G55r|SwT@+(5e2sr;_iTBUXFgHfD&5v#!0z z_`87B9W&Y5aLa$9oeTvP`FBY-7vSZf{KS{ZjzPc+tQC-p|R*SM(a+!km z4m-zc?qc`A%5LHjSt@ozH5a1AojjjzSdfaber8XnW~iKd#qGPd@he7m39EZh^MuOX z!YB7D_tAa3+H2j?Lq~LG?UR=&&%O8a(cZ*&#CY?ZV%hf0)ctrdKv3pC)F~&l_qcGY zr*4X z%|5Fh!x8Q(5^{giynih<{c)emm56s4QsnMj7~N&8?skX#`=h<5xvow%>G-8mUiR{4 z49g>kUXtjUQko=-_AyMPU;0(+^YFLuuUc=TCq;P`lWh^(uk9yMC3(hX^ri`;`wgp` zp***{TXD#0r;$iA!QGutyX;E3Eo6C0B1Gld$~1S%S24ayI#b-QsOq2i!afMfiI<>mzyI zoGmQlarzRc6nSrz%~nD3KJmwZ%;qb_Wz=rBZdtfC^)JwVTcix;Q@c8!(SJ4O{QWN& z-Bqk^+)fu+UItekUdeI`fr`+Le zFxD4A*BVsnOG--2C6|&kOCM_Dzp7yPgxR>@$sn&5(OBP;#Y#0;(ntKlM4xd%c=Jqdm?rqsupw~dbl6C4ZR z)>{6+Tytm0Xy+ki)p(nhsphiSB<%X^7gl$_Um3|-Yk!wz-1b9X90=z*&CcD47bKt- zO^exP@o|r(rL?=&!_$=C^&RR6C=7kgUpTpB|8yEixOA6oT3}Wv8xx0htZv}Lkx`;) zA&V{fLFLgp?{4p0*nK8{$GO|fgyPcSME=c}%81`m&2;H``y`QkSeXv8e9Or7da9sC z@1Ub4)7_J{7~S7k-DgLZ$et=Cd3Cw=htcde-R)3tZrze>T(4kA9!O zY}@sar{S;%TaN9Q))oetLz+rsvAjd&mlCjl7fL_`7r|{LCR;y|{W`UtdneeqN~$z= z)NP5cPgCp|hl|5)J;i9P`;rcnGor#f=BSHLiiBt1Tpx{EGufuyB;Q2kCYu*_2KzZA zAy&8HF8Sx#+H^9iv;)5h%WM-AoR2b4nwOl*_$o=K98G&zq@5+5fKr{~QCpeLL+V(T zv=Sx9(&FpSs>AhT4d0YvKPMx?>Q*Nk7IxUw#YZsk9OS;WNPRFd!a2 z#LQ#W$4*|!{q$+?5q?rK&RbK;Qm5Q(6GbP44@z2+bv@O9dT);F#8};>dA+Q$epW^O zucHBmPon#~b9llzHQo-ew^7KvKepXJ@QU%yj`*8fPo8l(5_)N=r{)wdVM=!Yw^}lq znpabeA27NkSY3q+6J`YR0*jo^2;>&IP_ z6m3ru`tJ>h=mtycoS zr7ao+WFBS>uLIqG+KA-139a2sU2LGx%+^WF&C`j1;2&)0I3sBH(5?b4Af<;C9ryny z8;XIO1A60vixtZMzicA`J*1ER4t?W(;I8ps)rMl~ZeuEH=T1N%Dh4%1IlK)Y1AII9 zPw5LAW0W)xS)3buk8T!4;ME}4-Z>QH*IkO z0{GrsAhOxH@9{BpbRb9u8!gySyF%aTAOSXQNbf&l+t4%had(DCv>J)O`?TG^*sy&y zPV%qgknIrU(eoR2osGBQaOl{xu$;*yC;g=fwbxh8G5*~McPzW9aaeYDoY+x~0B0JRr3Xe;R5E%yHY?_xvk&(z%6 z4PF-$Oa~DVP~zI(e+3(=+rRtlywNXhY|YVkp1z0t{cP{`AFyGs`!e+J`EApe|1WLG z7u)c^?ko142j2Fd9~Tt+&1X97^V`2`Lwe>G4$vYA2#!SmJy-l^+t78pum9BtK0yk9 z=k<@DTcLUDUq8$Ge`MRJ>3{rx8()h1j~|Xd5+4D41n?2SM*trId<5_jz()Wd0el4T z5x_?P9|3#>@DadA03QK-1n?2SM*trId<5_jz()Wd0el4T5x_?P9|3#>@DadA03QK- 
z1n?2SM*trId<5_jz()Wd0el4T5x_?P9|3#>@DadA03QK-1n?2SM*trId<5_jz()Wd z0el4T5x_?P9|3#>@DadA03QK-1n?2SM*trId<5_jz()Wd0el4T5x_?P9|3#>@DadA z03QK-1n?2SM*trId<5_jz()Wd0el4T5x_?P9|3#>@DadA03U&WCjxndL>s3KkpGoZ zDZ9Iwi`qK5dzd;nh&ni%+gsT>Sc>YnSz4-aNs4p1+xl8MTZwQp0tcoDKbcQw+}5kFz0vjc~RBu%UX<0%vfxL9n4L{9X#d37qW{&W3&idIIuj z8^PHaAisjMea6`s!DfxKjYEpEOu!MG4QZe(iqkQiZ4zhO3btMVZBsZK3*_H#Sm5`b zz=nKg1z1o9wmGmNSJCfNtwILH>%<_N5BcVz;P(Ht2oG3sVCx6a zMvAiuLVjSw0&N{<6N3Fdk|*?T0*y0jeV}&ZY)=6a!@A!`aj!k7A9sT{xQt zA30P?8LXhZiIRp}t00(s;oy3Z(nhXCXYy1p>(I8Dgg0caD!*$zYA6+pUs zaJD0mM?NE6QJn24Fz9f$$^0Ds^j5CB{P0)ZglG7tvA zF@zM&J7<8?fDWJw=mE!o|X#Dfh7Qq`)Hg;<2oA0(YTGqX*33#0Ox@7fGJ=GSOAuQ6@cb38^9K@1MC3@ zz!7i)oBz4sZb5fgJ!RuoKt<^g{dS1KtA-0Ghwhe1+z=?a*dWAJ_&dJHQ5@`I87B21o!> zfD9l92!Vre{2|~l@C_IOJ^{nP2rvqK2F8GKU;>x~rhsW+2FL}T08fE@AOpw*vVe!c zJs<@*3futD9DECi1LA=wAPfirf`JvN%PO!2xWT>&a1PK0)B)6gr2uI_2H*m?0X_hY z(`*15o6%Uz0idyV2fzvZh8Pk+N(c}EB)|u-c>wQ#EFc?r0^|YtKmmZ}+Y;b8Pzpo= zSAl5YI&cGs1LA=MARO=nyZ~Ro4LAcB0EWO}fE>^O_5vyZAFvDH2gm>(fESagd2jDzl3YY_yfE6GD>;v`#a)3Ob04M^y z03$#LkOG~cRRtsgcY%ArDtVeZYP|4Uhxm0W{YN07AfSKo}4K z_5h-Q81N0MumFq!4}k>WHV_Y>c^R!`(Aovf$7nr82l1f?7Qxpg;04eF_6SIA09!x} zPzIEME+`{|v>L8e1JnX_z-yo$cmp&5jX)Fd7Ptb00HMG*oIe3f0-1m%U=7#+wtxhH z=4Z49KyyA?+n{v|TBj%gXdQyq6KGyW>jgo83H)aO_JUukz&zNdff-;H$O8(1Lck92 z0(=2C0L`P$fD5n>UR3hWem&G8e!DWjg>-u&)5C zz%tMT=f*8rm;h#A9dw8xjeyh`I14Po`5%B!z-K@J%7uU*z)#>8 zuns&1TA}<7qzS-p$a_KR0eAu#ptTiJN&u~Ew*V9XIgkQn4*^Pm0=Nn1qIIwkzz?80 z?FKp^j}{9kC4%G7^=QCO4PbSfp{xZs3+3p#mjER|83+QKIG}>lqlI)+BM|JD09OF3 zhmJ7>8`9f6whp(C$~G3qki?)t5w`!?x(8g?7%M9wlM7@5_W=}x7yz|>H2(PjXbg1% z&^$*Apn0zX%29jD0UiUF0pzm*a1RY$IY0@Z zwbd42bDf3C(OQ-PKy926pvUc_K7sm&Bp?Q$dxefc{Ta=l!T=Y5^pW;X0IkoM09F94 z(Y6A}Cp6chbI^Rt4r~M10A$TT+vbklF7kH#^ih3sfNLn#_NHm`x&7}7*`AzX^alTFZgq7H11 zCCZTB59|Y!098N{K;sN5L$=LhQ6AkFlp@eENbdlEG}VC3WA{NG9fQ^T*D^HDAPsaJ z8gDksH_yZFAAxeDr3oAY(6u(t-Mq%X)@vUev#Euyb@*S-L3Kgx0^M_T+%e!dfW}6B zKo3B%)CNuhCxBDHX+Rgy0gxTpEP;yv>aYHQAK(l40B(RS;0~ZMA02BB7y)MhGr$-) 
z3z!1ufpdTffc6on+ybxyHuaE>4d4nOKkWfKzy-JO45<^~2si+{aCvV?F903@8t=V; z&Gr!h`9L5D$Of_ibUk!@Ch!Qz08)Tx;2LlhhysvaBoG0F1C&4r5DZ+w?T0}c3d94q zf$KmbZa)FiI3O0d1>68`0x`HevY|3`E@Cr(5At_`J2-nXq)9+3a36RGqyrCtGyq*6 z8-pjXp9?$%UIFF6OP~xW1)c-XfD!=Jvk)i%@&Vi{%HPG1FT&YgKq`%cn((i6ADUxc zgAL6yJ&?WyNFe_O(htA@&=2$hZ-6SG4yXmHfl2_`Y5=5D4>SWvrx8G9l|Tc~1fXlQ z0WCl)@DAt%+JO!L#jYFZ0(yb>z&tPqi~_R&(w+gPfhk}d_yiyyhXACHbUy;f$3Xy{ zI|3j-(f%;N2Ot~T9|JxElfVRU8bCgwxR3+r`q=ns3J>J>Owdd)T)F%mFh1Jzt_V?m<8UK6SO^dV&@(6_zyJ_~59s+c5K{CE$_wxS+`u+~6<`K7pI6x+kB;L6 zb^sgzdS=}TZ~=S(Kd=iB0MLCy>vAnfQ9bqns(=cBo`KMsXY+VP$fNZSLJdH^sY7}I zI0T#mjslv1Hk2KP^a!8}c`Znf0mp$8z)8RqKx;%D;50x4#~DJZ59k590D3kv0?q=c zUT1)FfC+FOumvmtbT4cGYrqn)0vrHGKp%A7Aw|!7&VVc60wCYK0WaVJfSwHzXx|pj z35V1d2m?ZZV89Q!0t5n=fB?WAj=cz}KX4NAmmv+p+0nR(jzjiPNTY#kKqL?WxPbjC zq)`An=tMI}%s1>?Gn zsRQcIts_C=$5_rni4>HGgT>s&+1}Q|)Xlmw(wIY4tQ3{Vh{7oZ7M5n7){sVDA&~yW z-uRk`P+C+9Ij}J+BM(j(Gwa6NQC~;bk(2@_Hs)3s8v?(k)*ofoa|=QBkr9=LbFAF= zSbBT7nHpcrmN5*V<{=_f5S7^Qz}mqXg@eFK|Joe8q1FLZ0zQDn+QHM(+S7@k=}dlD z-$~|fB0?EBM-r8|nY!4xIuLMZxUHGoN^|W;NAYfWvDj@xFJ7MFV$|JYk zEgdgdy1}e=__H3(O}-H#LOD^m68J?1?Ts3I>o_KRMsvpC2oa$yE;@`*f?5c@tU|Ei z!Bg~q-8poBcHqu=aDP2F^sqiF@<47w&CABt+y;(#>lpq0!HLC#$OE`TVA-e)q|$5D z<@#~?CL3OF6xq64dRszCrilLg(~MI+(B9BV;2e6#$p{`O9Ju-J%8kS%bQi=$<=|NK z=79=IPUZ@~+c!q|;cpMjoJ8&1;Rw#zSM7xdTXjJVq9!4#u<@pa1g50E2&)tfep#N<41wNSi(K+1xVkQD6 z+{gpe;2;M7V|_NN177FCIrMOjuh9Ht8~1ZB~K6fkuiN0zZ^&g%XjT%1#3N zJ-$$aLJG~w&A|l)hoH#NwP*L|Y$hm?gboDV{(tlut^;iZ`W&dyfEtPNwFfHoP0Vl( zE~INvf}*p^74m$TAjTL|on<{mG@b{t^ z$BmMW>##uy>f2UPth!Ld%VUnUmNvo<_{5_?N?b5na*ZFsS$)*F4Pff8JOT+Hk(tl)ZkpT}F!UFiRaE0K`f zc*}>b^FOw?KeQ@P5y91mcuw6(fQnJ$@9W6mO6Xdx?v(w!vIHf#xT!!1YVnDWa{aU~ zup zcVty+x<9mDxXwo0{^4%pda#+Bvzw`t1;K=kbg3fc-8UN^Y}~njjOa#Z7$*Pg&M6w( zDo0!$@!L3OqaD~f*;u;Sdbl&(nPO!d?6k)D23}9YIZWVn>uvkJHj!&#f1k4sCEK86 zA779Y;h|#2zf1n$0krEss6l)Cv)Uhg`?DIf>wj#C8*TItEfH#}0)r@O*X{(<-L_v+ zDR40WwSR0o8*y{DG2Iwqvt_OhX)jr(ZJZ;GrrJMv{XbPJ!FjN~)Llf(OtKi%a4r7N 
zYJVt!`mBN)8u^-LNe%Y%SIBOt$)djfkM-GTZ+}pOSZhJILan!~eatM=z38uY0AmF_ zJ>g2yq`I|I9dU$C?J<#wu~>JgA@6urgC$+)3SpI|r`whdT#e|G_u7&L8gF zpKAJlbLalx8(inl9ynUaY_tPQg59l`>USq@)MsOv_6OfU?GJ4Sy#9CE!9U*9jTZlB zumAC!jp+P4W7VJAo0+@J##}$H8R{i4lKia3g`*(Z$!`+6v@Mqt)fEwxxTTAQ~+6+n$ z|E>1##QOhg?oHrrtiJ#8LrG+~rif%ng*v#VE>TydqEI43MdzM-?!AXIInO!wy2?$Z zk|G&XiVO`T8Widi(TFrilcA)6(x@aQzxP^uKhrtSdG^Wc`})8Bx6jAf`&rLgd+oK? zUVH7ehbOb{IM#>zTn;)m^`8gY51(@Os-E2{S8B`t0>=5{fWW_oGIsL(+pE`GBRfC7 zAN8SG4rFkD4zDUqc=@018)k(bcoqB@W7J|Q6(xBmx7eL&7S zHC(0mt91=boDU?_yr=p#rnjp4;n;^H1Y>X;KxkyzdfHEl)a%*)$|lP$Hp_A3 znjLvZt{LLU0Ye<6!;sA7WTxie1&|$u4atm?8IqZUbUl!2;JFKpa|YyA61t;d%|DK8 zXSs2G95sX?i-AMl?Kl6PvAfz6+vb@#$0Hp_J>YUO*VIuD7{^f$7?N2!|6g(43!SGf z+$va?8wHW*K|~YRy4y(6bj*On!bkdEQ;b1oe38lewJCvi`pJAPJl%zzH|(F?Q0e`#vynrU62-KBNA_zj$}tRbfIhYwz*2 zLT27FYhvb}bvzQyY{mX>dsb%cZ3n}PDD;Hi-ukrG^6Gc5G-dsNE78o>GJS3)r)Cbb zP?`o~Fmti6iExa^+;cCfUwV4Q!{w+Cybu~SK zg3;`0C^e1fRlb|mHS*vyHGo52G^hn5ML6zIl4!8xybV8m_5Mc^f-yMV+kg|qe=mU= z&1ITZ{%XWEKm8PA4%l;uM*IPK##mKA1m>iTL z&KE1k40hgUThFj}%gZu-8xCpWp5R%S_ ziTfvc$1eaiAhhd*mP_}zM!oZY~oRyb|NUClnYb`p82U7;bJay@?du%UpaP5$lt zM^5PNf30$*3h+$I^Br&~iu3M`oht8L_}(`r2fs^d_s_k&2iN&K~L>oPu{_WXy!I^Uq1=YD8qlw0CIQEXB@FNG`1*EG8 zkamUt{95mtjMI(kbMEN^=i#}V8jlIDK7}D%A4C?IxqEt!YqD+=9+HI zE04B_!n>_N>rjByK|^v`dmQwDB7iP>fDWFyO&@}bRu47ZIB9*0@XYexO&RoTCS)-2 z*s4eWZqy(mA&_;t4dJL1q~kE3BYgu0SAZJnTXe&{E0;gKpCWTq9n|zAKuAMgy!)}$ zy`z&JhYSoWmKtGV>7bmT=8MOD<(_)G$FJMw?}j*%j`Jm`QQzKt!DScBxp~O|DN%5p zJ~KjOfi)yOP628RNsm)-Ina;{)KHF3gAo}WOl(~q=(x!H>SV4dFBbgRLRh{5Wo_qo z`C!GrQlgN-KY);Q+Ez88{#t zlJolV_ck7#bCfy2(hOE8(`+KNsuStxYKe2#-48!FMtEIgxVcmhO z)2(s^IWMSby7t1uXWnX@EqdcE{SHKCSN&@z%AtioP-`S1r=NLn>*Cz&N>C2^FF}&Y zAld-_@=sUwdH<|4C^FYcNYt?$u4zmQ5sGKq;$i;Z8y%qd4)wW?d7fA@Qtpoy2HIR$ zaL@U*s@y2+gO>ZZwXjHMxBP1H`p0`tWE__DC@lsD3Rd%yg7=m+e&kt(u-(zLlGtdJ zvoN-AUT%%U8bgF8Y7x-HE`xE^t{R$LTF4TGCb*STh?NKWe|(iM_~A(Gp)ma7`%FQCW**?6?VD6Jqu9CRx`?WHfbK7!~z zdI|TrnZTiu>7D*3w_Lv|r;&h=5y1k7m+)#narlQlhfjmI!8j=A5m`=+%Flfozy5LZ 
zm8Bg`r$)SCP1uD4N3NN-%&m|al1`2F8oHB?BboN>I#+0X=dZM-RZ0&B$nvq%>G=*2 z8l8XnyKH{mozd&auVMr1!6GfzIIxrCz{Oo_zE`lEaA??cDo2-2`uIZ}pm`iP01o$h z->6b*IL{vgX4cBz*|!&D4ckJ>Gtw)#?lgF9@T1pr$II3R2Oj}K{q(fAAE>tL&Xvmm zAq$I^+Y1Pc(u9c@ebl7y`)^-v;v5BpMzhZ*o&E2H*H5Yj2vrq0$#@WYfIU++C%k#X zxs&e#gw}t+c82}~4*qXPChRviH6 z%p14DcyLZuY{qp@RJvkGrB|T6QrIVgbF#2c4Zr4|I^Qg#nFM0tXCQu_-fzLD$q9_5-1Nq(%9d#dh)>`!&k!Fp#DQEHr*vox4(y9deQ6wAwXyZ zL@m>8JW~VYa^R2*VzX=Ss}Y*G6gZ^VT^#Lbp0J~3Tt0R&p4eyn>Zv`*!ct9vGal4P zZtr)gKWNy_->cRaT;u7zvYZm_nPW$4FAL@vyw!q@Bl%4zc+absssqlnIO?q;0cfidES|k0aqMeyaf;% zHNincKym|Tf^JzXuIFMQT?itjF`zefzmqw;l z1A3klICLA8L)5@QE~ru8ZrOBqmo-oQco`ra+VSX|FCks}1}c6!)csn9X7c$&N^L|1Bp%tgxcrPry`|IR@bT`A+RnBB(;5sWRRqI0C2``+|t_r*GHerasWwfQ&STcp-0vM zPR_R0&BCpQP(PJ@yIVIA!(X-G#`5ZKtP}AN9_ur=X;AY9W6>cyAtvhwtA^7p!e^8F2 zCSuIMZlLZ$I&-_;w7k)=Gos8J_mVpSA-x{la$wFszOM@aA?pIUg*EQ3p2aV`zJ1jv z_mjuPz9setUm$R%9a^$rZPOEOHzisjA$3lBV@{*@^RSOgmuMh@o)C-_Pn;=;9sgL*Uv4*7}TAl(b7+C%iuM7%R(3VT|#=hL~XCSJl`4cm}w z@kU+LXK~xz_a^(a!Tu^;chc)4JFw@roEW>&ikxnFmR~P0jD5YR788f-sp4beXYm6MtVRkhgO&g9O?`CM;qVw(EjbS80UPd zX=bfj2psAQqt|qqv@t&5QRa<(il7$tCgUN`Q_}~It zuuq%B5boQs2OJ*k)%e5V{r(t*`44jq-j<^rwu!&A_7A$Y_Ff4=eKK2$h%XW)Pk!Wp zX02Lo`1oGnkcZN-gNweU>JD%^qP5Wz*MNglQPZ9sy1uud$#or=8cQ^@rHFY810?Lg zDW}!G>w*mwX_W1gp%q|}c3dRv+Z`WYclO@orFDQKTQ0K=$y`nhrne}HI9!#JZ~nZ~ zrPx)eQ*(%IbQ1HQ%p7Dc=dOIg^^k${JpcZDWD}ReeLENk1kUf$*)wr)^522YE-V^9b4tw4~Vq!j0f%G&u6F}M(s`(;eB0dG>z|U;Ae}>i z)B)t{-@8q^tL(?ICZy_R0&?u&x05IRamkMiVgIcWAXLu2Jipdv`fn$jIOhUFt^LG1 z{m*~IGh1jC^LBxRT=mf8?)f(zqIo0L2OL}}A*YOvEZ%U}BpGuc$n}6wU)VmU--BvBsg}F?_sJb;ec8OiJ}K*uFu>)qNXpu`p(e5 zFTVXTP@~?6R><7inf3YzsF4>vX@2XsV~gK-1k|W~K+RF(8IoChGgu^8ivO(Fz;Wc7 z_h|YHqHz-Gcg-K}`hGz_nz>@sCawC9IFOthjxUC&chVoY;;(kIUu$^p&nzAG4+^gm za%&NpRjc`wc7qtgK7|Gdd4szOA9%i?;>7k8DdA!~{%9x~*KodS+7*pjj}DwgkrF5Z z>My@rpZz3yz{J(DEl>Wa`SJD+|2X#s(7JiH$mzt#JOTilnNe=F~{ z=Y8IR$e3g>9*8$vUOM}x&i^2OO~F=j_ONT+@1ydD4QqPS?=vvRkl}dG49WGNuP@v< zKJT_B;`{;>(enr(IpA%}#;+dxdh^rN7wBjM*j_Fnn?AX(>Vutbew86S%KT442DQ2D 
zv@hp;(89!d7ZB>(vpj$2_N`G&vqPeWf_4K!ay#X+$`945_Z#*~0Rr}Lo&^w^XYFql zShA->^?ZhKT=@?`NP7oWThXS@kX1Bi>I`86=fr*jr$zJd`rBS8ZOIVs+jRh;`Yejo zf2;KFdpk-9a9T)6*TKu)Td}7h#e@*qpql0aLhW;Rl?xhouf3dnB*|M3K*%5cqQ}R> zLz4!ReUp&Bl3Ihj9d-7fGpeZx87?7O?`x0L>9G1BLs+7t0I36Ncic8C|GXc6Xe}Y& z4ZDcZ7Vt06+1%*H`H#WFCli8h>bV&>G}o+l;h70G<&VIrv2 z0JWaQ?-$;3_o!B&)={{2nb#!J&&$AuAToyePk+&-p4(r2^6ksw6t5!h77{H3gvwbw zc=6J?n}_?U91-Wc0}ztvvU?x9>-L&|B1WuxgAV{gtx$Pf%@-%P%tC=( za?hFTtF>GMh}^+i<}V8tCQ3Y;hMsxKY5pqxB{fJCUav11N@z>xepvhLSD(LHLeRDR zFxUY|w&9XQIMCz$qY{Gp+&(~*GrZEg9hdz%KL!ZJDNvuxGRV7HaNWQChJ~kX+E^br zm>ZIgUIIvca6R?0=LR1g&*wf!)R=t?0fc1Gd!yg~?l}kJz`-#iDhDg8-lCANc*f3> zwKa$CARkux2hnSU#utof)ieK;+IfKB2npf53J6K(gP}{`@ALcoZ#Y6r?wjZJfufxK zeU|K5Gh_luN45_nKu(2cNcUwUr_`x5hdJPyo_ejQ&#ME28!P|2ZJOkOT00mH`n^Sw z6*pdb<|%XLeGiD717`4*f#a|TgXS_|F7qCYbeo9C9Q&hp3>7tP{p;?ZwT8dE1P*9y zgbbztQWG**GW*d3Rhr+_%jE4LK&bzG-2bvCez<=R#doBawg?c?(ak6CKf7=LO%!{Q zkQEYAI;3OMDYx{eR-o>PHq9V6)HH+KP}7$|tscsW4SsLvcSGW^sufdM*cCH@jX)r~3XOO)pw--x0H% z#{r=}_tH~m@0;?$29kk{%#Fg46sVA=<_#l;)cv#S6hPo5lJVu3Hz2339=N>avd_+D zY8?4-=q1vO(G{Y!u=z^Cv=jfiamTMj4HCtO?x;KLzd1lC_JsO4%)qGS0Li@H#i5_d zzU|N(F3%YYhJRM2b?;< z>AULKm%DHHk|HwHvrr$*7ou?-O5C;o+etgy&3X_Jig7`r%YoArI2TM_xU*x6zg`1` z+zsGtlGMiSXqL0Z@1>nrR8u%2>uwTSbtp3X_WIQyKNAoPW<>2O40Ac4wrzz#YK(^_nenIVr8qH5T<@U44bCA8UJ0dt_ zT?XFs@iS)(?JN8P?v0lNLe!3iDy@vQy!3OX2JSFoqC`SBrH1~zjPkxGINe&!FK(c(0sKb~gdx>vsFsh-S=B+$B?X7p_{t0Sm z1yaQqibN|kX?eo@YZi3Ih(Rp}F}Uo1bLfr4HIECCAT88tCrumgf3Q2v=V;vw8s8I6 zJk|8hoEt)+iIpf0OZ&LMb-K54H{Pv-cf-L{Y7YX^7?8}p{yiMct?x(je3pQC$ z@;0yO<T3h<+d|C^IuR*eoCB+uW z%bt6!%Iy!W@<|-XpcovGb-8lZT|3{cwEYc+uZ+M%#YxzV}%han6YQ&vME2 z()WjLufC}+j|*sRP+J2CwZb`-7GE}Q)rG&A<-93zCf{_$59%s81LnW+^7 zBffZfAhO}ubKjq`qZTYIT`1NFt?Ur%lr!;0c%v@fn4L4BSEn1F>5ZDw_=ARxn$DQTQ)8v|C+Rl^zCL5-S5)sfh+2sK8I#tL=7Xx?GiGq@W zW~dK*ip(51Qe&=1j2D`i)1~r?7Zc6JOdL0hbnuJ;gnZcUMN73A=ZqKyt|<-+%jale zAydp)d~s|)@P#IqHe7b@qSiEPleWrX21ed3sEtQCG-{rF&m~pAShTRG**=pb2dnp2 
z>(Tbp^$#+H_jNk1Uh*mJ^q4nVn|Fn!-;}ZLn?m%aR?J&iw2vKx9NS{azg68UpgX0&3K^yKcJVh0{+x>SY|Z6q^B|UNU4^ z@i`w{zgO5d^x;mP&m_(nwHh4if9_d&Z-lJVu7@(RmgM9nCy%bclM$(<=MG zK|S!+*n7e3bD!+>rCHO2=8a&pmS^J7u0?g;-pzf1Hf7~H_XV~Ge}funNR`k3?j2tF z?n+RjHibmXqVYmHQFLwfdR_X|+f@XJY=x65q`$N6K&x@loMC`SpTbdlxqTdEz~y9+ z0XT5PVQP=xDxw~z^{igGYt?d^@ykQF8v)5deR_Y@^Nm;L?OiLmMk{QSkc#`C-_opB zV_K7-`T(+5LWbV_=Qs6kT~2GRvZepHX_R;|DRLkfdMhz4L4W^Cz4Q?9y4k0YU+CcvT5Ie6vh2JNTcLnAHW0CE-}G-|$C z!)yHyL7l% zLhiib?VE19s`~di1|)UO)!apOC2)wh`DaZy@7y~B1%%U)MC~cNLyUmszpnap$3?GH z(E3~_+_i)iC_owl=cYdXQ(E3Lm|``=HG0V(fHVQ*uR4Lz|8{8WBbMR5T3^!LZ>&Z|7VXc{2g+OV+L35wv4=f_8?)o&A+vQ?G? z8T6Sfdi}kHGoO9s^LGSq+)sx|$hz|${``W$jruY*cviS&2lGbzj1nC1sOiW7kHJAu zBfb71{QAQ~zq@8LsFA**P19$tECbZTaW)0*LHr2R$dbQXqu)ovSN+_ER*HFf+f#%s zuVYZt(y#h%>(GD7jm$Os%JD!_3l^1oYPae==%QKY-M|p$AYBHeR8U0EihG5(ad*`= zkM>*iobJP-75)Z459Z@oVRmhdh+zXs=n;>1%DWn|h{B6%tLJ zJN1j{&sW}y`8F8@a1aj^VjFs)r`{ze9WISW4+0|fEq%9rg(TPaexYx#)xF@4kylNp z_JJ0VyPj&R%)3A)OcOrE#0{4(zM!aV8Qtl}3h!`p-v~tcs2QdJhg$B)`aW-l%SN2S z<*=|FybWpN9ZnC(UY~kHplL@}%ljM;(}n-|vw!?eSc`VO5pj#dF}yZ`4Y$P?HqSM$ ze9W_7`RGZ#LklxWzLDqLXFuO@_9vBQeR-0w!3#fGmGkVj(<}Z&p8CiGJJvjO`n-GU zSFS4R{o}He>y8P3^LGQ}$shY};HI}m?Wk5(t&M2Sdn<-sG~>)A@p{P9xOQyb)^-OS!QY$yzUJ{?k*EH-H*tF~r?AhCzmcbQS~B|j zdy6jbb$YCTW-GWmbk3)L{dH6R7}UQ#J&R30vaz`T-e9aC!SF>V4_h(yiY*TactRWb z=#icAK0lB0X;_57-{iAK-gL#<4;K-fo)tb3ntscugD?6t1QZguuXgTv-@LSJ+yW3q)5KAUTnJZ)Vo)nwMpj7*Sy&Jnk~P4x*d7ypZzZ$vNgV; z+x=flcwV(`3l}cy-R@@zU%9KSUaw1=yz?*eB=1nmX{T1aHhN(V_%C&lpL6&6C2v2Q zyY&p@Pep#|hd({Kd}Q18osn;V{FeP+Y`D32#-={VS3`bK(V!cuPkdp+K;)Yuf9u2D zKiRjT-{Y<0LJtqphjZ|UZzSN3(cDZ|jC%4}wQ-6%5ygT_d(L_Np_&j2J zk<*d|qTpo2AIAvdD+mShqu`>bJ)F2eep!hxQC7^ZbNiwI;*wY_34{wm<(fZ^Tk6op zh_-m;7Z<&8Us)kOx)`8?5#d-=3y2*2?qE2aOc3#44D%tXB2|?_hJ40eT(yb&5WMnK zd0C*KlQ)1RW?zAuKq!ui%F~Y*?j7tI)telBFAx3g+LIQVc^BF8`=G_YUXmcwa=~C1 z0vP5KKWqaJ|EQ%#BpdUIx65#w1)urJ)X*|X!A=tWzLv%nQR~# z_7;Sq(J(ePhhQ2t{V8lSy^k!V&`~4FZ~+=J5%m^C=$^o6xS%`{&|Em63pr6X{dmd8 zQAxu>6=;?mWJw+uWTE+r0$#D%+ofR3TOee~dC6_cB|PC4sR} 
z&_GdetP8!=SBa$*Y|xeB!PML&8YpxjoAM^8raUo;x=`V(O@Pb4UW~pJW^|X2sS?05 zGq5vR;RfK9!G5uJ2?Ew9UfQnEh1EV$9t&uKF+Wxs5&Sg1TWz@-1J8|!5zNo4e@W(oK={h6Frr!tx5_*z_*;&ybCAz$TXr!|$kt_ui zhVVoRSTvxzD}cNK7g#dISSq!#+YFdc>20}atI>T?l#%8!Qq?fRU;s@dfJ5;MR13pV zAY7Q+wS!x+e77u>$qBRp8+>=67=(dmq%C~`^7YaMnl9y18}K_IVWgRLmMtwKlPl=O zu!tKmTnMGSNgUa6L6q)8LXBvMjTDb84A7C>IJq)7JchB_?`|LMFQNeZ16DqjA80Qc zH0%#RgS;VksatG42v~BaBKw0e_=PIj>!@e~3n6L{4v8t)5I2beQ6!jk)evFfi%K-P z1U7;%iW0>g-9Qux?i4(i4LPuvts7<^?cbJtB#Bl~Dh zxVGWlYzN`jddFg(RT>+~-gP31EwwKE+g}82`vaD^QhI!Xm5L?aj7)a8;AMW*0n(vARktn^7G2w-Z784_K^1mx5=3Jc@7UM}Tn0YcL} zFHC(-Q^S;J{%Ax?_%Opyc}^oj$|KSD;R~lc4@HZM1M!rHm>9?4Kn2qZ6lT}Wfl8qy z5g-Yff;8kL+)V?y^^Ezs;1M)L;&@0Hk^&VYeLRGW<^drieO!d7J&6eY9uEPtkwC@F z91kf&IY7xs9}gi?JkVf|#AxUgmGBin6@31LSA(XfsUD32T{~y58{&PEBO@LU)0mD> zKLib#GKr+HjyZ4koSD;PLlW5|rwEC7vx_YwkH&jYiP9h(wc{vMN3|BKKg(D!&J1DA zesroWLR&{PG&g3ZsUQU1vS$u2gB|?K45-G1l!mi10V>vP#`=(Y!r!G6IN5V)K|(o} zoLNlv#B_;*lAg?}@|GYkRn`poX65?feThZ!S(Tv05iTHz1ec(46lm)WFz5%tDcWa- z!LdkdhnYu+7N^Bj8If>asYVPAIvouqU869muY!K+BQf|ZMseDR4P5%c`xJ$7AN9qh zx4;kyE<;s+qkcd$(vX~Vu6ZpNr)^^BIOsJl`jYY{$fZ01JNS>=u)r7cp;Coj-usL( z5YslK-3*%f5-6J)ctaGJbeD_0IO5D!epYhQFI5J%o@6{#IMt#`K(*u;Th*f01FX5B@T7e`va6iHvlRW(N;8Q*zzDDOw?w8UoLj zCs``{w9%ZvHqy+g%Hp9;bdfdiWXenT?5TPpUDgZ~`r+u9s^7E5r*ul-J3Y>7oRS@k zX22lB6Jg99bI)(rRmC|M$$d{RpEIa6MXF#qc^?55im?iTgI0UgPa;}eq&?< zrjdr2Nbsx}VGK2JaKvcI_jvOvpo`9$FM+0+0a5f*RaBPWFC@H z!ANR9ZKaXh>Ri`m#XD3qexf&8|C-~zvD|J;{nVD&9Q|} zX;XK{G@VxV$r?v}No;ANi8mscq#HEp!RD&61l_ZXu@>KO4kVJ~-HNir{w2Cm-9=;s z2LJSf#!~kQYXQKsW}&9)b#p7Tgm2-U4W3Hm`VlbT>Pg_KJuX}lPbeB~^`dR4?y77~ z9_W4=_jglTgG#Zt58GT-iDv79fSG|z)!aHL3y3&8#YnE6-|8P500}(_F4P`?O@A5L zqR&~`X;M?OKY)b&0cc2LBDWL7LP^}dfJ^zw(JW;PW8}nh>MiDT%8BRbkLXq=MC4t% z38R4{fsP4<4E0mT#IhxTQdrd+BB&Kr}PVg{r1jzZ%%qEb!HvUoF+*>eE=qm4w{XLy*aAD(C89 zz&8E@-InKxj_5iVrUgeXVkcbjk|ZSSKt@U>eSGx+NcflHQ&LsvYzp?B1Y%x9+I%6- zN+YiCcoFV^MSM610nbLQ%T7grg41JeGnK|VJp@#d$SQ&PA0?X(6EZ_1+=f;os4`5RAh=8O6sO? 
zUgfbAkO^qgZ4T%VBkww&kVl$OhD>F(bw@u^M52=4CSk)SHdsjGj2*Qu*1>!UM9d76 zVU1!j`O4wwE2PBlkdQcGPE5=RUj)(S5l{jAuS;k7xD3JbFy;tYCk<#_<%o4{8=`budZa44I$)#oFmq|cNrnr}LSi%L%D1%x;FOOMJ z**=@Zfo;oYrH=H~7$DLQIxKzUOfj}w8=MfWd7cFD_+&nI+caf)%SI{y)-l~xu)^UQ z;PJ0nw2Pn(wFQ=uMibFDcNB#gst;6F*v`ijut6_ESzS~R;w0Vox_2>Xl zFp-2Oo5wMBH+yM$3$n1}c=T5FF)M>Ww`RTFvY0|!`31f;iylXNpV8wesuje!Cp~tP z;h>%MHPEtV+4`#{Np?wsg8cz#Se9xt@Q|UAKr91J|KOMS$?i?A+qsh1ZQ zPLTk#lAzmOZN31^`YtG^J~F zxv*z*TF~j(yhn4=6}R&oCeNT{X3QZO8_&p2>i>vL}jxRs8|jfg>PZ1tMG|Tkpi-KNkr)BY;0=fZ5kwXv%LXfrw0*STrjWp9pE^ zcb(sfmq0=!Kta7iW)%cU*diBhAJzsAXV90UTq{5?F#nV=kzH^MxFlCB`!@XX5 z?8e4Km-3BwP`;67Em2K)QVU5Vpqv>baOnaqR)K%oWk$}PvcroHz7dj0WhG=w7iCPN z!_sU^IXLURTXc62us;9?G9Kh2W;b&Jif&+*i^?f5=RcrkX1F1hie$`~*a(t@>`!Pk zmz@V^bPB{|M_A2BaBPu=_KO7qqbYW4dZI9d2*c7hD56QUZ^|UfMO2wkz*i;^no%8z zmS5Qd8JRL|p$j*u6#{zdBWwv%mu`^}3D}faR?Um|vCmf^BEdMyuEN6_HxMD3Iri*1 zzJ*Ooi70HHt6ei+0vR(y@qDtW7%TJ5r&xqShBgXX9}2@}eM09?u}#4Gh^Ik;a4><1 zr}a7eq_ziiLyPr6P<$`R`iM4;+w#$9$eO3?H>3fh8*g0{fWW9Y{cs3N<;Ym5TG3SA zSSoYCiM$`7eXUk`e+H^{uiEQ8vtFV{&VSk)ei8*l{?w2 zu333!Eg@{We&;`zZD=hh80r5k1FHsukTv_CIg%X&WMqnMoXY>VN9Vxy~qf8#%LoV#Xe&NjG3!+!3{Z>FU(i~k^`6zs&%^0$5GUd(XYb|N`p_xo^ zC>bbDx*fEUZx99XE&>9waeT3uHykZY;<5#@eYkQ95jr?Co%~S^U&Nx%h!p$dQEYO> z<#A;Z%oaz{b`AgNP?R=KwDbGIfe>$m75UKt#HTI!P_!%%PtEgWRT7B#3e6N2NX-n+ z2FDXJ8xO>A;g8H9I*ntVwE0$)$uC;+*ijdjAWE7Zq+b*U|U2dAWI?dV*NY`Ci;kB(h)Bg2IU9rP?SuuK~d?u{&72? z>B+1N5r6s)Nc@`>hFJ-qnHlsBeGeMyC%(4sQcDtocnMU@3`DDLc8f%TY{_Nip!3z` zRO1p~Xf({G+*-suXo+&{O%G_e&5!R14@~FvkVLKHJ z%m0;zoxp#=He3Xggz*S;)cX--O$^t=LPz&TKqni6&u)|OXG|g@&lIw38~9CJ1CxgS z1XR?!(PVvqBQ~Yk^8b;7jRoLltrzSZ>WdhX1M?XuWv%gsn`!c5JYqUoO4V>*O*+OS z(8;=e4Y$~&V?4@=AJfqW1v8VCOvGIVz!M4BAi6`--E#2PwSmIFZ0#6>m-WTou_X4H z$$ci|7K)V~v2Z5RxP{moPeyniA)Kiq`r-o>Tsi#50J5D zAwJq#peN=c>Spe6X&5CbBs6aN-jHxQQ#M(B%w&g)V9SSqs8R%w) z!=S7`L<*_~3xbtzK@bV1E+?Q*cdD>Hzj?vmnXuSqO5T-2s(x&_7BK zWIYKsndJ>$;Jzp5ZctdDVD{*Nf z%Uhsg$sydX-a?Sp44^@a2BJ-<<6^fwjra1!l3;O32>*ARI;Oq~k_MUs#Xtx_M17+? 
zx)QFEi6nwWI9~x1P1kk0wzlyON-@$|si@9o=JJX0%uq{J+?vPI`9ROw7cu_Gm2s#& zZ37i?WOP&7ZiyC%y6pzFz6Qe9Y*y8`l?ej2JdabXymZkE7U^9-OJKYMGDez5SC#oq zvsHo-X`-^K2b)|P(M1BBt0$urbrDf|!ZR}HG4gD-ylDuhBIz_N=Irpj%7TRnw~lCi z4P>oZR7CC!4TPhkWUv!mwI~@1F3^to6Rv0@ip5)M5Lagx3WAZO)d^1!HjaApPu zS?$ng;1o<`fMn+snxX(Di(s;g!1JA3bk#dr2>7-ZgS^!<&6JfJ&`5a#3$AusurE*Z zCHc}gJqbMZcN_S-#8l?kV3f|QV3C8z{-S6I6K_PC@mVwM6m)ZmQ(pzO)JNzISnE=` zZ*nUb{a_cPTsPtIUPCxC?sn^s1u;qE2QZZnBA_>?=8ddHh&SlB(DPiypxQ(}1 zOogmLLL^AP<^FPd0MqP#(Z2bTBq1=kIC{cWUOA8H>eV8dI?lP{O2C(vGN9FYc{KJaOm}7hgikJa4b%?2eS)h`oFX5sC zu_WK(NY@9UVqb?21vblYRvp1$b3h_R}FW-&C#WnJ0=-V>5%w%Gs)msAVG|cu8NZ zGh0nQAnymEIMr7lpd_PlC6v?`PQuTWR}Zo8BBn6#NfFPZkLHm(g73W2cY(%;Jl9H)1wn?(XZwdBk$>w=v+>{CzFyE#JPkbzqegl?)bKaYyy?GMnb7)dam zVDQcDz@)tq3Rs59abZEnTs+IuqRBY+9@Ak6xlrN4ZTib7GyNeuN|?l>SQD5PSv$FG zZo;u1E1=oE;oX(_v@o5@Os%jBAEILfD1@RXF*$aYo1){Tf65j_#h!XvE@N*nA9sEy zLqYT^oP!k~<+GugCy8-x7~?mV0I(n)4V9A(1d5y!VMnQa=2A7M*TIp~WAqkvtlWVk zkaKv-Q+-vREl6qwQCcGx2{=;HXAl!ZE#Qm$72!tAIK^wAV#NY2^>iYG*g?u{11PmL zBDS}Mqr}G}aoc^oTzImahcwiieG^#EAsffPFd4GqE=`$28@MS?xXLPHuHS_TbUg_? 
ziWRzh>Zvu=i?>vs%NN7DxS)&!x^$NqJGl|M)im=p{9J(eupS+JGja5ioM0$Y?dUtS z8)%9KP{j=RR1 zahsi%**4LlLwJCaeh#ztaf=l$S%=|X;@Xk##vk)fz4BKjPt~qn}X@@pq9q_S^2fZ24N7QOrnXTL*rJW z$snPcIrfU#9nxx`P@kD`DEj!%Oa^d2DW}{^ENK7@P97GJkt=rZ_=r z&qvz)II`hp#>>fzDvxjrs>T$jVu9`Sm<^7Ko>?JKF*9u7RNICMgGEKaqMxkd#dc8w z)BZr1D20W>u^=GgUzDhx1)9xBNJg8&(UQ}6_^#r)OP6Cj1D|i4SIt^e)d|?t4|BV6 zA6HH1Qn2YJO2($UDAfnvQm{Mf*c};Lxo(nz-a$w2!04)An`@^8DRYjaPodyV(C1As zF6EDk2l2_DGJLL^KD8!hS~w5oc2YVc4$898V3(A+x1#8~AK*M$9}6yNo=4`z?L$Z| zSVnCPI*=fgR^=IZG}pTj4Oi5mV`-9*^lNoi^}z!-SO(KR};SNAA*3+jUU} zH&O;|6rAz|VpVT7v}}(B1xt>1L2#RheV}Eyywh#|#Bh+{Ulgt0DzERCg-4IA+;kId z$`eqL=E%hg={Wjc3|k%)EU^$FWj9*<`3GV|dpGYOUCZGJIfTm57nuvJ~&@QVW!&9vl9eRJWQBHsnn?jqsE&_ikp zOV8UAv4)9z(0B?fL--$9s7acu#ucNfdIF9r>DU^ z(2O)Vl>77YY1pQi1wJN-8BM~PC*Kms*05R6hub4uYHz+o^)WEG0acwSwLoH1Yez*x zB=UY)>pM8L8o{EJ3pZ5DF|xK}s1vD=qQ(PB4U;b*$rNjk(h0K4POv4ompq~i(2Z5l zCUN!!lfIDarw6UC5ifjVMqsfpSf>!0Ng~2aQcn1^(Jq2Wc@q>E^Rqslji-Sb!Be+|#ue*rQexgF9J{Bg-hWtRNU3>_r(k8$QH3BliU#8j5P9zJ*qCE z#J)dVU`@V-OquvDSSD#8ni=+$RsMi=K$h5E8NxyRWps?DzqrCobDRAd?|_Pt1`Aje zQT865UfWawL{c?FDk8qr8OJz6mu%3i&ywYv7U;wrwkszv-)PsxMP~Wpt{@!m4<^uJ zBBvz__>K^aX+|O7Br_28!++lNl6fKoN1tN`h|U)>-7z7r+yPEj(SZ**H#IsPqPC4}n%6zM|fDD=OotkV8JpVua F`+xSMJh1=( literal 0 HcmV?d00001 diff --git a/substreams/substreams-trigger-filter/package.json b/substreams/substreams-trigger-filter/package.json new file mode 100644 index 00000000000..3815b847ded --- /dev/null +++ b/substreams/substreams-trigger-filter/package.json @@ -0,0 +1 @@ +{ "dependencies": { "@graphprotocol/graph-cli": "^0.52.0" } } \ No newline at end of file diff --git a/substreams/substreams-trigger-filter/proto/near.proto b/substreams/substreams-trigger-filter/proto/near.proto new file mode 100644 index 00000000000..22a0267669a --- /dev/null +++ b/substreams/substreams-trigger-filter/proto/near.proto @@ -0,0 +1,521 @@ +syntax = "proto3"; + +package sf.near.codec.v1; + +option go_package = "github.com/streamingfast/sf-near/pb/sf/near/codec/v1;pbcodec"; + 
+message Block { + string author = 1; + BlockHeader header = 2; + repeated ChunkHeader chunk_headers = 3; + repeated IndexerShard shards = 4; + repeated StateChangeWithCause state_changes = 5; +} + +// HeaderOnlyBlock is a standard [Block] structure where all other fields are +// removed so that hydrating that object from a [Block] bytes payload will +// drastically reduced allocated memory required to hold the full block. +// +// This can be used to unpack a [Block] when only the [BlockHeader] information +// is required and greatly reduced required memory. +message HeaderOnlyBlock { + BlockHeader header = 2; +} + +message StateChangeWithCause { + StateChangeValue value = 1; + StateChangeCause cause = 2; +} + +message StateChangeCause { + oneof cause { + NotWritableToDisk not_writable_to_disk = 1; + InitialState initial_state = 2; + TransactionProcessing transaction_processing = 3; + ActionReceiptProcessingStarted action_receipt_processing_started = 4; + ActionReceiptGasReward action_receipt_gas_reward = 5; + ReceiptProcessing receipt_processing = 6; + PostponedReceipt postponed_receipt = 7; + UpdatedDelayedReceipts updated_delayed_receipts = 8; + ValidatorAccountsUpdate validator_accounts_update = 9; + Migration migration = 10; + } + + message NotWritableToDisk {} + message InitialState {} + message TransactionProcessing {CryptoHash tx_hash = 1;} + message ActionReceiptProcessingStarted {CryptoHash receipt_hash = 1;} + message ActionReceiptGasReward {CryptoHash tx_hash = 1;} + message ReceiptProcessing {CryptoHash tx_hash = 1;} + message PostponedReceipt {CryptoHash tx_hash = 1;} + message UpdatedDelayedReceipts {} + message ValidatorAccountsUpdate {} + message Migration {} +} + +message StateChangeValue { + oneof value { + AccountUpdate account_update = 1; + AccountDeletion account_deletion = 2; + AccessKeyUpdate access_key_update = 3; + AccessKeyDeletion access_key_deletion = 4; + DataUpdate data_update = 5; + DataDeletion data_deletion = 6; + 
ContractCodeUpdate contract_code_update = 7; + ContractCodeDeletion contract_deletion = 8; + } + + message AccountUpdate {string account_id = 1; Account account = 2;} + message AccountDeletion {string account_id = 1;} + message AccessKeyUpdate { + string account_id = 1; + PublicKey public_key = 2; + AccessKey access_key = 3; + } + message AccessKeyDeletion { + string account_id = 1; + PublicKey public_key = 2; + } + message DataUpdate { + string account_id = 1; + bytes key = 2; + bytes value = 3; + } + message DataDeletion { + string account_id = 1; + bytes key = 2; + } + message ContractCodeUpdate { + string account_id = 1; + bytes code = 2; + } + message ContractCodeDeletion { + string account_id = 1; + } +} + +message Account { + BigInt amount = 1; + BigInt locked = 2; + CryptoHash code_hash = 3; + uint64 storage_usage = 4; +} + +message BlockHeader { + uint64 height = 1; + uint64 prev_height = 2; + CryptoHash epoch_id = 3; + CryptoHash next_epoch_id = 4; + CryptoHash hash = 5; + CryptoHash prev_hash = 6; + CryptoHash prev_state_root = 7; + CryptoHash chunk_receipts_root = 8; + CryptoHash chunk_headers_root = 9; + CryptoHash chunk_tx_root = 10; + CryptoHash outcome_root = 11; + uint64 chunks_included = 12; + CryptoHash challenges_root = 13; + uint64 timestamp = 14; + uint64 timestamp_nanosec = 15; + CryptoHash random_value = 16; + repeated ValidatorStake validator_proposals = 17; + repeated bool chunk_mask = 18; + BigInt gas_price = 19; + uint64 block_ordinal = 20; + BigInt total_supply = 21; + repeated SlashedValidator challenges_result = 22; + uint64 last_final_block_height = 23; + CryptoHash last_final_block = 24; + uint64 last_ds_final_block_height = 25; + CryptoHash last_ds_final_block = 26; + CryptoHash next_bp_hash = 27; + CryptoHash block_merkle_root = 28; + bytes epoch_sync_data_hash = 29; + repeated Signature approvals = 30; + Signature signature = 31; + uint32 latest_protocol_version = 32; +} + +message BigInt { + bytes bytes = 1; +} +message 
CryptoHash { + bytes bytes = 1; +} + +enum CurveKind { + ED25519 = 0; + SECP256K1 = 1; +} + +message Signature { + CurveKind type = 1; + bytes bytes = 2; +} + +message PublicKey { + CurveKind type = 1; + bytes bytes = 2; +} + +message ValidatorStake { + string account_id = 1; + PublicKey public_key = 2; + BigInt stake = 3; +} + +message SlashedValidator { + string account_id = 1; + bool is_double_sign = 2; +} + +message ChunkHeader { + bytes chunk_hash = 1; + bytes prev_block_hash = 2; + bytes outcome_root = 3; + bytes prev_state_root = 4; + bytes encoded_merkle_root = 5; + uint64 encoded_length = 6; + uint64 height_created = 7; + uint64 height_included = 8; + uint64 shard_id = 9; + uint64 gas_used = 10; + uint64 gas_limit = 11; + BigInt validator_reward = 12; + BigInt balance_burnt = 13; + bytes outgoing_receipts_root = 14; + bytes tx_root = 15; + repeated ValidatorStake validator_proposals = 16; + Signature signature = 17; +} + +message IndexerShard { + uint64 shard_id = 1; + IndexerChunk chunk = 2; + repeated IndexerExecutionOutcomeWithReceipt receipt_execution_outcomes = 3; +} + +message IndexerExecutionOutcomeWithReceipt { + ExecutionOutcomeWithId execution_outcome = 1; + Receipt receipt = 2; +} + +message IndexerChunk { + string author = 1; + ChunkHeader header = 2; + repeated IndexerTransactionWithOutcome transactions = 3; + repeated Receipt receipts = 4; +} + +message IndexerTransactionWithOutcome { + SignedTransaction transaction = 1; + IndexerExecutionOutcomeWithOptionalReceipt outcome = 2; +} + +message SignedTransaction { + string signer_id = 1; + PublicKey public_key = 2; + uint64 nonce = 3; + string receiver_id = 4; + repeated Action actions = 5; + Signature signature = 6; + CryptoHash hash = 7; +} + +message IndexerExecutionOutcomeWithOptionalReceipt { + ExecutionOutcomeWithId execution_outcome = 1; + Receipt receipt = 2; +} + +message Receipt { + string predecessor_id = 1; + string receiver_id = 2; + CryptoHash receipt_id = 3; + + oneof receipt { + 
ReceiptAction action = 10; + ReceiptData data = 11; + } +} + +message ReceiptData { + CryptoHash data_id = 1; + bytes data = 2; +} + +message ReceiptAction { + string signer_id = 1; + PublicKey signer_public_key = 2; + BigInt gas_price = 3; + repeated DataReceiver output_data_receivers = 4; + repeated CryptoHash input_data_ids = 5; + repeated Action actions = 6; +} + +message DataReceiver { + CryptoHash data_id = 1; + string receiver_id = 2; +} + +message ExecutionOutcomeWithId { + MerklePath proof = 1; + CryptoHash block_hash = 2; + CryptoHash id = 3; + ExecutionOutcome outcome = 4; +} + +message ExecutionOutcome { + repeated string logs = 1; + repeated CryptoHash receipt_ids = 2; + uint64 gas_burnt = 3; + BigInt tokens_burnt = 4; + string executor_id = 5; + oneof status { + UnknownExecutionStatus unknown = 20; + FailureExecutionStatus failure = 21; + SuccessValueExecutionStatus success_value = 22; + SuccessReceiptIdExecutionStatus success_receipt_id = 23; + } + ExecutionMetadata metadata = 6; +} + +enum ExecutionMetadata { + ExecutionMetadataV1 = 0; +} + +message SuccessValueExecutionStatus { + bytes value = 1; +} + +message SuccessReceiptIdExecutionStatus { + CryptoHash id = 1; +} + +message UnknownExecutionStatus {} +message FailureExecutionStatus { + oneof failure { + ActionError action_error = 1; + InvalidTxError invalid_tx_error = 2; + } +} + +message ActionError { + uint64 index = 1; + oneof kind { + AccountAlreadyExistsErrorKind account_already_exist = 21; + AccountDoesNotExistErrorKind account_does_not_exist = 22; + CreateAccountOnlyByRegistrarErrorKind create_account_only_by_registrar = 23; + CreateAccountNotAllowedErrorKind create_account_not_allowed = 24; + ActorNoPermissionErrorKind actor_no_permission =25; + DeleteKeyDoesNotExistErrorKind delete_key_does_not_exist = 26; + AddKeyAlreadyExistsErrorKind add_key_already_exists = 27; + DeleteAccountStakingErrorKind delete_account_staking = 28; + LackBalanceForStateErrorKind lack_balance_for_state = 29; + 
TriesToUnstakeErrorKind tries_to_unstake = 30; + TriesToStakeErrorKind tries_to_stake = 31; + InsufficientStakeErrorKind insufficient_stake = 32; + FunctionCallErrorKind function_call = 33; + NewReceiptValidationErrorKind new_receipt_validation = 34; + OnlyImplicitAccountCreationAllowedErrorKind only_implicit_account_creation_allowed = 35; + DeleteAccountWithLargeStateErrorKind delete_account_with_large_state = 36; + } +} + +message AccountAlreadyExistsErrorKind { + string account_id = 1; +} + +message AccountDoesNotExistErrorKind { + string account_id = 1; +} + +/// A top-level account ID can only be created by registrar. +message CreateAccountOnlyByRegistrarErrorKind{ + string account_id = 1; + string registrar_account_id = 2; + string predecessor_id = 3; +} + +message CreateAccountNotAllowedErrorKind{ + string account_id = 1; + string predecessor_id = 2; +} + +message ActorNoPermissionErrorKind{ + string account_id = 1; + string actor_id = 2; +} + +message DeleteKeyDoesNotExistErrorKind{ + string account_id = 1; + PublicKey public_key = 2; +} + +message AddKeyAlreadyExistsErrorKind{ + string account_id = 1; + PublicKey public_key = 2; +} + +message DeleteAccountStakingErrorKind{ + string account_id = 1; +} + +message LackBalanceForStateErrorKind{ + string account_id = 1; + BigInt balance = 2; +} + +message TriesToUnstakeErrorKind{ + string account_id = 1; +} + +message TriesToStakeErrorKind{ + string account_id = 1; + BigInt stake = 2; + BigInt locked = 3; + BigInt balance = 4; +} + +message InsufficientStakeErrorKind{ + string account_id = 1; + BigInt stake = 2; + BigInt minimum_stake = 3; +} + +message FunctionCallErrorKind { + FunctionCallErrorSer error = 1; +} + +enum FunctionCallErrorSer { //todo: add more detail? 
+ CompilationError = 0; + LinkError = 1; + MethodResolveError = 2; + WasmTrap = 3; + WasmUnknownError = 4; + HostError = 5; + _EVMError = 6; + ExecutionError = 7; +} + +message NewReceiptValidationErrorKind { + ReceiptValidationError error = 1; +} + +enum ReceiptValidationError { //todo: add more detail? + InvalidPredecessorId = 0; + InvalidReceiverAccountId = 1; + InvalidSignerAccountId = 2; + InvalidDataReceiverId = 3; + ReturnedValueLengthExceeded = 4; + NumberInputDataDependenciesExceeded = 5; + ActionsValidationError = 6; +} + +message OnlyImplicitAccountCreationAllowedErrorKind{ + string account_id = 1; +} + +message DeleteAccountWithLargeStateErrorKind{ + string account_id = 1; +} + +enum InvalidTxError { //todo: add more detail? + InvalidAccessKeyError = 0; + InvalidSignerId = 1; + SignerDoesNotExist = 2; + InvalidNonce = 3; + NonceTooLarge = 4; + InvalidReceiverId = 5; + InvalidSignature = 6; + NotEnoughBalance = 7; + LackBalanceForState = 8; + CostOverflow = 9; + InvalidChain = 10; + Expired = 11; + ActionsValidation = 12; + TransactionSizeExceeded = 13; +} + +message MerklePath { + repeated MerklePathItem path = 1; +} + +message MerklePathItem { + CryptoHash hash = 1; + Direction direction = 2; +} + +enum Direction { + left = 0; + right = 1; +} + +message Action { + oneof action { + CreateAccountAction create_account = 1; + DeployContractAction deploy_contract = 2; + FunctionCallAction function_call = 3; + TransferAction transfer = 4; + StakeAction stake = 5; + AddKeyAction add_key = 6; + DeleteKeyAction delete_key = 7; + DeleteAccountAction delete_account = 8; + } +} + +message CreateAccountAction { +} + +message DeployContractAction { + bytes code = 1; +} + +message FunctionCallAction { + string method_name = 1; + bytes args = 2; + uint64 gas = 3; + BigInt deposit = 4; +} + +message TransferAction { + BigInt deposit = 1; +} + +message StakeAction { + BigInt stake = 1; + PublicKey public_key = 2; +} + +message AddKeyAction { + PublicKey public_key = 1; 
+ AccessKey access_key = 2; +} + +message DeleteKeyAction { + PublicKey public_key = 1; +} + +message DeleteAccountAction { + string beneficiary_id = 1; +} + +message AccessKey { + uint64 nonce = 1; + AccessKeyPermission permission = 2; +} + +message AccessKeyPermission { + oneof permission { + FunctionCallPermission function_call = 1; + FullAccessPermission full_access = 2; + } +} + +message FunctionCallPermission { + BigInt allowance = 1; + string receiver_id = 2; + repeated string method_names = 3; +} + +message FullAccessPermission { +} diff --git a/substreams/substreams-trigger-filter/proto/receipts.proto b/substreams/substreams-trigger-filter/proto/receipts.proto new file mode 100755 index 00000000000..d7e4a822573 --- /dev/null +++ b/substreams/substreams-trigger-filter/proto/receipts.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +import "near.proto"; + +package receipts.v1; + +message BlockAndReceipts { + sf.near.codec.v1.Block block = 1; + repeated sf.near.codec.v1.ExecutionOutcomeWithId outcome = 2; + repeated sf.near.codec.v1.Receipt receipt = 3; +} + + + + diff --git a/substreams/substreams-trigger-filter/rust-toolchain.toml b/substreams/substreams-trigger-filter/rust-toolchain.toml new file mode 100755 index 00000000000..fde0e8fe57c --- /dev/null +++ b/substreams/substreams-trigger-filter/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +targets = [ "wasm32-unknown-unknown" ] diff --git a/substreams/substreams-trigger-filter/schema.graphql b/substreams/substreams-trigger-filter/schema.graphql new file mode 100644 index 00000000000..20e5f730423 --- /dev/null +++ b/substreams/substreams-trigger-filter/schema.graphql @@ -0,0 +1,4 @@ +type Block @entity { + id: Bytes! 
+} + diff --git a/substreams/substreams-trigger-filter/src/lib.rs b/substreams/substreams-trigger-filter/src/lib.rs new file mode 100755 index 00000000000..36bbc15f0a0 --- /dev/null +++ b/substreams/substreams-trigger-filter/src/lib.rs @@ -0,0 +1,157 @@ +#![allow(clippy::not_unsafe_ptr_arg_deref)] + +mod pb; + +use std::collections::HashSet; + +use pb::receipts::v1::BlockAndReceipts; +use substreams_entity_change::pb::entity::EntityChanges; +use substreams_near_core::pb::sf::near::r#type::v1::{ + execution_outcome, receipt::Receipt, Block, IndexerExecutionOutcomeWithReceipt, +}; + +fn status(outcome: &IndexerExecutionOutcomeWithReceipt) -> Option<&execution_outcome::Status> { + outcome + .execution_outcome + .as_ref() + .and_then(|o| o.outcome.as_ref()) + .and_then(|o| o.status.as_ref()) +} + +fn is_success(outcome: &IndexerExecutionOutcomeWithReceipt) -> bool { + status(outcome) + .map(|s| { + use execution_outcome::Status::*; + + match s { + Unknown(_) | Failure(_) => false, + SuccessValue(_) | SuccessReceiptId(_) => true, + } + }) + .unwrap_or(false) +} + +#[substreams::handlers::map] +fn near_filter(params: String, blk: Block) -> Result { + let mut blk = blk; + let filter = NearFilter::try_from(params.as_str())?; + let mut out = BlockAndReceipts::default(); + + blk.shards = blk + .shards + .into_iter() + .map(|shard| { + let mut shard = shard; + let receipt_execution_outcomes = shard + .receipt_execution_outcomes + .into_iter() + .filter(|outcome| { + if !is_success(&outcome) { + return false; + } + + let execution_outcome = match outcome.execution_outcome.as_ref() { + Some(eo) => eo, + None => return false, + }; + + let receipt = match outcome.receipt.as_ref() { + Some(receipt) => receipt, + None => return false, + }; + + if !matches!(receipt.receipt, Some(Receipt::Action(_))) { + return false; + } + + if !filter.matches(&receipt.receiver_id) { + return false; + } + + out.outcome.push(execution_outcome.clone()); + out.receipt.push(receipt.clone()); + true + }) 
+ .collect(); + shard.receipt_execution_outcomes = receipt_execution_outcomes; + shard + }) + .collect(); + + out.block = Some(blk.clone()); + + Ok(out) +} + +#[substreams::handlers::map] +fn graph_out(blk: Block) -> Result { + let mut out = EntityChanges::default(); + + let hex = hex::encode(&blk.header.as_ref().unwrap().hash.as_ref().unwrap().bytes); + + out.push_change( + "Block", + &hex, + blk.header.unwrap().height, + substreams_entity_change::pb::entity::entity_change::Operation::Create, + ); + + Ok(out) +} + +#[derive(Debug, Default)] +pub struct NearFilter<'a> { + pub accounts: HashSet<&'a str>, + pub partial_accounts: HashSet<(Option<&'a str>, Option<&'a str>)>, +} + +impl<'a> NearFilter<'a> { + pub fn matches(&self, account: &str) -> bool { + let partial_match = self.partial_accounts.iter().any(|partial| match partial { + (Some(prefix), Some(suffix)) => { + account.starts_with(prefix) && account.ends_with(suffix) + } + (Some(prefix), None) => account.starts_with(prefix), + (None, Some(suffix)) => account.ends_with(suffix), + (None, None) => unreachable!(), + }); + + if !self.accounts.contains(&account) && !partial_match { + return false; + } + + true + } +} + +impl<'a> TryFrom<&'a str> for NearFilter<'a> { + type Error = anyhow::Error; + + fn try_from(params: &'a str) -> Result { + let mut accounts: HashSet<&str> = HashSet::default(); + let mut partial_accounts: HashSet<(Option<&str>, Option<&str>)> = HashSet::default(); + let mut lines = params.lines(); + let mut header = lines.next().unwrap().split(","); + let accs_len: usize = header.next().unwrap().parse().unwrap(); + let partials_len: usize = header.next().unwrap().parse().unwrap(); + + accounts.extend( + lines + .by_ref() + .take(accs_len) + .map(|line| line.split(",")) + .flatten(), + ); + partial_accounts.extend(lines.take(partials_len).map(|line| { + let mut parts = line.split(","); + let start = parts.next(); + let end = parts.next(); + (start, end) + })); + + Ok(NearFilter { + accounts, + 
partial_accounts, + }) + } +} diff --git a/substreams/substreams-trigger-filter/src/pb/mod.rs b/substreams/substreams-trigger-filter/src/pb/mod.rs new file mode 100755 index 00000000000..be6467ea7fd --- /dev/null +++ b/substreams/substreams-trigger-filter/src/pb/mod.rs @@ -0,0 +1,8 @@ +// @generated +pub mod receipts { + // @@protoc_insertion_point(attribute:receipts.v1) + pub mod v1 { + include!("receipts.v1.rs"); + // @@protoc_insertion_point(receipts.v1) + } +} diff --git a/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs b/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs new file mode 100644 index 00000000000..91e905a85b1 --- /dev/null +++ b/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs @@ -0,0 +1,16 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockAndReceipts { + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option< + ::substreams_near_core::pb::sf::near::r#type::v1::Block, + >, + #[prost(message, repeated, tag = "2")] + pub outcome: ::prost::alloc::vec::Vec< + ::substreams_near_core::pb::sf::near::r#type::v1::ExecutionOutcomeWithId, + >, + #[prost(message, repeated, tag = "3")] + pub receipt: ::prost::alloc::vec::Vec< + ::substreams_near_core::pb::sf::near::r#type::v1::Receipt, + >, +} diff --git a/substreams/substreams-trigger-filter/src/pb/sf.near.type.v1.rs b/substreams/substreams-trigger-filter/src/pb/sf.near.type.v1.rs new file mode 100644 index 00000000000..ed60d39b47e --- /dev/null +++ b/substreams/substreams-trigger-filter/src/pb/sf.near.type.v1.rs @@ -0,0 +1,1181 @@ +// @generated +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Block { + #[prost(string, tag="1")] + pub author: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub header: ::core::option::Option, + #[prost(message, repeated, tag="3")] + pub chunk_headers: ::prost::alloc::vec::Vec, + 
#[prost(message, repeated, tag="4")] + pub shards: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="5")] + pub state_changes: ::prost::alloc::vec::Vec, +} +/// HeaderOnlyBlock is a standard \[Block\] structure where all other fields are +/// removed so that hydrating that object from a \[Block\] bytes payload will +/// drastically reduced allocated memory required to hold the full block. +/// +/// This can be used to unpack a \[Block\] when only the \[BlockHeader\] information +/// is required and greatly reduced required memory. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HeaderOnlyBlock { + #[prost(message, optional, tag="2")] + pub header: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StateChangeWithCause { + #[prost(message, optional, tag="1")] + pub value: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub cause: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StateChangeCause { + #[prost(oneof="state_change_cause::Cause", tags="1, 2, 3, 4, 5, 6, 7, 8, 9, 10")] + pub cause: ::core::option::Option, +} +/// Nested message and enum types in `StateChangeCause`. 
+pub mod state_change_cause { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct NotWritableToDisk { + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct InitialState { + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct TransactionProcessing { + #[prost(message, optional, tag="1")] + pub tx_hash: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ActionReceiptProcessingStarted { + #[prost(message, optional, tag="1")] + pub receipt_hash: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ActionReceiptGasReward { + #[prost(message, optional, tag="1")] + pub tx_hash: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ReceiptProcessing { + #[prost(message, optional, tag="1")] + pub tx_hash: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct PostponedReceipt { + #[prost(message, optional, tag="1")] + pub tx_hash: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct UpdatedDelayedReceipts { + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ValidatorAccountsUpdate { + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct Migration { + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Cause { + #[prost(message, tag="1")] + NotWritableToDisk(NotWritableToDisk), + #[prost(message, tag="2")] + 
InitialState(InitialState), + #[prost(message, tag="3")] + TransactionProcessing(TransactionProcessing), + #[prost(message, tag="4")] + ActionReceiptProcessingStarted(ActionReceiptProcessingStarted), + #[prost(message, tag="5")] + ActionReceiptGasReward(ActionReceiptGasReward), + #[prost(message, tag="6")] + ReceiptProcessing(ReceiptProcessing), + #[prost(message, tag="7")] + PostponedReceipt(PostponedReceipt), + #[prost(message, tag="8")] + UpdatedDelayedReceipts(UpdatedDelayedReceipts), + #[prost(message, tag="9")] + ValidatorAccountsUpdate(ValidatorAccountsUpdate), + #[prost(message, tag="10")] + Migration(Migration), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StateChangeValue { + #[prost(oneof="state_change_value::Value", tags="1, 2, 3, 4, 5, 6, 7, 8")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `StateChangeValue`. +pub mod state_change_value { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountUpdate { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub account: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountDeletion { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccessKeyUpdate { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub access_key: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccessKeyDeletion { + #[prost(string, tag="1")] + pub 
account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct DataUpdate { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(bytes="vec", tag="2")] + pub key: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="3")] + pub value: ::prost::alloc::vec::Vec, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct DataDeletion { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(bytes="vec", tag="2")] + pub key: ::prost::alloc::vec::Vec, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ContractCodeUpdate { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(bytes="vec", tag="2")] + pub code: ::prost::alloc::vec::Vec, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct ContractCodeDeletion { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(message, tag="1")] + AccountUpdate(AccountUpdate), + #[prost(message, tag="2")] + AccountDeletion(AccountDeletion), + #[prost(message, tag="3")] + AccessKeyUpdate(AccessKeyUpdate), + #[prost(message, tag="4")] + AccessKeyDeletion(AccessKeyDeletion), + #[prost(message, tag="5")] + DataUpdate(DataUpdate), + #[prost(message, tag="6")] + DataDeletion(DataDeletion), + #[prost(message, tag="7")] + ContractCodeUpdate(ContractCodeUpdate), + #[prost(message, tag="8")] + ContractDeletion(ContractCodeDeletion), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Account 
{ + #[prost(message, optional, tag="1")] + pub amount: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub locked: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub code_hash: ::core::option::Option, + #[prost(uint64, tag="4")] + pub storage_usage: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockHeader { + #[prost(uint64, tag="1")] + pub height: u64, + #[prost(uint64, tag="2")] + pub prev_height: u64, + #[prost(message, optional, tag="3")] + pub epoch_id: ::core::option::Option, + #[prost(message, optional, tag="4")] + pub next_epoch_id: ::core::option::Option, + #[prost(message, optional, tag="5")] + pub hash: ::core::option::Option, + #[prost(message, optional, tag="6")] + pub prev_hash: ::core::option::Option, + #[prost(message, optional, tag="7")] + pub prev_state_root: ::core::option::Option, + #[prost(message, optional, tag="8")] + pub chunk_receipts_root: ::core::option::Option, + #[prost(message, optional, tag="9")] + pub chunk_headers_root: ::core::option::Option, + #[prost(message, optional, tag="10")] + pub chunk_tx_root: ::core::option::Option, + #[prost(message, optional, tag="11")] + pub outcome_root: ::core::option::Option, + #[prost(uint64, tag="12")] + pub chunks_included: u64, + #[prost(message, optional, tag="13")] + pub challenges_root: ::core::option::Option, + #[prost(uint64, tag="14")] + pub timestamp: u64, + #[prost(uint64, tag="15")] + pub timestamp_nanosec: u64, + #[prost(message, optional, tag="16")] + pub random_value: ::core::option::Option, + #[prost(message, repeated, tag="17")] + pub validator_proposals: ::prost::alloc::vec::Vec, + #[prost(bool, repeated, tag="18")] + pub chunk_mask: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="19")] + pub gas_price: ::core::option::Option, + #[prost(uint64, tag="20")] + pub block_ordinal: u64, + #[prost(message, optional, tag="21")] + pub total_supply: ::core::option::Option, + 
#[prost(message, repeated, tag="22")] + pub challenges_result: ::prost::alloc::vec::Vec, + #[prost(uint64, tag="23")] + pub last_final_block_height: u64, + #[prost(message, optional, tag="24")] + pub last_final_block: ::core::option::Option, + #[prost(uint64, tag="25")] + pub last_ds_final_block_height: u64, + #[prost(message, optional, tag="26")] + pub last_ds_final_block: ::core::option::Option, + #[prost(message, optional, tag="27")] + pub next_bp_hash: ::core::option::Option, + #[prost(message, optional, tag="28")] + pub block_merkle_root: ::core::option::Option, + #[prost(bytes="vec", tag="29")] + pub epoch_sync_data_hash: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="30")] + pub approvals: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="31")] + pub signature: ::core::option::Option, + #[prost(uint32, tag="32")] + pub latest_protocol_version: u32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BigInt { + #[prost(bytes="vec", tag="1")] + pub bytes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CryptoHash { + #[prost(bytes="vec", tag="1")] + pub bytes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Signature { + #[prost(enumeration="CurveKind", tag="1")] + pub r#type: i32, + #[prost(bytes="vec", tag="2")] + pub bytes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PublicKey { + #[prost(enumeration="CurveKind", tag="1")] + pub r#type: i32, + #[prost(bytes="vec", tag="2")] + pub bytes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorStake { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + 
#[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub stake: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SlashedValidator { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(bool, tag="2")] + pub is_double_sign: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ChunkHeader { + #[prost(bytes="vec", tag="1")] + pub chunk_hash: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="2")] + pub prev_block_hash: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="3")] + pub outcome_root: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="4")] + pub prev_state_root: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="5")] + pub encoded_merkle_root: ::prost::alloc::vec::Vec, + #[prost(uint64, tag="6")] + pub encoded_length: u64, + #[prost(uint64, tag="7")] + pub height_created: u64, + #[prost(uint64, tag="8")] + pub height_included: u64, + #[prost(uint64, tag="9")] + pub shard_id: u64, + #[prost(uint64, tag="10")] + pub gas_used: u64, + #[prost(uint64, tag="11")] + pub gas_limit: u64, + #[prost(message, optional, tag="12")] + pub validator_reward: ::core::option::Option, + #[prost(message, optional, tag="13")] + pub balance_burnt: ::core::option::Option, + #[prost(bytes="vec", tag="14")] + pub outgoing_receipts_root: ::prost::alloc::vec::Vec, + #[prost(bytes="vec", tag="15")] + pub tx_root: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="16")] + pub validator_proposals: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="17")] + pub signature: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexerShard { + #[prost(uint64, tag="1")] + pub shard_id: u64, + #[prost(message, optional, tag="2")] + pub chunk: 
::core::option::Option, + #[prost(message, repeated, tag="3")] + pub receipt_execution_outcomes: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexerExecutionOutcomeWithReceipt { + #[prost(message, optional, tag="1")] + pub execution_outcome: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub receipt: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexerChunk { + #[prost(string, tag="1")] + pub author: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub header: ::core::option::Option, + #[prost(message, repeated, tag="3")] + pub transactions: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="4")] + pub receipts: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexerTransactionWithOutcome { + #[prost(message, optional, tag="1")] + pub transaction: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub outcome: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedTransaction { + #[prost(string, tag="1")] + pub signer_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, + #[prost(uint64, tag="3")] + pub nonce: u64, + #[prost(string, tag="4")] + pub receiver_id: ::prost::alloc::string::String, + #[prost(message, repeated, tag="5")] + pub actions: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="6")] + pub signature: ::core::option::Option, + #[prost(message, optional, tag="7")] + pub hash: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexerExecutionOutcomeWithOptionalReceipt { + 
#[prost(message, optional, tag="1")] + pub execution_outcome: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub receipt: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Receipt { + #[prost(string, tag="1")] + pub predecessor_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub receiver_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="3")] + pub receipt_id: ::core::option::Option, + #[prost(oneof="receipt::Receipt", tags="10, 11")] + pub receipt: ::core::option::Option, +} +/// Nested message and enum types in `Receipt`. +pub mod receipt { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Receipt { + #[prost(message, tag="10")] + Action(super::ReceiptAction), + #[prost(message, tag="11")] + Data(super::ReceiptData), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReceiptData { + #[prost(message, optional, tag="1")] + pub data_id: ::core::option::Option, + #[prost(bytes="vec", tag="2")] + pub data: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReceiptAction { + #[prost(string, tag="1")] + pub signer_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub signer_public_key: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub gas_price: ::core::option::Option, + #[prost(message, repeated, tag="4")] + pub output_data_receivers: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="5")] + pub input_data_ids: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="6")] + pub actions: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DataReceiver { + #[prost(message, optional, tag="1")] + 
pub data_id: ::core::option::Option, + #[prost(string, tag="2")] + pub receiver_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecutionOutcomeWithId { + #[prost(message, optional, tag="1")] + pub proof: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub block_hash: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub id: ::core::option::Option, + #[prost(message, optional, tag="4")] + pub outcome: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecutionOutcome { + #[prost(string, repeated, tag="1")] + pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(message, repeated, tag="2")] + pub receipt_ids: ::prost::alloc::vec::Vec, + #[prost(uint64, tag="3")] + pub gas_burnt: u64, + #[prost(message, optional, tag="4")] + pub tokens_burnt: ::core::option::Option, + #[prost(string, tag="5")] + pub executor_id: ::prost::alloc::string::String, + #[prost(enumeration="ExecutionMetadata", tag="6")] + pub metadata: i32, + #[prost(oneof="execution_outcome::Status", tags="20, 21, 22, 23")] + pub status: ::core::option::Option, +} +/// Nested message and enum types in `ExecutionOutcome`. 
+pub mod execution_outcome { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Status { + #[prost(message, tag="20")] + Unknown(super::UnknownExecutionStatus), + #[prost(message, tag="21")] + Failure(super::FailureExecutionStatus), + #[prost(message, tag="22")] + SuccessValue(super::SuccessValueExecutionStatus), + #[prost(message, tag="23")] + SuccessReceiptId(super::SuccessReceiptIdExecutionStatus), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SuccessValueExecutionStatus { + #[prost(bytes="vec", tag="1")] + pub value: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SuccessReceiptIdExecutionStatus { + #[prost(message, optional, tag="1")] + pub id: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UnknownExecutionStatus { +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FailureExecutionStatus { + #[prost(oneof="failure_execution_status::Failure", tags="1, 2")] + pub failure: ::core::option::Option, +} +/// Nested message and enum types in `FailureExecutionStatus`. 
+pub mod failure_execution_status { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Failure { + #[prost(message, tag="1")] + ActionError(super::ActionError), + #[prost(enumeration="super::InvalidTxError", tag="2")] + InvalidTxError(i32), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ActionError { + #[prost(uint64, tag="1")] + pub index: u64, + #[prost(oneof="action_error::Kind", tags="21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42")] + pub kind: ::core::option::Option, +} +/// Nested message and enum types in `ActionError`. +pub mod action_error { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Kind { + #[prost(message, tag="21")] + AccountAlreadyExist(super::AccountAlreadyExistsErrorKind), + #[prost(message, tag="22")] + AccountDoesNotExist(super::AccountDoesNotExistErrorKind), + #[prost(message, tag="23")] + CreateAccountOnlyByRegistrar(super::CreateAccountOnlyByRegistrarErrorKind), + #[prost(message, tag="24")] + CreateAccountNotAllowed(super::CreateAccountNotAllowedErrorKind), + #[prost(message, tag="25")] + ActorNoPermission(super::ActorNoPermissionErrorKind), + #[prost(message, tag="26")] + DeleteKeyDoesNotExist(super::DeleteKeyDoesNotExistErrorKind), + #[prost(message, tag="27")] + AddKeyAlreadyExists(super::AddKeyAlreadyExistsErrorKind), + #[prost(message, tag="28")] + DeleteAccountStaking(super::DeleteAccountStakingErrorKind), + #[prost(message, tag="29")] + LackBalanceForState(super::LackBalanceForStateErrorKind), + #[prost(message, tag="30")] + TriesToUnstake(super::TriesToUnstakeErrorKind), + #[prost(message, tag="31")] + TriesToStake(super::TriesToStakeErrorKind), + #[prost(message, tag="32")] + InsufficientStake(super::InsufficientStakeErrorKind), + #[prost(message, tag="33")] + FunctionCall(super::FunctionCallErrorKind), + 
#[prost(message, tag="34")] + NewReceiptValidation(super::NewReceiptValidationErrorKind), + #[prost(message, tag="35")] + OnlyImplicitAccountCreationAllowed(super::OnlyImplicitAccountCreationAllowedErrorKind), + #[prost(message, tag="36")] + DeleteAccountWithLargeState(super::DeleteAccountWithLargeStateErrorKind), + #[prost(message, tag="37")] + DelegateActionInvalidSignature(super::DelegateActionInvalidSignatureKind), + #[prost(message, tag="38")] + DelegateActionSenderDoesNotMatchTxReceiver(super::DelegateActionSenderDoesNotMatchTxReceiverKind), + #[prost(message, tag="39")] + DelegateActionExpired(super::DelegateActionExpiredKind), + #[prost(message, tag="40")] + DelegateActionAccessKeyError(super::DelegateActionAccessKeyErrorKind), + #[prost(message, tag="41")] + DelegateActionInvalidNonce(super::DelegateActionInvalidNonceKind), + #[prost(message, tag="42")] + DelegateActionNonceTooLarge(super::DelegateActionNonceTooLargeKind), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountAlreadyExistsErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountDoesNotExistErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, +} +/// / A top-level account ID can only be created by registrar. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateAccountOnlyByRegistrarErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub registrar_account_id: ::prost::alloc::string::String, + #[prost(string, tag="3")] + pub predecessor_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateAccountNotAllowedErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub predecessor_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ActorNoPermissionErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub actor_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteKeyDoesNotExistErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AddKeyAlreadyExistsErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteAccountStakingErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LackBalanceForStateErrorKind { + #[prost(string, tag="1")] + pub 
account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub balance: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TriesToUnstakeErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TriesToStakeErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub stake: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub locked: ::core::option::Option, + #[prost(message, optional, tag="4")] + pub balance: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InsufficientStakeErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub stake: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub minimum_stake: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FunctionCallErrorKind { + #[prost(enumeration="FunctionCallErrorSer", tag="1")] + pub error: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NewReceiptValidationErrorKind { + #[prost(enumeration="ReceiptValidationError", tag="1")] + pub error: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OnlyImplicitAccountCreationAllowedErrorKind { + #[prost(string, tag="1")] + pub account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteAccountWithLargeStateErrorKind { + #[prost(string, tag="1")] + pub 
account_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionInvalidSignatureKind { +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionSenderDoesNotMatchTxReceiverKind { + #[prost(string, tag="1")] + pub sender_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub receiver_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionExpiredKind { +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionAccessKeyErrorKind { + /// InvalidAccessKeyError + #[prost(enumeration="InvalidTxError", tag="1")] + pub error: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionInvalidNonceKind { + #[prost(uint64, tag="1")] + pub delegate_nonce: u64, + #[prost(uint64, tag="2")] + pub ak_nonce: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateActionNonceTooLargeKind { + #[prost(uint64, tag="1")] + pub delegate_nonce: u64, + #[prost(uint64, tag="2")] + pub upper_bound: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerklePath { + #[prost(message, repeated, tag="1")] + pub path: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerklePathItem { + #[prost(message, optional, tag="1")] + pub hash: ::core::option::Option, + #[prost(enumeration="Direction", tag="2")] + pub direction: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Action { + 
#[prost(oneof="action::Action", tags="1, 2, 3, 4, 5, 6, 7, 8, 9")] + pub action: ::core::option::Option, +} +/// Nested message and enum types in `Action`. +pub mod action { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Action { + #[prost(message, tag="1")] + CreateAccount(super::CreateAccountAction), + #[prost(message, tag="2")] + DeployContract(super::DeployContractAction), + #[prost(message, tag="3")] + FunctionCall(super::FunctionCallAction), + #[prost(message, tag="4")] + Transfer(super::TransferAction), + #[prost(message, tag="5")] + Stake(super::StakeAction), + #[prost(message, tag="6")] + AddKey(super::AddKeyAction), + #[prost(message, tag="7")] + DeleteKey(super::DeleteKeyAction), + #[prost(message, tag="8")] + DeleteAccount(super::DeleteAccountAction), + #[prost(message, tag="9")] + Delegate(super::SignedDelegateAction), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateAccountAction { +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeployContractAction { + #[prost(bytes="vec", tag="1")] + pub code: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FunctionCallAction { + #[prost(string, tag="1")] + pub method_name: ::prost::alloc::string::String, + #[prost(bytes="vec", tag="2")] + pub args: ::prost::alloc::vec::Vec, + #[prost(uint64, tag="3")] + pub gas: u64, + #[prost(message, optional, tag="4")] + pub deposit: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransferAction { + #[prost(message, optional, tag="1")] + pub deposit: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StakeAction { + #[prost(message, 
optional, tag="1")] + pub stake: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub public_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AddKeyAction { + #[prost(message, optional, tag="1")] + pub public_key: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub access_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteKeyAction { + #[prost(message, optional, tag="1")] + pub public_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteAccountAction { + #[prost(string, tag="1")] + pub beneficiary_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedDelegateAction { + #[prost(message, optional, tag="1")] + pub signature: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub delegate_action: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegateAction { + #[prost(string, tag="1")] + pub sender_id: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub receiver_id: ::prost::alloc::string::String, + #[prost(message, repeated, tag="3")] + pub actions: ::prost::alloc::vec::Vec, + #[prost(uint64, tag="4")] + pub nonce: u64, + #[prost(uint64, tag="5")] + pub max_block_height: u64, + #[prost(message, optional, tag="6")] + pub public_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccessKey { + #[prost(uint64, tag="1")] + pub nonce: u64, + #[prost(message, optional, tag="2")] + pub permission: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccessKeyPermission { + #[prost(oneof="access_key_permission::Permission", tags="1, 2")] + pub permission: ::core::option::Option, +} +/// Nested message and enum types in `AccessKeyPermission`. +pub mod access_key_permission { + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Permission { + #[prost(message, tag="1")] + FunctionCall(super::FunctionCallPermission), + #[prost(message, tag="2")] + FullAccess(super::FullAccessPermission), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FunctionCallPermission { + #[prost(message, optional, tag="1")] + pub allowance: ::core::option::Option, + #[prost(string, tag="2")] + pub receiver_id: ::prost::alloc::string::String, + #[prost(string, repeated, tag="3")] + pub method_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FullAccessPermission { +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CurveKind { + Ed25519 = 0, + Secp256k1 = 1, +} +impl CurveKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + CurveKind::Ed25519 => "ED25519", + CurveKind::Secp256k1 => "SECP256K1", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ED25519" => Some(Self::Ed25519), + "SECP256K1" => Some(Self::Secp256k1), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ExecutionMetadata { + V1 = 0, +} +impl ExecutionMetadata { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ExecutionMetadata::V1 => "ExecutionMetadataV1", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ExecutionMetadataV1" => Some(Self::V1), + _ => None, + } + } +} +/// todo: add more detail? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FunctionCallErrorSer { + CompilationError = 0, + LinkError = 1, + MethodResolveError = 2, + WasmTrap = 3, + WasmUnknownError = 4, + HostError = 5, + EvmError = 6, + ExecutionError = 7, +} +impl FunctionCallErrorSer { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + FunctionCallErrorSer::CompilationError => "CompilationError", + FunctionCallErrorSer::LinkError => "LinkError", + FunctionCallErrorSer::MethodResolveError => "MethodResolveError", + FunctionCallErrorSer::WasmTrap => "WasmTrap", + FunctionCallErrorSer::WasmUnknownError => "WasmUnknownError", + FunctionCallErrorSer::HostError => "HostError", + FunctionCallErrorSer::EvmError => "_EVMError", + FunctionCallErrorSer::ExecutionError => "ExecutionError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CompilationError" => Some(Self::CompilationError), + "LinkError" => Some(Self::LinkError), + "MethodResolveError" => Some(Self::MethodResolveError), + "WasmTrap" => Some(Self::WasmTrap), + "WasmUnknownError" => Some(Self::WasmUnknownError), + "HostError" => Some(Self::HostError), + "_EVMError" => Some(Self::EvmError), + "ExecutionError" => Some(Self::ExecutionError), + _ => None, + } + } +} +/// todo: add more detail? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ReceiptValidationError { + InvalidPredecessorId = 0, + InvalidReceiverAccountId = 1, + InvalidSignerAccountId = 2, + InvalidDataReceiverId = 3, + ReturnedValueLengthExceeded = 4, + NumberInputDataDependenciesExceeded = 5, + ActionsValidationError = 6, +} +impl ReceiptValidationError { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ReceiptValidationError::InvalidPredecessorId => "InvalidPredecessorId", + ReceiptValidationError::InvalidReceiverAccountId => "InvalidReceiverAccountId", + ReceiptValidationError::InvalidSignerAccountId => "InvalidSignerAccountId", + ReceiptValidationError::InvalidDataReceiverId => "InvalidDataReceiverId", + ReceiptValidationError::ReturnedValueLengthExceeded => "ReturnedValueLengthExceeded", + ReceiptValidationError::NumberInputDataDependenciesExceeded => "NumberInputDataDependenciesExceeded", + ReceiptValidationError::ActionsValidationError => "ActionsValidationError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "InvalidPredecessorId" => Some(Self::InvalidPredecessorId), + "InvalidReceiverAccountId" => Some(Self::InvalidReceiverAccountId), + "InvalidSignerAccountId" => Some(Self::InvalidSignerAccountId), + "InvalidDataReceiverId" => Some(Self::InvalidDataReceiverId), + "ReturnedValueLengthExceeded" => Some(Self::ReturnedValueLengthExceeded), + "NumberInputDataDependenciesExceeded" => Some(Self::NumberInputDataDependenciesExceeded), + "ActionsValidationError" => Some(Self::ActionsValidationError), + _ => None, + } + } +} +/// todo: add more detail? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum InvalidTxError { + InvalidAccessKeyError = 0, + InvalidSignerId = 1, + SignerDoesNotExist = 2, + InvalidNonce = 3, + NonceTooLarge = 4, + InvalidReceiverId = 5, + InvalidSignature = 6, + NotEnoughBalance = 7, + LackBalanceForState = 8, + CostOverflow = 9, + InvalidChain = 10, + Expired = 11, + ActionsValidation = 12, + TransactionSizeExceeded = 13, +} +impl InvalidTxError { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + InvalidTxError::InvalidAccessKeyError => "InvalidAccessKeyError", + InvalidTxError::InvalidSignerId => "InvalidSignerId", + InvalidTxError::SignerDoesNotExist => "SignerDoesNotExist", + InvalidTxError::InvalidNonce => "InvalidNonce", + InvalidTxError::NonceTooLarge => "NonceTooLarge", + InvalidTxError::InvalidReceiverId => "InvalidReceiverId", + InvalidTxError::InvalidSignature => "InvalidSignature", + InvalidTxError::NotEnoughBalance => "NotEnoughBalance", + InvalidTxError::LackBalanceForState => "LackBalanceForState", + InvalidTxError::CostOverflow => "CostOverflow", + InvalidTxError::InvalidChain => "InvalidChain", + InvalidTxError::Expired => "Expired", + InvalidTxError::ActionsValidation => "ActionsValidation", + InvalidTxError::TransactionSizeExceeded => "TransactionSizeExceeded", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "InvalidAccessKeyError" => Some(Self::InvalidAccessKeyError), + "InvalidSignerId" => Some(Self::InvalidSignerId), + "SignerDoesNotExist" => Some(Self::SignerDoesNotExist), + "InvalidNonce" => Some(Self::InvalidNonce), + "NonceTooLarge" => Some(Self::NonceTooLarge), + "InvalidReceiverId" => Some(Self::InvalidReceiverId), + "InvalidSignature" => Some(Self::InvalidSignature), + "NotEnoughBalance" => Some(Self::NotEnoughBalance), + "LackBalanceForState" => Some(Self::LackBalanceForState), + "CostOverflow" => Some(Self::CostOverflow), + "InvalidChain" => Some(Self::InvalidChain), + "Expired" => Some(Self::Expired), + "ActionsValidation" => Some(Self::ActionsValidation), + "TransactionSizeExceeded" => Some(Self::TransactionSizeExceeded), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Direction { + Left = 0, + Right = 1, +} +impl Direction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Direction::Left => "left", + Direction::Right => "right", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "left" => Some(Self::Left), + "right" => Some(Self::Right), + _ => None, + } + } +} +// @@protoc_insertion_point(module) diff --git a/substreams/substreams-trigger-filter/subgraph.yaml b/substreams/substreams-trigger-filter/subgraph.yaml new file mode 100644 index 00000000000..88bf9ebcd1e --- /dev/null +++ b/substreams/substreams-trigger-filter/subgraph.yaml @@ -0,0 +1,16 @@ +specVersion: 0.0.5 +description: NEAR Blocks Indexing +repository: git@github.com:streamingfast/graph-node-dev.git +schema: + file: ./schema.graphql +dataSources: + - kind: substreams + name: hello-world + network: near-mainnet + source: + package: + moduleName: graph_out + file: substreams-near-hello-world-v0.1.0.spkg + mapping: + kind: substreams/graph-entities + apiVersion: 0.0.7 diff --git a/substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg b/substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg new file mode 100644 index 0000000000000000000000000000000000000000..c9e8566e8f91d7557bcf02f5d5e0664c0d725960 GIT binary patch literal 497306 zcmd?Sdvs-2ednor?nBa%s!DaMY?tNNvHYlqRVr86U|f!It0YxfMpaU-R5m7q#K3n1Fs+(^=vF%`RBw)ZjUbn2dDM9NKnXCr zYCZ1RxE@vSi1+#`46${2t+~WV%SM1c_hKx4UWHUkt7{wU?E@7f?~PYPXk685zIOGV zxYFEM=gEOi8^}#pY&U7uYQB2)mU!3F>eBjBeR+0nxjFyjfiBvYYQ6s9DLv~y5_ft6 zR&R=XTt~CHiHzN&8K$=XUq^AbDm(hj#Je`u7V7Jb*|p~K()?wc`Q0auT@k&s>Cky= zH)YLgy$*^iRd0ynr|Qca4Tpocrn}Z{PlUq2Ckt_JzIFAQc;ES{(czh~*{R8i@sUSo z&!3wfo9XMQUKQ`%{6dcl5ap(Ak7vhE&z_r{nH_uW`1H(lU$njL@aSk?ar@)5JLp@W%$%L zgQ<$y8o4ryyVh=%s6q96;&Og*-1=hu`sLvo3GKCd3%O*A;>FRPvdTi+HS51VuDGG> zBsThQkBeHx@S~GrvpM+?yGQ4c68+B<;H5<4+ zE~!-Qj?00+I^*IQ<0vHA=-9-}FpSfGV_b1#t?rM@#xV)srEdsdJx)T9T zRpOH5(5cg}8@;f!esN=NXui2}%uz!1+FZ6fJoN4x;!3sD z(NQnNU2&neTI}ek>Q_`PMjhAcS68)I>iE96JC3?~+|M|UN*&cwrK7tL$1!gU)ncV< zPuvriN*z&0wb&Wm8283y6-p|_p;V#Nvp1Az)>*wal!}zDzrh3HRabPAe^mfTR|=#6 
zkggO+K{c!Wp(a4OZn!Z%4TgnkskdWKq5ouZ%C@GY)mUpa+KA(J(r&Ial6j_$|E#Pn zEjJdD^=7if8z-BS#b#?|h`}*B_0Ze3+Yl>gbi1Oebw{DBQdcNZaaZ@QPy&@*eS3W~ zUhR(d`&R{;?T$J_N$;SZ2v&#+G}~R>N6%3ys+RU@VuVFRt-T(LNLW-o78C$i^;l4% zV%1|o32;@g=w)i6E+%bXbam0XqD0MoUA=~3B>4MFZZar9}Ydu>;QHf^Prp=h|;`S}RMd zjdrrm>&1=L`SqpdYCEYvRbN`J&n-8!l2$g>+5N4C?Im$?XnE;LptT$8hYmkJXg_9` z7H3zR>$8ogm)h%X9w*HfFE&=JMq_<`Xy|aVv>Nkxq0ti9#%g<`C9w7NMBByka$`AZ zt~FY9j{`5)n(g+I(5SC2q}bBI(tI2z)Ah?qJu#G$`Hhv0Ww2}~Pc|;Mlf_nZB{5|v zxl?@|KRak3sNO(RU2v`9@6hb{S+zTHb|`t|Vq-PA)JQJXSJ(Ayqb;PD+OhAt4!OW> zt%0hzI)7QxMh}{oR-GVE+)g%VFqW2=la)s6f{`;$)-N?36kyEHLmH&M`Vf2uFLevS z!eRS`QSsBhadh>v1EW}T>I8^FSUrSOArHsXEz3;gydr$KrR_AWV zJK7cQEWNrM-rLS_Cu@06+y$c+-0pC^a%0>VcZUanJYKmr{{)$GoSop2v=LSAs$AcH zI+ozHZVHoTL~jYRQ1o5)iHse2&LQz=qN(|bXx z@uEo4(Mv;Wxlw=0n3}nx(bCGsN@B=u!cFr2-c7i%Ul#A(gd5y@-%G=-{`B7ixBAmp zz^x2!_ie(h3~u*<+y9)>jfHuC<);3X7o}T$O>*L;%qC)L9)Y%sG?Yw$d~=s2-qa_hw~!AKQOb+MMF)9P&k)pUxq#RdG6pyi}CaAn$3Tt$V!m znv^!Sd5`y2;}bUbe*1o?a`nK?N#p60$pHSB29wolY^)5)yvAcQWES{9Sc}#MW_}1P z-0HL^;PKF|z4<3#@(=?Fn6T4Nl+Ioi;5SzhlM6fug$s_-6x^D+YO?kvB^~Bh7<%380A#1 z8=lZ;D%TB9=w^y;GB8y6-pc+Q7%|NERxYO8^>%{{GZ#wX1c0zHSWnC0eq%w*XCz1;S4;PZ%VcEU45IX39s{AJ)57=^!)D4 zz4GYcu6>)TiJT8(667(_rt$DztsE(e5v*BywDf)16o_{pg%<W)s!wgfp)LEN}8*5RXTYrdOU~%`{etr; zbk{<$dSe`CS|85GA|9;V6FCjnHs(;_W^>hIs+DVf?!{=TG*%DBdoDHVPv)!kSk>Kn zoU1qz_pUVBZSZlPvy%B3lWn8VXyr!U<6FtQRvnJJ8><^D4no-=Y^UECHMTvyqvpxgUhw{ntmVU194oNBe*u3;!w zug1A|d3C-1^nqS;WBSs-$1CxkEtv1fV)w=6MUB$C<6j~TT${~AbTjc10py4{9QR-Y zt~VAOM~Ii@iQh_xErY4WuF-aua!mPmWvSQ3ds2RwZPiy)6AjpicDYknC%*_qWQRb}fKyRG3U?W*@?tQI`BE(7dN_h<9%$k9?Nx^?GaFe3s>! 
zD^&a9BC4r*PRlj@tp1~Hz(rdcw{JstGvuw~?!PDAwP^q^wd;X5##ikKy87C9|Hdle zmqrW!pE%a7{saGy6Cm4p?tH$sL$J1Q6A1V4NO#v4I{W@Hf6>5iR^t6T37#F7FUudH zIM--d0IFQ`C4_7l7fNH$A?gEry4&{KJP8xv@BsttwpD z#LXl<%Qd&q#w~0{_fNE&tARBIp-T^}`+G9xQoO3?#0@(SZ!1q}t906gpZfapfxQ6e zs_k}j_tweiOJy^;y|H1es1QKcM|{# z4bHmmQ6zO2xifv;Da^`;<%7G3O0yBIrn{%qgwDmIh#IDJ}ZQDe`R zM_}5QLYa0@_HKGWjq1QiTsAg@-FHt6pBkH(ot&B(pFB4_0qxRfwXyF#KVBOfg?_nb zQ)9&F2%-(l$NP5@s5>s994{ymxDziJtgXB-@VO}7w}YOtVTLm;*yS$+LB;qUgxSMG?xmd=L+a+3_Lr855tn}9aWt((Ao@!oBK zUa~NLQz1TZCF1Nz>P#eRIx~@YU$*8Q1#jMw1U3m<$vE(VLVWekD&>wiygII|V2L+_ zTD5)eI%}`D(7Wba<#&6B>rLBm_1J8~^sP~R)sDb-M3hLl#R@k?!tIF(uPxDf;ewr! zMeDJHf>P_Fv^Q=;)FHDK&+46V-~93tF|^q<w#U7mw&WDtP-F*MIHSs?k@V=hINY_w+Qm+S5J zsuD-EO?20)dPhvP(HaSFs;`LmSqowS>q~3PjaeCn?E`Thv%RWu)>jZlZPdaWtl5Re zs-g~!*~a&8Fz#7`4YL>P?TZKY3&5%9K%sW6DxUH6#;n@#>cWFms-BFmHYm^#63xzE zY|KA7yRp9bssq>M`!@`s3w1_Rw)_tKF$P2ON=Vxuq*ru8G;c-oJKnZF{>T`F2&RY;Aa( zS>~;V{HF^CuE|%If$m>ahhYEtSv;%+m}cuOezXtVWR*+ntqrV*`S~$>Ic)E$N8-KB zxhLiwN6rFfap~y;x6+`ywns0FAE)eb^)Rs87wfGxQ-0b^M&rP3uBv-39mpPQMYJz1 zEv^TM2d!F}xDwJISV>*V#+ zbF~Mpw#bgiu$NgY@N8|&T}~P6sKZmw0&Bf&%K5;_xHr%DO3*OQMsOo8zH+wx z4Ew6p&Fm|IpEk_1FWNO3KJYIYX_hk9RqNt`WupBLF_WNXsjtj`%Y7C8Tj}fMzg>OZ z{I|O=9{7n;+-oL|CXS%845E2iA3lNXmK~%VUJzeMOy*+>S)L5-A+Pw5&7Lr$c^P2q2 zH^f}m1OG=M?#XH9GF-_vXkMDFFE1hU9V^B5Xjo5P>eRh#;x320TV4b2FT{PR3LX~J zWS1U+Yd4{0C-;qyj-8#FoEbZ3`KzxvH~Gl9T3-&lReSl?#K2VC zw-qTR*6x57D|=tO=iDT_DqHK=>CgX0XAYp0IcEnvB_EmX%e}Q8 z_smtne^tM5p`~sEAZOX^P9Iyv{)ghOK!+-u{VFs;`7*a{gwENe2O^R*$fY)!S0R=X zB@ynho~3r`LLK-3D$nLH2G&m(ncD}+bw|DFd7-&XC}pVGx{w8GrC_yl;o7yizb+J> zkBVoePJKA)KVxCtA%jHFcBs}^Ty9uC;Y0uVccZwoT0YqEp3Xx2;XRg`e6XW>;x~Jh zFtgsAZ!RaNHWn8ft#)!WaexlC6B#GuyI|_iUsPPj5_#+EaWc|eyWCp3aB)33ap#?{ z@(q&l)%l?$879Nd-nGdQHgCkjP@G&`UtepVJO=UzVkH^212rIw8zD#>1#cm7jO@Z% zV}V4&*3#UDv{%a>ROTP)H*Ny#NiM;d#IQ@`a4YRm|KhxemHCIra%+sdF(ouA-*O$! 
zN^n*y`;A;>B|uvGWm8HC3{D>N>H;FnGBH~X;561xf|CA@Y(cD@G#3L#ie!?lNm3f2 zkvfA6Ir8I;@^2|CNsA|SyS z7dGZy>ZsvC#$`a{AB0D&c@UBdO)@``8@4VrCC8wDlQ*pd!wh8|e@;efn;>4sNq%nA zL7i(baY{}Fu!O4!#;6{f9!$p1jZB;$ zC6F#`S zRI9O?u+I-OIW-2B!vulU6E_Z29*68fp{P|F-VT_)Y!;4zn}-I8yG!0=z&fX z{-72wlhNU`1Z<|0LoY@ZCT8S(ZS1VDh7{B1PfgE^&zzqbOU_JAjvBG2$7&CckBm*< zmrP8G{HK%i(_Kzd?-HgpQB`W19{szNrLR^dUN$3`5g?wo*a6OJxU(NEmJ>?@H$o3%0!+w z*&{efvb=o*!h5rH0daPj>eKbrCs@&EE;d%`m+I?-$wQ6B#bmTmUv+7~CTZHX2$31y zH)J`Qww~pNW}V9$giw&^Ab@Je@+8_9n;VE)1er^8N>m$IEC6)JrG@Y4V9RbEr&6T8dfCfFrMbvsr5){1W9Y(AT8HDOmVB;bGfS8| zt@ZX{#^nwV9qu^b4-ehZF&YjJ>8CqUG}tj{56KnNPxtU>$8meei6i}V4~IJLv4^*j zY0gjg@L0#q_VCtfQ9s?oJ3DT&hc{J=`sp6t-En>VLmXY|t`_;(zwT+9rH;9c2SPQy zlN|Ai+(e8*u{j*V+QRYBdd1}EoY)~w>dP14hxLmqNag10!S&=)v-M=MupwuCGS_Ub zqX>|>O9_%Z;SNl_s^esV?ClvQ6_M_A7+6l{k6JceE=03!Ztxu1mcjlk=wb>t|FL5L z(zkUSqAgai!$L~fdsUY^G-HQF?u`!FDHC@f>)!M%mGW}$?MV-7(d^!vZV5F>#Jl(Q zgYhwUmg@eF*A@D2fsq|+Dg|G-FOyM`XFHF9(i-pYx-Pz#1VPI(eRXu*z@VXP`{W=b zg(=z1#EdQIl~YQH9W~sMF`ANJ{%VgeCDKD*?azKuqLWvn3cHh2%1L}Rhg^a5Uj=>w{d& zQ_;=+2NT&`w!7PU^ov34!&1{Gt;cC^5xt#Cd(+vZQ)zEX={!|+CzT2ujG}Z?5`VAt zMqxBM)IW-1LGFa>Ppse)1G7#&4qn+w^97yWR+nm*qEZBv(G(RO!y5HRp$eSVptN_u z6IVITqqiN5-(aZF>`Zh!x$9d{kyrAiA%e`p5$;u`qo%M+vaT#$aA$dtx67A@lF_Dg zKIuj&&L!(SOr6evok?-lnSe8U6OXgbfStMJw)mBXGZi0-2K(7&(Dv7xYe&s5vq`#| zg1B|MbX@GAltwx(_E3*IWmbrk=l{@k2Yr8K`g`ce9d_=kpq%s9L`VIr3P4_y0@2~I z*Z5UOMS#5KCbvE*0pvAz40<3O9-D}UJdjR+Or$_M0W#5p9dE}5b(Cyk;BY7bWMc5B z2ZA^~n*!+q$k`M~7eLPTxO0|;s0$!xlK`X(AZJq`$j`~hiH1Vd4UowcNH;(xIc?@J z=?2K;HBO{Z6CjhEFZzpuO+rj!b9Ai#zZMXJMrgeELA0@e%Hh1j zDcLUY(W4yxkq+IiFE-fQT8$N1FShR3$=UK)e(EMD$b{JJ%u8#7w<{?2;S3u)=sLUJ zqTh8T=d?|cLV1J>`?0KJASaxf8D07 z!0R!I?0q^2CstqIb&WBhW%@ss9)pxnf6Vvna{eFlJ-dAW$GlK?x&DtK)ZJN1ka|3F zv?8M)Adja&l=1(#=VX`d|9Ennq3@1|JkCif2L!2I_sfB!ApohTK(uJi|Ao>G1HH0Fmp9t%x6F=9wRgI} zg;F?`Lm3vhK%Rg49$**tg;PAdr{fD8ya>Q~eX(>Mz{j564qVf7F$Y}R>ct#zZLAmf zg+n?WcC7d!GaZh&GEGmEj-|M0qMpcs(^NgNYan|MxF>Eol9j>r309JubNXIR=Uftf 
zIh`&^^ku&sISve%<$fmuDFI|TopUO#M#ub2N}{i(KqS#uQ^t`*Urhp^N}{i(m>|*D zqQDN4=xZquN%XZ86G`;7ekb3fs1qP-x84?iSaMWMRoaN2F82RaA+d}uiG|?0O4jQa z5|>`u9>VqG99sFOCN8zFqV<~FLE_7VBr>?gQ6TRXPB0xVAWFRBq%Hr3U+6i&z7itX z97PGzx3yef^I`^E8OTO_jprIu2A{eDOFDJ$sdSp1!uM3lHBRMwDrHH^(B-L=CGGs` zr7N%`FCq7(9B{GZr5tc!dMRZ|r|n%zg%8{48j!C!BQGHhTG5 zrxj2W3@@V<{D{q5#6J4Q=(hfiY+~H*B^4(D3jj8z6_Wee}oo<-gzTY@-E2~ISr~E@5Zz;Iz2qe~i zsOyG!5_Z%PpVAK(qLcl@&bE;$f(50ELBggE6JdHa8*Q0NSfR}etK=gkI`uY`OgFsG z5uVuvc>!25h-r~4Dt@?tIP`B}{~s>gbXO>H{^*Abue>*Y9rq-l#FpM%h_3BFn~YeZ zTH7ouGotZn;_1Z3cZ!BFcFOFgznj*OvUnaZ3EN_sfxfxGVKl>|jIdQv=MF)-+`)Ob zHy5rt5Z?v0O4Z6+3mt!2DD>a9X`{?^5vH=9E~gk07crHprMDKkbb_wLUq`j_wnFrw zLeV;)%Gk{5c;`3t|gl#bp7_Hio0Yh53=EN4On_&~r8z-4ODw{9s`IcNa=JZ|~-k?Mx5)vIn$ycVVB7?y z*@8!_yIOj`IQuhL3iS&|7aGeXFiN{k1DaO;;cB&M!*i09` zYbbfJd5MTlYtXsz?jkNAFGd;>z@<(k277@kBd`<^qTx~tbFr8uaKSoO#+Q#TsYe|0 zET;9uz4Y5DH$7~uKDo-x4qk%icoR{pL1xq04H@2F*t;)2>ZuS{OV1TbSL4N)XsljX zzqm6UQo>_PsD3tgF#w+{?7J$y#RKfAmYx?`_p#(IX6&zKyJ^7#ogQ|)y_*(4SJ*EN(zV!AE&Y7H#U5JlAZxLQ7C)~R2YrjaYUu-vW8fMI znD!>Q0o2}V>4Pr`)LuYQGXu33P#<)(Lb|fXE5A^PKAH-B z3D;jJboS!c(lAkZs-=IyF#G$jDD;E)Gj&Oo!(!zDo&AdfCwCpu&~d7zk7Pun>>gw- zxCx3HxfYavG2cQUc#yS_i2X&i(4JWmzQ0t!^wTCu3F%4E?d%khSie+2`|xjp^-Be; zKVL*R|55=B%t7Mi#|qrz<6hFGiUAVJ97S9~{;gH!Dj#12(#J4bJtQ#vp#bNnKzd{?@hu})vbGkTr5&dVrRLi=RTGg}v*^!J&hLQVeOlxUsx^`KYqwEm;x@P134$X{k-z zdE|~ej@tb}ZDPT!%s6T3zDpyN88G;e36i)Vw7N|WEe)Yqy6aY~cRQ=8On8b@B!p3mTJd@BvG!hm(~#S!Fg6mD+N2-45C{Zfo=qY3?MIHWd7DbVd6U<%`^4%JJXqcN}&O(?sTegkrhHU z+RbIfjQv2I5l3PQ8akU(%C9NzSjR#j;9fR-(giAo;zp74mOZ;kN;$ z+7F1~2kguV9xbDl%b23VrhfeS!^FDL8#-wxhol}5%BZh7(kR|_seYL$G*EVL$`(K) zX?=%QVxDAo!^*4gPmZ5>mGPqon$inl;nkp1Q~jcfbe_cX-R~-L<9f&8TO;MND)hcq!@c`2bp7w0;uHw2WwKhO~VTOoO9 znwWAXA++k{nH@5#(TM7$rUED1B4kAH%{3d#XVFSHZ6k_+`#^eP6s9m2vX-eyC4;Y( zqcPbl$YDJx)alea36P<@v{_nLB2hzWC@$5)_|-u1!te(_$c*x&n|9D3?Ut{31i|tM z;lY$}cnLK4K)5al_XpYBOAY>mOb*~Epm?LA7m@>Lz<bn+kO*Q0=&T zy~*xDlpULur%e=e2QP64&a@_&3*9^D6nRaehvJJ?|ez8Ha 
z6P%jbdteOUk74t^0TB4P$oI?$A1_MfgP>#Qo3k^lc`G1)KX&0`C!?-^}ryi*` zVq7^^em%Qn07W8bv3%K~wBQ0`7yR)u?2pUI@YMKDzye9_7Ckt+zDSZ2@$2Pe0DEQG zk5nFjO}o6!2f}pgtOX~|yo?>)vPa~FR0m=y^NGv3BlV=cB#u~Zt{z3OtKLGo&pNpD zkGK^BFh}XErN)_7l-Xa(+FDRLoELg&S4z@c;_|;mTxNEAk}Xn`43~(l6Z;60U^@lk z9?@=6&*w!@6`jMF18Z(|;V2nd7Oyuzx%`SDBMZyD&7|0MeV3Sa$>D8d+B&(6vfU)s zEZ>~+`z~D6z+firJ}r(8VnNDoM4i||2g8(#XcnY%F zUB3{-Hbt7g5Ns5Rbp1lGQOYtFz8b_fB_L4@kO)#y^kSR0R@sYfia`3R#J0k0m1^ZL z3myMUROr7pKL|H}PPKMELun9ySs>LyBdKU4e-%bTiT)|N{wXq&zX~IvNYlRxBcVwD ze-%bj(MbNLK+r^*2_+y=43IREzbW+G65d+nTMvgKkp8AX;KbpCwe`&c!D9zWW(CCn zN!Hdk3xtpTTOfV2K>XMjf%MG+0pwqlf?45iwaS~K=q*vP|NYSpp^u<&+7_RWez*ra zIM!np1S=o$l2?$mNOlZy7{6x`4!JWGYi8%M1;Bb!nkdq5-CZcUCBcdeXBUofQ5)-$ zw%SfjYuw<_bEtUq%oN zdcG+l_`Lk7dE43`1vk?hrBJ9zoM+cMJ+K%hMk#v3 zuVU9}KNERFg(8?f6CtaPBda>Am46m>ya!ozOI}tvMLSs0e(QlTC4Xf!{IjUbDZ8Cw z%6CTIM??usC^}3ig2_7rCKPG<&dB?SD1ym5#T=3~oswGbioB1A5|AhcNMf~jMc#7Z ztyT6uB8otISLA&}o$_Km8<96B>QDj_#Q;fC>)EKs+3>st(zDU^PQ+0J(z6k{bGJgB zu4?6{qKODbZ4E$8o5OC`vO5I z()s%WLAr$5Pe;TpH4I8Xq8K1ajQ@1hlZ3Zc`R0S62&A8mh+T@bT_VVH5wU9rNd%!7 zAc-K)MMSUtTOd6b5x(|CAUzimzrGWKbXO}s6LtJ=5M*EzK{&F&-5buL-Ao+WH$M{* zZWJQjBF@hS;!pw+iVhKq5a(wDaVXOCX9IC4g2>N`IM2#D?G|-D7-60M7~VVGzAs09 z;?pqAk!0Q-GnkRwX5jDWQ9+r-oq`4}gLbRXPn|62Ms#6X7a65!;bz|i50p2SL@^L! 
zRA3fHCC>d|HY#!M2O|;%ePu@V!3dk%(T9opP!#D>XF<|LQ4EmeVERx7Nfh`{#03z( zGLSwLB}zLGc_{rt6y4@uN|sR!kR;1~AtLPUno5@aLUgU;C7N2>TW*cV3=&O05=Hm) z-;;2hkPQp{lnswlBo?Ga*+)$zmo{9-5uSV6aA~vaL_!KUX_3KvB=Y{uZWB@;iTVNx z5>g+DK*6`g74ebiwpWHC&XtcucfP_S2&9khgrM!$ie_&?&_CabpxkG`2?)x4_R$PM zshuCq5S07uqZxu_5d3I{U>O8IDg=LVH>)PDR{lfO@$ZU-6X%lq|KWecg-s7{$0z>% zu4GKct9QEP^UWzvTLfx zLa%8Kf$_@bd=JeTb2WdliOWjv??pR5*FG6@NKX04&NdBvYaYUoN+YEz)n(-1;6hrd z<=oclebz0VbmR0RT=@ojM7W@3EZcaRLwxW9?rnXqz>et+8X=vTj%tecwVk};PW}XC z{USaIyao5_Ph4SM0~dMFW%m$0|1AVP^0v5}9i}1iGi#s?i&VCH3~-2Canw-B^2*yp zLyY`3e3xpj)S}C&q*hOIMZ1p~;8aJruOr-XdmP0QS#}O2PDviUu-u%hFCR^35Z}-R zjHTA)-0>S5JO=`tnUu|Tm{lBH3fG%^SZ5)1^}ijIdxDcK&Npj^R0L;oP7m?Z934s4 zmNz)NbJ&KmsmZ0r97n$uFv4pRK4Ed#r~J0$JzvG~m{_*2k^Sj4&NZ$k7n_&tvja+9 zSKujp9u;eHUHbohQqLzciS8_yXSc$NH(Q-M>A>l`3pWB?2; zH@)(gZ>)!F-@UltD?dlQG;}#uTJ9rmMFa`Qw=L{sPm_civ7H@{baJ-wbX|!NS{%vW z^)M`FEZgpg>@A+z1jPLJ*tzQ)ErLLn)@@sXlOJSc)DaeQxD!n=P95}eFKw(De#ih0 zP3km~gq>6_J9y>#NO{DW7;+K)hlo3~E%!W@*7zq;bW8t#DUfi+UZTN1#FQgs?Aj|y zDeylqFByoE-85^*oJuZ%wOClTJ37vyb;r5k=WitgzDY{dC#+$1Em+wig&4(CpF%n4Q2 zNNL2&lMVqWdgR@ya>FA0T~#Ax@Iboo;MN8#K^*9rPJQSwo03@yQn)Y4<05O+>E@QCLO)^!eS%D3*S zV>)h2P^q=wH3G?40T9lBrSm1dADV~xlHUK>Y`&!Te>SQ*8;tp3zCIg~PyX{on=cgq zFGbOP{m&M)*4#B^VFc>m&5@3Ld+ug>Yz6|evjW=Egyl}()yF}w1Q z4$=fK4S7>r*HcLB!in$0Cnmpxrm0psL-n2bF3)!I--pvgSLQoGOea|;850#ISti++ zf{_rLGvP~7pK~TqWRkxW-R>L_6fqLM6dgY4mu!TQ@TKVPd*j2#0o47|C>rB*#*8Ao zUN9Rr2t`)@{Nr7Ozw#nRQauO$|dII_oZU zL;ncpJsr!ST!}5DD^n|FFhdA2ZYgw5m2=Eh=VG=s_L#Ku&3!8^H$Oj$Wv`;rVg8`S z5WF_UIn_9=Zw@EnxpQZh(m9kl457mu%4hfWYz~!Z^7W`MNDG<6UylwuXQMf$C=TB5 zrxY(9#e2Rdo)9A`_xNw3=)dD=|DNPbx=e!L^D#8Bm+qC!ADKm@*vz$g9`Llx<-dtK z_r;@lp#iSV_#07a_rTpLa9a7(q}E*8yJB0+4px-i$`G__m)>Sj7$9J}%~Na-kbvKa zdUwTVd_Pq97OSnl;tqTc9J5s7?4FUyHFh*&VIc+7%O$tLgsf;mmgEJJOIWlAboH%> zEZ5K#<-d(cCcGyH_QA(E;ZIp$H3nJ31I-i5w2!&SZ%k4&Rn6`K(*TfO<{+V{B<}!a zD=s;Wz+LkqeK*}7!pex09t4rY^eH{)Ek*Bjk99VbqWAMrWcuDxyy@onKfow(0`v8* zBK!TnE+x}0Y1w9hNW9;Q+uBgGnL!6?XcyN#Jsb54jR%rsz!vus2@j!(%RiKBBfUyQ 
zq7waz^@!q)!=;tfrbdVpHj@vR z*(ElZ$xfgM-oj?^DbJRr;_rPh zbz2t@;YjfG2+-Jd=!+B)Mq!E^5qoq>;%`byFtu{A{uIIr1l&sZh-=qE%?%{+LNQ#< z0!5t}dS#Nb2jTxAnSz0$Dxp-#zL2!CtT*UBiBrXB>l`W024`HWuGc)pTu!^3Gjdog z$=l|l_V|{7s63pMC6(?4TLqR>y4Smk-p2~h!IJMPrrL?pz1~$!wUbUH%DalGcG77a zibQdei%Mq$TMb`-_XkFulQo%N%D$h(Vu&iBj(4)pWx;-Mp6>U9~YJH96r>F3?W zSKjCQA$RIo)`fqm+$oBoAI>{bOvj_x!n4_UlsoloHlA+!c{UqQH~ls16iU(c9JdR@ec~9|3K$)wrD8Axk{56Fs;)uNc z_ZOoR{hudHArlPMP6}m?5g*{b7xTervw;J&I>@!fa*~z04lALgTSM*L4WzH?2QJR} zY|+!x=umme6mKJ7NO_+GkWTj!uyMTE)x|BLC^OWZ*U1A6rtz)i^=f-3VOc3 zc*mVw1tM~Ce&P8d!HDlq+-=Rys+H@ERm^(lT4%#eM0BROw|0D%fG5BAs+tq9VJT%0 zg4Gn(JgajVJrvzrBqfPC9a=9#1A{QMUWWF3HZ&clc)sYv7+}lLo-cBdfg?*VLwmk> zy9;CRmZ3dgJpPLKCmgGA(&7Wf=y3m=a&8W;ns%PBgd8oQ#He%?wfDd>9LnavGfPOu zZmvn*PD%%)ucaD0xqVKgkWdB`imaDa>2P;3T#yRG-NkS}kZp203-W=YJ|JQqi(L%& z16qOOHdmYj`EZeYFYZgup?zDTwUKihk?#xLnnq_+=NvmJ8oNOQSOJaQpz+}h4IMD~ zaIxybW^uF|G(KG9$_q&$G6jQqV5)>~tX##A9^Rf2jl{tJex_iK!96oc? z=EeKnG$TR%F=b01L1lg&TwYoPf>mo_2idY$>?Z9c5O5FlgeGYbdVVycr;guzw3zyq z_CU{%7H_`A(%j>Sqc^{-XVHW%E!2`c7RA{A8+cki}*#B4@-5xRvzbRM=`066xUgGI$0M2EUiVe>@w! 
zPAGl6=u?^w}J_tyZylpe}qEs*+t8IgM zLRfmZGe}>Y5LF1lU;-hk5aJUVA*vAK6UAzYI!5IQA|6+3wS@6(|y2vKgQ~B7yS|OINA^9zg_f4#3_RLZx=Zt{!a=dA4xO&og!Hv zZ%8A=F8(4R`gc1c{`Vh+X~a5E{yRl~pggt%=C%j)p^#^PJa{lvD}VA;^Oc&lsbed*>)38R7s3=rr85%0nd4qwZriaf zPt)!FBLTW;p~QKxavmj$=8xox{nL$09m^_pE_t#uILCfcp;8>TI)#9-v&cP|`dH1N zofcVaU^gaBvg5@GZ zK+oKe?fY^Bl|`tG3ags+qj3(S31m;C{hC!O?ZTxgq2$YbWj4Pb2e9oQ&Ha~ZpPcd`F5BK9Kh}t&liS-|#)-aF&K%*KwWS}7a7CAycAd+S zJukj7^;~%$nnBD5?whqW0cRs1VC}TXqW^r*v3bin_KzzPPs+)uhMRe@X z7rk$kB0Bcxi}*(Wn+=xEo%E@GW)ecACy=TQ!jP$f*CFoz3-6YFvy&O8Dux-TKUJk2XYMw;Vz zJ=$D>ejjla`7m%u`o)CTf*T_pyBOewJdzx$^R!SziqWP;==jBqj`E9svDkMkRECaU zES_*zb3iEQ_{HMgW8p1y{9^He(fBKlj)I2tQWh_Of?Wq{u;ax2=cybt`{!G(Xem`6vxbozxIB zbZiF!g4~Jt>y2e4N=_qVvL`N40YO|}`<~Y{F3%vt29t+%8Jd2+TF(jZ>Lxf2Ya&}` zBt4|G$e$PTl9J_SlS6GfY;1n!Os=1;ubo!wF+FJ4klO^6 z+8f31moG3|AtbDtMvd=g6-YYt#N7B`x_2-Ice3`ai_#dh#u%f7d$^Ad4POMug7p`v z>bP24fb6V88i4WGx-gpM%>dNyR#mhgU%p_wX%q`(qHonsGARAj#fqE&Y~E zw>kej2StY6akQZJFh>g`!U4#2?n?HMq|7V|}bSy2~4 z^+?V=Q~K^*2n9d~G0=+RQFP$7ecpo1GbOL>Q)JxFl>CKI+-q3+i4w~G%Wxr7kM#GS zD0%&z5_nK_`a4C4@)ITWcd6mM)ylg{9pCh7xLkbiDs}CTe@3CUUa8^lEn!r@GqPo3 zAH52^CjN6VHt{}gM7NFm<%!{ap}rU#F2Te`T6Nm zX;1&2)K9^O8Wb{7(AFx@J2i3-nEanE?b>ZA5~@Lk=Sn_+Y-RVLD+QsOR-V%e)7>np zLHU0z`P-bVObZ^kz_2~A8U%*_#BmYtKUj*c>;Jcoi)8FGA%41K;-b#`tRUM{)4ha~ zWkdud-Hk)cgX$_04RjV#YD{foEf;K%O~|>ZI&-g`p3VYv*&g}~LGVZEenM>wW@VW9 zsF)-iqS&jm0{EdLZS_`*T?;wyuL>ipL-_bKs^zVBBF&Lt$}IumswdPtU4s`wUJ7^_ z4se+clUo$cIYXT)?8A4CXx1Ybd)=+q9mvR-&kke+Y0X;WpED|Mg5%2H88mK2@Y?-VW3+8*UeRIh}f3|#o(y`b>bT9hpb{& zfz;fYTqc)vo^S>jPrZE_Cdoz3ZAL{8RyvQBzbIp!5DuT(8N#7*acEq8klM-cCQ=pV z3{q7GxqhYOGZ1(SWB*Etn}D4dRWjPIlsN3^i%8X9Dc#`G!l;Z?{gu+8<6f$AmDR77 zqTfiRsuszwmO6LYrCGi1Dyv^(XYaTBw~YA%ku;7}y1IL;sOhqAtR*Yz*GgpH95z{L z_gMY9KwWDd`P|evo(riwYAZCTn*o#H{q;OB%AeQ~m`~KtVJ7-eHv^`%{s{s5kG8dl zV4o|IL;I=SAxFc^N4JQWC2xZ8amc0R%OqszLy+!V0rJ4sFth2E+oK|jU->AlIu;-o zfSA^fQ~2aMmsc7TVsY0h($_MFU2i62rP%5gAYTu!nKB^ZIUmH5m5|}5gKJ(9c0OK9 zeeiM_l!)tkzTX7Z%h&mJml2=AvkW&$CE0T=9UC%_2-(zu<9TDluiM!ljdqJgqr{9w 
zJ{2H#_m&M%-Tq@9Ol5q)>2gKs+nYveiGa;Fl}P5gAeb#wb!*a;9z&uZCwBbaVRPrD ztr<$~jSF0nYw6@nfncQ`DU4gbF(TDziEz4`BuNjUun8Xv*Bc>Zo$>>F$F2smH5?LR zxB%(NCmRiS|CNpzU(`7vIKt<``}@ctMk-8Ds8Vuzq(dqw*ML~Ep<&Di>PQwr>Ni@8 zWVB0S$)y+!M3-fP8LGgr&%L&*Sj>?MIW5aakSswmNltHoKe9>rE6_wYKpfTe?CJID z;=Rxr;}xqpd=R;2Jf*X6pSTkC@ELibGeNUmT;~8>+%M#C`woqr=QmpJJeln|BupqF zb2C6O{nF|N%L>Z*J2}u9!byC`mQXS8hfN9Wt<~+ewSCe%fJ?n>+B27M^l2-DA2%J) zK+MBHOL$H-B=N|2d)%#CWmuB+?z1{Jt!}wQbmzT}R5<6oT{hzFs`%{=e6Uv22~!*yIQ;V^Q7qAJ^z z`T8Xc%b!^{*rD1j7-JhOYVvx4U=(o2&82s?utXpxWqb z?kvt7s%@4{L7su1Ic->CL98>Ia=Rn)9mTcKhfR?>ap#HSgO0=eYG?ecgZXt*>sc1+ zT(Z?j?{cE;aI61%O`iIudWggH=|hvVExuomEQ7(MMw%2yAz50L7eZ@S8P*P(E?FX= zmfLnX#Y3;@isExzqmzf`j-5Du{N6iXdHncYhY8=fgd7RvCBEfnp7d2W7H63-Os9It zd&0(=7BTZbm+9m3b$qVmZ#L{T@5$#%+-&IEqPBdlbnTH)L~Z$8iQ7wlQEus8DMVi= zkskEV3uI$3V|F8UnzO#JVl83m6CVPQ-p|LDdC)d!1xJ``h^nTo;hwF zYxjA;m-x*aH%Cem20}nhNy0B=s3}SKg^bsgB>Y0@8s|9djVhq_LaEa_>y@MKuY;qG z5_nK_jyj6y?0+ps-SxOadvUIQL$1(0tTTN}W+8dP^lo#i_A0vajo=5R1f+ZsNLG}~ zZ&$VQx229Zm0jdrX>NZjm!DjuyW~;&_IHu;0EF^Dbd;wE<-Z*$Pm$ig9Vkx`%6~gh zUio-GQhs?{q`O4XlpJ{A!ie_{hKPzzic*SYU90^mYlgm$%N&T7z9vJ!n2b+u~Yl~jzSL*kS-hn zDMFREmc2ioBIvxe+~;n5p$GxrTISZ){~|fLTV~)hW#r_4%;coq)$_86N$E}Q=pDNQ zhE&J{dy$;nd`j+1l9CqAPidrJFyjm~k{9fmj7GAopDBAG2~D8UGi5I%DI$rUDSIJF z5!v%h86m0o!)_McJIdc7{;*r??j2FZYQDKUU_DLoOR69O-#k8PIp~a&w>W2G%>lDm%x;ftWj|Eg`|X6_evA zukPXxJ_{oEmcIwXrL#K45kH;zPIM72r?8Y1u-Ia2Q;Zy&mRnltYt7~61$|yecJ0L` zw*z=XTd{wC)d>3_H)pgP+zpPRZBhP&pd9~G+{Ito>3mh}G%I-N*-+=~NRp~?SB3i$ zxqK!rwl}4GTr#nQk^eHgUu8@!tjpXw)};tPek`mqiZH^*!YZQ(>wc{4k8)6Cm3^%2 zk8)6Cm3^$tQ4X!LKCQA}{VuLD0D>wI1FBkOzZzB>MS$|!>BuZC6D$EZI3GOe;c zt+J1YRYnOsD7sZfkyZBbu*&vUEB~t8@yC9ZDVzIWmAkHr|D#sfUahiEX_b+?HZGqe zhLU&by{wDO_3?7AG4Cb(|KAd0Ug29N*6}SxnDJ9#iBW_LJ{6W2MfmnpVTn;>iG3<8 zF^Vj)Picw$xs-#wijMzIIr{hIVqP?RrqSINN8Q2oqvni1gv>GTEFQoIXJ)44o+3er z&m*|WU*L=bvX4lw-SM^RTxbuty7qBpJ6yWO>6i1co*A1_X26_DH}!;@&+Y?3j>#uA z#W%y#_h9F{3zd+b?uM-`VL84SKI7#TBZgeRQ|`RRLh%%}YJRs|y53^-R;B_EuE`#- 
z0)My6<&n1%kWvjQ{9d_qU|_%bFYUIw6c?8zV6|K|_bn}Z0G!_|Uwut{kB39~-<3=I z1`a0KSo}SL_~k68*X|c_bHGJ;%196-VL(gw86SIKpzPCE5(Gr&K2wfw-)GAHhD+W8?K9=;UH$|`80a%)e;))z80h!Q6?a69A`JBV<-NC>jNU74{13m2 zG8%xO3dDe_wDCU-GMcxb`iDUqrwFQl7-Y1Ly8O}0B%}9AM*mTe(Uic0qLa}SA;TX@ zM!&s?Z=hPOe74;2m*qnLxA+=;>eWqS=i0Hk`rgI$%}4IeSETV9{JHKUdfv+@?;?=} z;^kC?$$eY%B(68x$vjR&9a#3cX(3(u@1=KWq_!Rt_j4*Gny%pKbQ zR(Uj4-7@`!a`fL2c>mI6(%2CjFEBRUub0IN?FvN~Y=&q)`ASa6cH@G-R6v2Bh35Lv zaK#Rv1VV8`Xem(t(Bm(qodJzBAEsJkl359S6LyAP`$@@)t1i}Z`-T}~+413-MSy#I!*32XX` zWv(ezs+4L_;Y;PxEolV_Gah6x<*E3R7Nf+YwXh}}PfQ;G^Gjtezf$gGRakzx9JzZ@ zDFKh7lQ@y>hBCx((-tWQ-6oK^RGD#BliKwcO`}1;iP@jlmb>xH11OxKy z>A8u^CY1BSa%CTLNkB{~uB#^-`f4U;yVwfc9jb(j32C)`=*B`pEX*OULv)8!bYYz1 zTNbZRS|tQQI6KUb$$iiZLldF?yzKKoz}=Adr(!8Gv_CK3rVrSPNQkr-%8@(7P6;AW z43L!G{z3*xWO$)mb#VtO1L=h_SCKeKl)hT_w=GvA1$QV0NJ@?UYT0M{@fJv5E$_X> zQ(vjEUoCUnvV%m^ua*66^Hob6r5GS7H1f3!Cxu47R`zF9sSKpAWh_A5zbHrUKyWou ze1Kwrr1-#JWH>24@E7I1fnRjd%3p{<9Zs;z*USFuM=LS=lusO|4R&3~NSg*e4B=|`6QC)4e;q@9v= zS4MEZ*J%#TWRld3IKG6GyZ;Vl`tk4PjkDOTxevI*>WfV$g>~BSZ{-j8k-ae2KCQqH zRk)k^Ykh2x6U`7C3#>2My0%2LS`5A!B5!hn&P9xMyoMlXn-JteTW(-o{FmNmVyCAp z)57Ay$lyFdt%%M_K9!{jxFDWB9|D~5(J=+>b+ShHU_Q<1ZQ|R_dA@xWvTxH=U9Y0H z$+je!O|aZ|;w>=bFI*(u4#f@|+a2v9hDzQ-dLSW$f43bo6IuObR-8a{M;wxbLjw~H zI}u0r2=`>UD|*EmA&N(c*2Zne9o%laGpNUO`hdU3+zowrcHgR0hh;yX?$icnq*rOE3wu1E@K3hyshjVs4=x?%pM<2uI|q{! 
z{5N^MRh?l-mpFW|ePHZz)^Ki1!$A(((N+zY>m=zUhr;N!G16pyaA8}MSA-^t2qlLc z#eMz8;NrIW$3uP7APy}yo4)de!He4}-<4K&w|X8rPGy&OA~GFi83eNVw2$--ameJ< zJ2$PQ7BrR@{a5fTbHHx!(Xet%Z|sV~q@)z~5oLE;z#n!GF>0qsNr~ZJGOiSKNxCe( z7G_rhIv0HQ5t(2la-xO+D$US!$IYDn1Mt(BhAER<8l$p&1;^AGbW?`+WO9gHkoM9X zsk_R};Q}G?OPbx2X+GJSZ9BD3kldm{`?b3jQ*yx?Y=FjJXs5bs*9U?IQh4owBv4U+ z93e#8{OY^Wm)LwndZ-*yn}^lu*Utbu1f3OCMlk5=ssc@*vF1oL`wadKoYI zD>>647@%y!)C%+qCI#z&sso6!q+9>+w5+n{`TWuJODL?E5$%qSY23$l0G66%S*Jlm z#mbxG9Fy@U79)2HF)d2H##FLEQ)?WZB&WUVenlP4O*%6y zPFL1T7^AigGf2I_7Iz0Wa1j6{#il~dq{(GV;-#8NNhyhntU)2Za=If1o*_&kNwkUB z)X&6n(wrv&Q^|#!B)|Eb)rqMbwwK>!d-*+sL_l7z^d>K%t%CHQA+Pq#YBmqu#m;%` zm~?=iHhGHyI{4{!V*6;KcENh@drCW$ipqr&kx-S9%P;|}WZrq0=5{0USaN=fcE zEW_53^GY~fI*&PO+1*N`WzB3+xgGx7fuGn6{InMGmZpAnyMv+vy;G$j(?7Q1e%!zZ zy(cJ;R@XF0(I#eew99ch1-xLO3&`(ybza#F8yBtt`*#4818;-=k zANeH@kWXvL|0Z5Pcji=+!p3cPN+?%aRvSx^Ywd%!b&`pZG$1ROJ=pzb02?c-IK_1P zyk2DLD}Hv?p?x|TDcHsi;p+*^i9})s2zR`iIyfxR8T0S40kZz>*ckFzMv^~l<)pm= zis2yG{ADItVN7NS0VEt8Yder>q){MoS~QP3=f3ohG-q8m!P%gvO zp)enC=knxuTC>h^9^lpE1k-oA3j;xKO6p|GxRl{Dj>{K#dji+R>;)Kd$Yk9=ljMMt)PUw|N0+w46rc0T~cdFe<3B?SS zuM)ct$;_q*89(TtgLJ|=C)U<3+WwtZFm`j~QUdeG(n1s2F-)>{VN@Ht zidN?uZ06C!TTjX$vF}!}ukn!z8AUQJnPIa6yxQl?qd*5zyd4dtSn+*SmaTXSieqCC z;%Y6mpUl2;oPygfF(oi_u0- zz}`Bes6GD^!v`kdEgzBPRwX1zU=a@r5h3WIJ!{5UEE=j4MRP=GpL9$gR)gSRfH*zS zrbKpy=GY2XMlV}h$q83I9jc!b8wG=Xg`1Tl$tl!`Cj-{0C{s^-p$6>zuyJLX&`8iK@M>qWH99t|7ISh zoZ{cC?7uB*VlWOJPGJJ#Tb0s@w1Q3z@F0UJ=lHksFy$WqR%QRutO+o`Rk`!HF7Sy; z`>T~Vb#}a`v!J{Hr)QBf?7RK4-rmy5Es#&>kkYVj<8lrhzXhRe$>2BNhg4Gh3EDGK^k5x~ zV!B4sdQH!CSCPb5(i+U+0UAnLq-SQ1uG1n`>03H`t_p8K^DUiMyRYC;nK^q)Cttyn zRk~kR=|Aa=p6zs2DP?N^lg@a5{7eDfqpWegtrHdXhw?_LwzJfjbX&ugdE11k6H%r# zT9z@wi^IpZEdwZMqnov-p$YF39x~sVTTq4&!v!P9sP+IxeOss3&Fuk$cw48}&Fuk$ zep{#4&FumD-P=07ZtfnS&b_S@{ch5a8yG*{S-Q3Vfvw{fKf24#k!?Dd>2k<-9={g^ z>Oy)c5Hq}`m;QLC7X((r`rLn$Eiiik-ap>ScU3QWq(Jf=ouwQ4IS!b42s9a{$%{dn zi;ME3!CE?fmAdUV^O;O5lyqB`j^PC(7e;i^_#E<*V)UFLFDdqpJkce^-qGp51T5qw 
zwcgQ5-19#NR^q|CJ4@H~{~MCcHlvU2@sg1@-0cz(mV4^C%_DClu-pSPYCK7@c&FsT z8cLDKAa_4zsL~dnvahM@%-SJ7FK4gpPKjVb+M2!JKpsVk=?h{x3P~n2x{N}S$?win zNHY1|o&HP4qL4zB@9yMYh(8{TuXUHtyU%2C`TQ~L0k_zzd)zBm;?=+_Zy1iy1>vDaHqNeX5NXZ!s`b8 z%L`3i<1~bVkzpj@?PKcoKy-2bBYcMGHQbWFI-@2J zef0=8n3r$t_~}9+{+-^q&|59u*ik+4o4xK!@>qx=MHJ_tgN4(S1Ale+7}eEZq3fgu z-*F_nqot2TCMWK^^Hsh97k$j57!PA^*t<4UsvKJ#A^*F6uZ-=z*3g%`TT2)cI&~;T zS{FUAto$ybCv%)<D)>E;mKnuFo! zxQLioQ{Qz@W*!`$PNpYM&pa|*8%y{-RhxX6DD-G@>QP>fB_oqlkJiS|JUEj)I5{yo zR+~ize>{xB&LBI^38lM=S zdDJ>QJw9_zot>Vn$u%)GT$>pmIX^L6BYCq%BIa14k&TW|k4y}YpB)?JQlE45nT$O= zc5Wt_esFkVV)H<;xgR+z;gK1#F)CG>qh}v8ylI?fU;jB5Dx?=a0N_H zjg5@+3wn$}hT+&P+~@8nLIxY7dW(j7{H{OiYUWr<3#3V<0j*JTq)v0|RpLo&lUXKRs@Q8$UNQ zR;!(#ni-!wcbHjz1j;b~Q=3?`2}IL31(`kbTIun3|H zd`4#S)#wmjVWP8fCg;Y^OpKoyJ2x^Wd?p3vk@4xV!%WyXxSbJ9k@=BfI)zc_SR4sn zOp$))SQjEQpUL>?WO(#pfQ*LPaKQAqXBQexpC5V9(;W8iiVIP-m~Uruw)^2ShP~oIRw)0q&MI3_0I)IUnvE5oxdT@E+S()jt?6d=f{uWTu*N)^G|+9 z4tKu@)@MixbMS=m$@`Pr*XD*m`v*Y8--N1;2x7Eadd+Vt;HAv>39fp5^Zm&xA0kfY zKAU{yN7-LGJR?x9yHLS{ay`LTcx_1+ScbE}?=V+lzgR!WV$=-rgw(r>O_I?3(zJ{N z6ASru>wH@2cV(rF#>qijRZe5CuPwEQ(C&|shqgRqFQlj(l*e5fjmZkh5v@2qaLkmR zmTwtE*V(9S|RuJvCBhm z7;qlC%a(?qM(!h?$$;LmpmZ79vs;E+k(ndtny7v#ndvg5dGt4PCySlWQ*4_4q#$rjPry1!;XI*9A0;+wyF6ElkkX zHN9LpWw)(i9xh`|vWm5!i8W3TePl z9ypl{2t#l7rF`PpCLlHB&=^41aDM}az&Th50N>-feRcL$2yA)jT1pm;yO8Tv@;XAH z=;|xIN6n5jZdAy#9zL{SQ@>^Q6ecrw#il6@Zr-#T%^4mLpV<&6j~j1tC)xuktywKaolh#hv8yNkPcc3{^Vt1lA-cN%x1~+9vhSBqt{1pMuZk>x_{gJfQ^aZy0?@R%tV}qXa_tY2ja#W#1L)@h~h@ETU@z# zO;=$XsjWva400?>^pQ6nq;&ZPiK;QCH3A{+F(39!k4_smf#H7vi!6*+^zuOa&C$Gx(yvp)|nygAnejjIW_2SO*eMC7=?Li?CKB; z8$%rENG?_LNoc}SB0|Is$lPB{FxF;99YPI*9`YYoV23d63XDRidO$|M?Q08YDQfFL z8%z4LQlX~!2BiGgQFNsr#JAf#ZCS{>B>7WywzE$ zud*?z?Hqx5KAB0BkQgs_7or6c-o;gM5~gETg0_z)z)o@#p?z zq1&D4<1OCYpDgUZDt^SIFfZSSZQ4Ihg0Q=Gt}eFZTC_~AZWF6smeuD-mkb|V^^*r0 z3il9401F^@hN5-U<-YyHTd;Uvp>OY8#YFa9@gMZX|1W!Q0%zAz-22~i?$y=Ixw9T^ zqtQ0Lwl>=uNh8a)WE(G%Y|FN6XCxc2A>)x|WDg!`gk~gLED1XyKv=U70tpb7ge4Hx 
z0AWcW5SDBtVNW2e0g@Mz2jKxp-uGA4eeRuA!sPQl|NrL`ePsF8>8|SPt-89pyK3#^ z(XF~)t4kQiCbyp6zBOnj!NA-#d9=Tg-hX;~Z%|npwqM_Pp1{iGIKA`Rt9xe`hb8os zVmTU1_1`LIi?yF%1m^p)ntdb|0~s)5mp9c0E|_Ve?!TVYy+v4t+qD=CW`?MgNgUEH z4nw_X5f<;tUuawe3IP>o~`yICJP}bqjw0^PO3ZyIA*gQRev3 zv3q+mtJ~694aZ6`m?`5P%SRYDM{`blYDUyDcq80YX-T^l)sJMIGx%z5;&9jyT0HN{ z$~uD3QA6vXe&%^r(c0Qmwomq3xp)TCQ}JB@c!En%GW%-Ti-C7Qg$j84;QWOd}9bxsd%@ifqRC$~@ zl{l0^iY~(VglgpsLBc zaG>4@f!4iB&pRy*v~J!pIvPNpUKo-%*8_^JQ-(-hTN%?JlJ~BUknk{?I1$j6q!))I zJtJOH5FuVgc}XpD!nKO>l2wsQ9vzmK5P52^jSgzMJYCj%S)ZSc6Sc{NfwlC(LZVA2 zz@09-pKc4-xtT8Nuwr=ac$^5C*kHU(EH5t_jHij^H>TH9k9XzRj|OgP;x_Z6ja)(aHTBnA44(m3TU_feYa%=!KK z5--}diTL@vZ@h_~PF3gi#A>*T)6Hq`5~cb3%PeNpJwYgQ2i=z(vnn$2ZH>9d`x{zt zvp#SGL^)|loG#XMS1xLm5|mnP-QgAvr>wIa?;+KRGh(1Giop3HrE-BFM< zwSY71fw1p8+ZlGj?}@`dNRo3s%>M9JKg@n%@;`^!Ls}Vd+>h_vpLuH{m1#Gb+-Rlr^D=Z;o}9R z{c%&GpPqAlv$Tn8QLyMf~-?tMZf zavyB4{&X5sn{FG;k*f%REGEw2qe*61nYMI)jbQCOSEnJl3UukVqT;#{(63AHCyGzX zQJ>_AFb|c9j4j(T6~WYs<^6;Vx6-e0cWm~#Np=sTYFBLu?Oq*5M8Bj=uqTTHfl+nf zk}W)O_csqZM>Fb=-X!zdktUT7~& zwr$(CeT)4)v~AmN`+JwFzKcIwwhwICvHj4%j@>(V@wZ>!?9W~Oeb-?ir6ajvMSo+Y z1jUbnM4jV0IX#|Ka5(!4sEsm}#x!B(b1l5az}kFVVr2sCJ3k%-v3rd_`?$ui2OBNj)if8ZKyIz0}HsH%`)K|O*7tP=(pZ+%PpHX&zaB0`=)IqHhALN!oob= ztLQbyMkkJr44-M@jk=!V+(Aj$Df^~QhZ3h{-!OIh;yz=@A3;2XDxL11I;{#WRHG@& zgpu|L#`*1xo@q7Yj>9}PeE8ULUKZVR!|2q(Gdy}ac8JPcJ1HBXel=D0`u(@;JH%zO zzN1s2n)BgoI64(My@QKZgimAU6EWkezD=7p`IpT{r}{@8xDmmzmqDYFHuqh5W#5j0 z&3#Xx0Mc%mcpxUlcH{ZcwSDkA!YPT7)MTNmO)?C}4l#pEsqL4}o<=^fpeFR~mtJzo zC712E6f$85VK!pEb9{1q?7<-GB+O<`MovK<%J}3i7%j4yrtn7L5jCBqKbyJA+r-XU zMzH9pxgwqQO>}GkX#6*ZIM+qy834MI(|3<|et$ToP=_2CBZ|&s(*UshRruZED_uE1 zLhLRPvH5Bf#D87Yn9u#h)4uN1*f`fZH*MAwJ{V?jD8d^iT&Ph?E6=Ukc$Teq8FP*ohp@XeUv^r0}+%#2mSyM;FOHD;$KNEkC zpJ%C$Ntdj*%K-;?R*LxN!~Wy`MQ6e~N0b`SG$M4i#yFa$xcJf$bcU=?3(!xr{&-Q+ zU0Bi;2+DK`|Ej|3kC)!?#4V3M#<}o){2yul#6yqQ;{C+kk4J~#Uu3`&@9uxR)FnyW 
zCqDWvZVtF_2FDhm;DUu?XI5IB$+<3IZ_Y=?Fs$3bGDhf7(#3sN6n8P8Rhz>3{GiV~~aBLLt*aelO?WCrnLL@3CT&lyA*C0^^K37vei!-?S1b{Nh+G}@h6_U)2d0T`s-pnbxx1yljbH7xQViat zM`&iw-r<9xILE`qEzM4_w$PNUTc%C=Z3Hh(*Sff8eODdlMDqK?m2O2E^N3e7hLW@v zv}m1jg`#+$Olh$gd*Lmej&}812=3)Y8mw@GjX%_kx5RcUqQQsMUVjv0>e}__gM}le z`oOmBmuYp_zVpzw?Ynnu+r52f|F-QkpO0=98Nso;xT~Qn9+uPU%vp4yH}Axc2h^ne zXaXUcJ(6kT(m_(uc_Vr_e~ueKi`UtxN?6rb)VmuQ9^7-9c}k*T>r=| z>yK?&KXPdOjl0($*uDN>|N5hMZDeCXkb8x7vu%zVsL@M|XQy`4#K^G8Jud0oeGl!g zMmj_h3?byARa_to&Z9T^vm5pd(z71}ruBq=Y!O#mxrt^SqB`lT?;^a8nbJ6vuqh8QE%-*npZ$E2k@ zds#f4t2{eNm-hZr(l;0{6iyanAL^QNA}hxjTqD4Wj;&@pNXR=zawCjEy2^A1_Fz-H&)d^ZkeqRN7YWGNG|88m7A@?#6#>!+@eZcXE1u+G6+!+{pZ z+#G1Nw${Q)q|U)wJ30dzNm&{XDRmBp-793&Iat=(86nUzrj(b)(P6DPDAxIMEnO8d z>U6KRIvx${w6KP1dC<>2J4<=edxJYVpyTGPH4Wf*`mk(_OvtC1Zp{`NiVX&D1##Kg z5|1=#g|5&F9FkI1t8G6Lo4_JYInY*I1T*N z$*ewo4sqjP>*;~5x;n^}$S{+ZPQQ6Wp8nGVKgCU=-UTOa{Yl(28XV=C>*Q3vA?u{I zPEHM(*OVb9VK{Ycx6d|(ZasAjKf7(7kFCYz#K~}}rDosQCGZKC#T}DV6DOJ|>6#0- zRP?nVbYa$Q;VwPfYO>qw$%W zGNZa5)U2Nl%Hib*+RiZ4I*mf(Q-dfT2PcLs7V^0D^}H+^(L2a}5EmlD;|0;i zr~F}A?I`7q57{kM8xXaDRdd&PpDwaOBr^Lq^;Vb_nNA-Nm#|Idip%%Cwo^S!DIuU;l0z zxM=GCtFo%kz)`eAMt;hK4)tSs6UW_=C z9g%3wZa!|DQL8xDm33OHgwdw&fpGnOgv@Lb3g-9#opp4m(5)8pu=2?0{*ioJw$xNr zZbl=*3V|Qgtvse3RFZc-7;X8Q8-8#|KEuYxN5_YB zJ(B~G(9}6;w}+&MdrEzKwqnNAiUYiZ|b9Nc7m1ONZ`QOC$47h8{#yFhyYyAu~u#wY}eOA@uQ79J!}}U)vpC9Vo?1R$Tx4G$+&># zS7vSC$z$@+V`MovvqpvL7qi@%r6{G6H` zYzue>q1RNw=ZOLnO$6py3(0)%=wQg2A28KQG3*ft<`E~Xq!-LBkEpX}zE0kAOaW_x zxgDK!rS->T9n1;^K32)^#Gba?p;0=#&;5T3egGNfk8`-qj5qn+#X` zv-s)2Wc_%y$ogBi3BzE4SyecK`OR5Z)J;Z)>{L1q;CUpYaXF|1=E2Sa)8NVahZbeE zrV%=)^tdWp|a&0bR3BTV6GQi``|-xFpLi za9}cjRJKgl4o;#^4sjh}Tz5h_H;>b0)7%4oUqIOd2KVqm7+ey`G`>yd*JNwFR&*pA z16={Ydc<}m^5*X@R$8WmwtB}bfxjKu@@5K}BcVz3FrzZKq=7m}nzzQzY(+DYrpsu+ zk|tXjT-w0Wl)?F`8gWmywiyixj+V*!2KN7et(Om#LGLMJ7g=58aUx7=uk0kQ8iMd> zGc;`g+3)b#CN=^xbh+opRSwRf8r{}zNL~>u>wsqn4RdL>!tqe+6t9V!{eePH2be8` z%PbiM)KddCD}B#>=y+^zsS}3@_nyM*f6!h!t;ppeeNT~WgS-ClET7%)a}H)iz>%h5 
z>p;FDTO7OHhh$YC^YI5w(X22%fRaoh|xkTt(?%#M#C ze`mr2p8)DP2nvf$4PKw{Q+f<3tnbcR zi_-JfY|$OJ9^7}Ri$@??blkm<08Snu5$gRENe)P zvkOW1;JVW^U9*J`Mq=P#R^?=a$6ERRtZJW7z~*kaVBqLoJ72Ow+)R#cORhU`*Ub;< zl?>}TrkrUnW%zW{r*G9g=u+vQxChOJu^F#r)`UxB<z_Z7!KhT_iHsi3 zU`ktD+JG@Dt@2u?wN$f9@>Mh1wRFhdR<*}6S-MRYHUVH~RlCzK2m)EVneYT*i_6xT z^-YkBb{k~mx2HJmrR{zLUliIgmlvkPj&yY}-^pm!feqPTJ1AoZ&l3U!xelBOIvPox z#oGs&4m+or4m(@p0}5TC@9bIGFdcRkrRmi{G|`zqP)u~OG?@;&DD5qNA_25CUDNxj zgsUMqA8g+7$;5PVpPXo$_e&lLjgE26?Xaz-Ze!QM=on(eo^El1h?kCy%_Mx&xX~Rn z@ZbpRnXUribJc}Uj2*%uBe%>pHrYD@jwtJ;lJ*Vp`H3D!EKRGS_4GJm>EfOUsXq7! z{922>cj1}LW?n&jx}~q_b<2GmU+Van1wMovB&?F$QgeDrZF!V$8=_6beF-kS!Xcmq z+A}OpI9X1Eh0Q#(ak8-2@3L7g_IFJ=tfJ>x7wTMr;ZO_D`PK;L-_A*UGjN~sUcYfZVP*y$SHYcb9I7K#(2?;mKIvHYy27J~ z-)X?#A$#FI)OTvH_>AGUpEXEukAaGyXSZsx5&UxV(+i?OgV$7aIJx3>H?NI$4=6^m z=o*#72~KDsOow**6Zh854XOJTI@GhXp(d)yB(=N_i6f)te!(vy5kI@w0a zeueW2A(HGufW{UoL3}-Ygac}HmA+^Nw0>>SM1zxWc3fVb9^>MaZ+piS0}L(FFz2)Z z<3T|!T7*9Gp$8{E6x3LeCpC+Ssu|6NBZB)T9+13nnS}+1>5ctm#*=d_&wWy3!d$R1 zpqbBi4He#*2V+aqSfSa%WEj2=RnnS~c?T;l!EtSjoQV=i5AZfG_6JfWMg1#p4iBVC z63iDBXF7UEcTpkT^^WeM@UDuCpj}j;vHWbA_Kt2#u|(({-In6vq2AGL;o;#{6Mwv; z+m>F_iyyw?S-pw!SyM4qM8PY!74Hi5@@`vvPnqiF-L^bF-PFswZC71wYal10mz1vc zHBbjTmvC^T$b?A?OYhF~nzA($5NSJEAN!Q17iyPQR@axrNEIelOlCPZUlr8x{R1STDy7QmzOWct+prW z{ON83826T6& zHPF`(+?~0mKpJSf(G(_(Njmka%J$yFXB`xN1Iq`D+U6Y3bMDb>bWRFuy$4dw!j)0udeL+f5@7iq+VU|Gu9;4RJwZgj=-9x($$w~Cb2h6kcIgh#RV zK=j6RyVVtdcDOOfC4b?0<7$6v>@PfT+`7$%xV`Y)pRNmNQcw2>0Y(zdx4+c~q`&ao zzh53o@SY4pYc6gy3{vL@55 z1Yz|g&CgrYm{DBZx^ity1MOC#?m8Ot^R{pxUACvWw*}^bK)Wp*NCSa(8^`i@T4?}- zX&;_9bj=0d{1)v8m#Ri#IE0B&PNm2W z796Ny23vhBlBPn}Vz4p}nCgepflwzs{yr3hblE2Uq44+{2qK48dDj6!~}YMG%H z@0CTW^F3iW0YL7aKn{qE_k`gD1i5<{P9ld#h$E%@oSX#Ckrv%mLGNw#Rg*O3T)x6uU~yh~OliVf zKuX19f;3pH7Pvn=od=KakQ0R-hID z0&SdF1vfDE7OT$3(|Yf2WGcryeiovW9lJhLC`>2`x@k5;+q5mQ&mIU-qEqqd^yjhAUj zkl+xIi_`8b1bmrCb*~slTjV%7aSElHLpJT;oat~8&m8M%hl#RvjC)59pmy;}ZQ>My zQ0r0)4R(sS7m^4q8Uv@p7yw`loDO3Eh>WMh7yzPWPlqwkqDFrp-D3R=z$|zm%mN^~ 
z=YcQ_fRuCjrkE2V4_tI{D4P-SV2}?j)~gSOUIhZ}!O*Kfpgl;h*67sMe9_}ePfmP1 zx03DgVLZ30E>8%nDgaD9Aut6*&7Kf+Yaq~`z|o6_DG=13fbRcxnP^&7^3SKey{}CO zQ{8mrX2Z1a==iYie6yjS!V98nA$Sv&G7>g^x<-!81x43(@(e7}0l7Z@fJIe|XLzW` z9=S)zfq7tgb zVi!d~)bQugNVQ97Rh;)Hago%nnF1kj0jm*Yck!fTvHPPU6!l>+`u(95Q_U-tY-85$ig6t&~I}IP}diArNr(ZOzd8u){zI#IIJ<+e7yj5!ZEE@`<c>!#;m{+8>wd|IJ41BTp$>325CS8cha00qptF-1bt;Acd*?b zzfM_Ygz+G?Ouwe*u5fx zl>FR;y{1MhrscfyyoBY(TaE3Sz=+DK2iWb=amZ_#S#jDJt+l-R!ld-eND2+Q8aZE> z;IBngYwENYCFxqD3d9@$bkA^rWz66RK*AgC6u>Hx`$t2)0K4hYqk z=w`U1lKATlR51kT47EuR02ZiX=+_&lbn8nKM5gM}ma3Zoy_*1`^wI>O8B(G2(gdj) zKq$R5L2Pm{Llppgd4k-0Q_^=mP6c_@`~MU1>D56!75^>Tj#{9hH83b%JriQPpvP5mh7h)k%mb)ovpF)k#>rf8$%0r zkn?xrOz2PxzCJPpL?M7KiUBF)^$9_jR8v4uU!N>>{s1ZD^^w00)%y)eWlP8_@dp?w zp{4+~yVN1CW$xJ&GehYO$we1ue=2iCXI_0vQu=EazIP{K*u|{H)$Y zVY48{jaRcIzIf)Uuw5P$Y6_$fsOHzhC-ACi+kV5DffD6 zFNC}p%I1{e-U(S)6|&n`;kQXUHTGMR%F^DYycXeP!*|b7pR~3h z4{c5+?^r-6yfvx0Fao5d-YSW5$Ow}7j-+x?@AW7tx^IRG>~})KT@fh?swI(ZRa!XR zvPBrJ#xyHumnCInG?po$2H66%QfeClgPIKQNH$-X{j9OTWO!Fn+0=XcbQYS9^k*<( z3Y!jgIIlrZh%5Je%xHR1Kx9V4>|IHX%iRpgPMcouN;YiF?lNXz?LA3lckeAc@s1ZW z?0$zkh&UoM(-s{)*|`Ic3VB#K$+ZuB7W1O3uf9(MY?Q_tynT<{RE$r`$GR7#m{kz> z5#5<;ZG2DCzAmQ1*L#w_oe?xnF5i{?rsQ>}Wc~Y?9KFv^LNobsL2=+8Z-zXyw(5Ie z80En2{PxODhZqHI+%i8X4R4sx?zXSF7=1#9HVb1A7?wPN!&q6#llLVxpP;Z$b>625 zDl2QJy5s{%WkWAck8Rj_WmJM+7@4aUW{VbIgLcRe?=28go@UmjP_uWwLBL^xN{rlL zsQ|3GRscJ2_krYs_1WW$J38i%H6nEut4ZP9HQk1pgFW9%+NGFA!!67kfzPx7-69(@ z@1T8Bsz2o=Wv98?6tl@Kg^!p<`1y0R6=NVu{Uwn5>@D}J<| zN3cw5S6g#2D1>Eit~p$TSIUpgs=dso8Y@l>UO$y=kc}m+bmi5*O-i2&sudTWDu0`_ zxSH6d)&A40_BH{4pbF?z0m=L6xUK_2_0w^u(4|H0Gf}Sv0u?Z#N_qNB)N4rv^)peg z?UKFWvk3=Ml1D(G0!CC3{%lfnYXqsFem242sk(IM)h{HaZ-%;blk*D%iL%hA-8ki~ zelbb^A>rB(bb(3!VzRi4Szf0QM3k)*+eZm2#V*d(5 z)#8K4F0~I}{C(2iQD6XnsTo7H;X?yMmH$#R2KD=LGln#5J~S}6O2r4tkdEE0T6~3V zl~+w&hTYCrLH`CNBY zRFZErP__O8MpUi;-)Nv}{r`reqRQmeZzXn>O)73q&Tna;h-$7D{C0w&Uqu2z70~Mf zNZxNJnEFF1RKJ~I>vvQVzms66_e2ePz=*0A{7!GzoPhWqdVYgDJ7i~`5M^7G9HlAr%ZZ7HT9j>l+bGJ%QJQmpi 
z+@MRX)6unU9^GTPH1_rkZS$6{Z>O?H$0`Fuit;Q2%OM*44y}rSTARDa!7Ub^LZA^1v*xj z{IR?Wz}7qI4jtoepN*g0=&lW1ehvj3Y7oH6FKYq#dQ;j&xsj0nQj(n)iVL!;saStC z$g?QDjHwM=dk)24Ywxy$RBMz&{Z4Wj1U}Y!Y4hdCPiHG<@N@6*B#(%6zb!mh+Vzw2 zal`QBpb?<_<Ayu+Ivt@J6G!~r2QxEcdcJjU;cUI5lWY5Vn zM31?VJcHG#2lfBn;cV@~QzyTghlj4roICZXB3Ti)Wy_}((HQ#=k>{JT?l3Hx2KYmy zwLj~cIkq2aQ}$%b=N`MgE3*~zjIQ2V)>;gQ>lTM~ss8oav}FclUd=o*>j*f=S zf}So%_RA&{&anh=dE{JFKqWMP%8zDCno1~Y;Va0(g0yHNCm5Ng9)W2Jjvw>ec@8-| zH5}`p*Gs|E!n`_?)87kL!G0zhq48POjbg#G$4FXuRd3LYoWX9J=tMXl%a-a%njA}E zJ{=j2EW6a-c@7<`Cu>6A19riB9*H)-()iu+t3;rc9^1GnI4P%Yxj z^&Zc8!`{noV=H;g-g&xQ z@0mSwt{b~||M4j{nU)Vn8&#Pbf_s75zycVaFW;55nkX{Y9pz$&H>4 zu2C+_TFl+D=G(dOx!7_u<*&)wk8?yfRABoGuy8GFLNQ;tP}r7Lta)6tFN|n8)v8__ zXPu5;aw;?v-x@8e-CR-X|v}rh-3X13wHyZ>^jg5;axddLA zQnw*SHQT7&l;uwCJ~K`PVv2oN8(5fFSF<6)Xwe-`3+2>!a^FV_lc^cXp$ClulRX%= zoPZvUpN)@Nz%T-M_A+{t_|>vkZX-S61-XCTAHEU?k&HSul=Z_4)2s2b71q%}vhr3p z`w`URaXc^Rw1zG|b|Swh>n;G?$Cz`+9)Bs`RV2t0Gfp2(_vQw{#If*Jy6YU+sj(3U z!xRKO3@t&>guT&;lN0ROhQ}v+y3MjlGz4B4LL)(~@Z@(aJ zR~iS)m18fy_h;~MbGBp-+@9WZV8fViCW(`@Hj}uAPxuCL)j3eXy!znObtg;&-Wc7% zaj*~`+l5h!+!$yXIC!?1lF4go;>g7KknWUf z)71x|_Q7SI%$&2-J3Xf~FqilX}( zl@9miGhH0u?&9?5%{c2Gw2Td!u1ke-p;_pu|6Md+nq(ct@H4BKdv4_XXvB{y42^4xJ!*xLBTqo!#OVd_i$@SZXKz@?6~3|_iSTSHx&p0-LxQdE#50Gor- zoZ1Fws2ccA3NH5LAf=eE?!}`!BsI;+^;t*EuZ4BW6{N~wd-!^beU!&6ms~?fxbGv6 zS=y$O&A;yPkMhzPvK4E&lpI_<`R8ISCsM2(Ye;{T=7O^lgVFCu(h9|y#fTa0NCLhC zP>xBIWEb|$iDG3?d2box0zdw2x{Bp68iu7vEHqv^3k){|HB4a1vg-88lN&L*+IIJu2rXA`J#u?CS?{X;1(6=xQ0ekAqrWW!+9qE%<~BogqaSt?OIc-n_< zuQjAXU|0d#&s*c8$EZN99C*ZxpD1!bmN?ZQ9xKlw@xm!V%s5@e@@@4wijrav@3zi` z6>HnW*WZ<77oL0b|9@(_J}Z@vZM$!xbrg+&nN74}nX=#YAEa6PG_Dt(V>e{oKHr9ZGCOal zV>J}&X&Y+qh&kp?unaaf)PvbF7B-^d`98?nN4t#qFNLT{t%775T;j;h-zK@8(lK_t zAUVbj6Tc#|@ciPg0XU+xe`K;*5L8Xgn4>Z?1VR0%+j}E(54Bg%Ht6f$OS0uN7jVB^ z1>1|*tW6vZ;(v}ByUu$%Z%GC{E9Q!9hGKrU@z-kkO|`$+qE?*C#7oCUaDEuqgK%eJ z4wJ^rniUtu>i<&7x@M*pzDwPer~9?NDu{WL3ekn<9Bt9qhK5Dp#FY9zsMoIe`ud>( z>#D68ckU;YWKfLVZac^52tys?j&eIPUF)ik%RKEykZ17|GkR~f7zL2P4aa+AS(@`+ 
z-5s8xXbHBRQ>S$EAfk%OQGXT}Y)^BjX5ro398>~oeh5qYDL=qrdw_q$<;Ib@YoD%} z9rSE$ginpmCfF9JSaLp`<&mmQc9-0;MAZhi&lbRew9P5?&TR5sfn1RVb7Tu|q=rp2 zYCY~4m_rRJ2;s@pvJMtV*R`1YU>dc)BU=?Xn5%L!G5{1^Yecp$8)=q_E+JFIxY?*s6b=AS_Mgsv*a;rLrbLT{D17N6R=8 zv}4oZalNlO!+wZ&$6-{@p^1k>s%t&>K4qRXc4Qr5isbfQ8 z<@kv2pFJ!?IeW>@Wf&`Gu40rUU+W0X<*t}kZX-|r zv%2x1J5R?ahIsaLY|O)Y$nskonByK2!(!s(trNEwS({7Yro)WXd^}q@V$Xo=&c=)m zbW`yhS5NE+bW}Gc=IKI$Oz4r=j2t#rMnq9pA)oyNqwur9;4HJ1Njti+tTXXi(Q#ZnM z43k?&9|$YuoqmW0FUM0$lRZXQG9`MbLnhurcD=V&$q~FD%~`F zG_RUAYo_PBu9{VtWh3>6Wotr(=APweF}l~U%2o&bIi=Cr(0OFLewu`yZ+SQiA&-a> zR-&SV(Nlci%?03)7MMj}oxP8jzT9fo_{KiIPK3#63^taBV(6WXx7j5*SJN#9^*giP z`4^D0m4PAGm#U)9GV)Adi>g09Tk9ic#)5Fp{Qnfjtoj+o$LvMo|4Gpd?P)Vba;$k_ zIZKn)ADgXSXuUWKy0+nAWeV2F`8QfIWBu}M#XKv>SxT#aBGI+w#&U4ZEnFPTX-qD3 z(BcPkO*f)^=RAL{!p*7XhB(`fvuKFlO|stkSE#cz2GK{H*|Ro$s1rv<$9S%B?9{Qa zYM!lXZ6UiYjrBXSWpl21*bEkp+Qvg0n2v!t-ucYJJBVe-k?KR)ngv%pt_4gZKgUja z)@o?OA{KB-(|`rcJl$3(mQY$NF_y*0A$%8do`iy@$P35#(vQZq&%T ze`@sDpyjSVG3%KAy|7mMPI6&;N^#@NoyxJshmGSLrt!VzcaAKCV2VHMAFX6nBspL6 zTeA++GDhQ?6gJKl*J*l9rmZrL&aSWB(ZjOEZKKYS(G%knXDk+mZVrbrKF8SFYZ>F^ z$%0~s6NumSC@m^xuGsnDx5z=$Icy%R1-a>3-NUjLJINv1bTE_6aRtf_aL7^6C~&Yl zhMDVE1Lv?#+mtIX!vx;N*@tg!!sWm@PAkJQWc_01a9ftJH4t@6+a!%qOv}tYpzrAs*6fo-Sc4X7rA&}UOTTq}7;9oUn9@%PcgEAi z#|iTmXPO1pHIIMeI(6R}diGFvjo_>`u6c=R}q z?2e2LpJX2uLp`@QeMQf_FKaEBlvT_RF~vb~h`EnD`Gs*LNUk|1NkOJzN#W{pfmnY( zQ3L15!=8GEDD&#iu`o7Ih(;lZh3VpKKha5XLLuQzup$WdD9$TXWUx zFP8DMX`kFc-wGJt4(VIrd{T$cVdt1eYVd+47KTw+oumQZD8!3u(; z1a^>9qkg|@--Ac+i90$%8f7% zzBKCy90VhH6Hh*-5jx&FCES{IPOFO5j}G#RHJb+LAY5RC%(v=l169J&h5bAwTOW|^!{!1$5G73 zU9C+l?UARkuFOXHgD24GM!ITZW>!OWwQ<_UcCC2Ttw3o-69`5eJoJ$1f;la5C z{oZgc5uAkMTte<7QhDq&R}*q4(P1I0r|{8H6n@>hEOBm zU%gcxjo&P6^~X%MXKI?FOxmaKUY#>FaabfegD@bMD0r1AFAW~8EojTKF#J3|%d#;1 zJU+{^F#J3|%QA+aCIQZSdP=K&63AUiPm9NKupksGL-+>|La{Qq{`MdgE0=gw2MuwHE z(bpWH;=IbGA*1}foLBLH03M*?yj32s!vj>DcgfCB8b#MD`vaP$V!h%q6^J82Y4!8> z#x&);;l_Z*NBilPEsI z1;M$xM~=AQB6qG{R&CZ-JnjpCK`yI+KwH0TLm;WZE9-Hd=^a1}qYahIL(*cq$2B(0!r 
zsyvuRywb(&;*>c`R4H!yWF5c0A;b%*%y(DgbdPCqxEc9oa z0%aT0{TE&zFnv(>UwBCaR`#9OHeeCk%gs^E2TA>{X=qo30}$byoM#IHvbDY|U;@JP z*80T_SlPG4k~|d1);O$i%th#_&?{vPq;0gJQ?^hW+tx%(KqzlpvoTQia2wk;ZEe8t zylr4d;F-+Z(=e9H8f)7#`7cvDThxv1#QgDg22|$N8yc{(Z|rZtP~N_MAW&v-4#YS# zWevvxTEiQP0k|Xeb+la8klT@Y^pbL!L3T-ql>$K0CD{@aQTQ9Xtn{-kqSUsR1rb$| zh`Kzz)|sSRTg?>cg0l#0IIz^M4bWx zZTC`V-b1hK4oW~pn#2_$SQP+hR}^TH?N@|gRix2USA<|y6#-XP`fWr4fOcgN8bF|3 zxjb^Bv3upkTdWl;(iX0&xJCc~+EqBv(=-DD?W#_B5;O?3s|Yuw#;@QXuKchdegQyx zSP;KJpgk;zUm(yP7Q}BwwYj>oEl`w*y1L?e5lNt3y~4#85NKETZw;j}3a$xjL?xAo zx~Agm77%FHtn@Vk2()X$8c~tZ*pu$_(oAUV$>iCs$}~gkBXOkFaSs zSiq3J9jE-D5{#cnPt22A#wi;4k5`sSiy)=ak) zoB_hwEd^)5px;t(1`PTw1!u^VThqbFnI_GxnY;_CH%)fjR&WLgXSWrc0h9fZJiRM&Cdu*eERHit1K!5MLz z2%m+-^)>?p{gDM{9^?t{I*oMDAKC4$Co5&VDc_wwI&voAba$qh6B-N_e(6yKXC8p* zQ3Yong6UBOXC8#Zn$EbVH0mpty5gP?2H84dxDh0ap*tM*x!wuGVV~<=FdX)| z)&YkLG>!Pf1)3WFaDk?#KU|6@>J21ZJP_4?Nm`3kw(g=ipX&7pq*N_B9zvlz+JIPIzWqIys9bTH3;Js2UiDZr;8@(RN$`I z$el3xK*1z|BFYaGP0|V414WZ`DR7r>1gj}FO(3^S0YT)!T5L&-!Q}^gee+5hL>}Dc z%6v8L&dX;Cmb)QxrXbQy1#!?Ia5?1&&!W1nk%R5#}&QKOsb`n_Jjg$DQHg! 
z!Q!jwQqZ2zHGN`H(`68OB5vKirpqAm#MQC6mO^ z$&R66*keJ#+*Ljh>kTo&-19LY2EivKtvKGW$=QQ8_9XW|F4UEI<;e-0dg4k}XFzur zO(LjIP7ntn71Sro&AOM`n^&HaFdjXz7gWHA+6(Ga5=LZ51@$QjL()-K<&~!<$P!On z1u9@fT?Oh>lXegHR83cb`qbn+wTc*CofD3Xsp_b!sVktnohA`ff|YqdCqPi2mM~i# zbxmG*dZW}eD1U(N+Ps>sfr+OlHTNwI2&Jbd%OVqNp!DdG%BE$(<++DV0AVeZ}9loNs8DyEwNvL#lVnQ8)P3YnrVR-c=co)@SpaKUqYVigII#mle2pFhZ#6du(x(IjgFGxx+cB+fe+A1$d78$RLw0k4AS8B1-1d#~X zD&_>T3woj4ywh5MBogY&{GJPP&>C7Gd0&)tNb)GsVD(p$(n~@`6wBaO28!1p(XKxw-`y1PpPW6kw3RU!i4z=r2HT0}I^eVZqe07P${Fv@G%W zUYe9%9vD)@f|o{yEDYUmM23K12+$b9>uM3Hm+U6eS0YQCbjD%H_=lWMAX{3ujBQTaK!mmnNJ;)w;L48$X+PXz& zdreaMolqA=XM0T&>_HZt?X|Hk9-R%)sd{v_*T%Ydbhg*Vx)6XZue>fg@CTxC0b`gk z)#Y`KFk`CA>)e6Ag&BK&Qu@Qdkiv|;UU8e$LKbH14bd~bhZzI(F7q&BZ_u1m`+1nL zH+W11p+e}5gz^iCVhAvzilH|uHlw48p*O}@e1s0mD{qRf^npMHjHn{~rliHiorf8F zQ*@64cij*0IBz zV+62varl--sfyVH=>0|_sBcMHV_g-q=Piv=89Be-KvfhSz=(=23|OF|3~Fu>eL?Nw2pzP!);g?a5N@&NL*5#PW_fyn#RkjHr?`?}%{@NCovB37V9n z5*g*4amj8^YqT0*M3qE%XAGoBDyZ*_OLlwOrnp(fKtx3r7*RC=-<6<;d0kPG0a3)f z)b_mc2XWnPPf?Qv#v!4p@&`$aFSMkB`Ui2LZBIM$%DbbS1OgQ>qAC>KyAw9kUMk8m zAgAzNYNrBfp~!h+C*u;(SNrx9?HLfwH>5)8JqfCB0HO4r1ZB=Cb>)@!Mgi9@omrp@ zI6zR}8&`WkP~YpTy+zJ@e^UD6AQZ&z`!(f6mGF0Yc&I!;Z%_}F_klP9JXGEX z;^47Rc^`~38wiE~T~2yzybmUwKC=N~=!0=)cStDwQIa08iUQFpfZi&AQ2L{!=FI>I z!+(_Y-xxDP>5r0|Z-IXcviISn^wCf?#i{#n(jrrCh*S3`NxIeWh&cyUK%YY%k?v0t z%)V-Ck4X0?3D#Lhh0sT0t8`k`08Kd{ujJiFlFX+RAPjvZMin8H&c~9{p9hB2D<6w( zZJ~5N9)oUrC>=m=YY(OK@fdW|L+N~cS}2`Q#I`0_4s`)^RSJ+|K9O*)tGak7olnHJ zwop2sOgInnM71@bw>61SBHGfXmm1@2vop}BM9oJ5{|Y)Dhz!p;bcq0h*&TLK>EATYdJXy zfTaCshz0ZMC?g5}!sG|^UIQfWr=y?)gzBf`vYBh-ekM*?AQ%GlDGO++K_>wO^)pc? 
z$u&bh8|w{3serLm5&mq{Nk|3tv#yhfVW7{&u>u4tU_@2Fe6A6^M*Z@+I975Ej?c#s zw?Gbh4+Fyg=VNSFKq!4a;Ubh86c9?Ej}dQkG5m$7!T^B^7^|*T^b1j1kqYV;Vho~O zLy{;>F2#Wy^lcd+s9#JlV0o!pMZXv)-;%ugrKI$g(2Z=$D_=@lTzoE(p!;%^20%~+ zbZG!c-Y-XK00`ADM`^G`t^E&?As|ozT^aydYLEtip#DQ-XlY*kT2lJwz!2Mgf*!F@ zXk0B7Ltl?e84wKy=-mWJ-mgbx0T8NRkIKSQG4zet!$6<{MpQBMjo8Dag8Gfv!%H=; zzS$^MZ4Ky+OCqS>jPXqYLH(vm?GVEpa1t>o{ihH;3z*!*Op5g(^S6_Vub@jUuq2@O z6CioNop3u)-3Dlxv3q$_FwVXcL(wj^aR%rMDj=xeNxEFR0fPFSWQAsDT3MD?|0OB? zTWDZRA(elL`tC9{@ONV?0Z~;zuPPvUznio>RY0hIH@4C;wZp&0SXMwZFrY8^fR@@4 zDXD?~)ni$umF0Q$dr9fXfgy}6mG31jvh7wX%f-<5qa*=>Dxh~8AbGzZB?%x@zaJ&Z zaxwJp(cS<=T>yPL0a|LCE02Jn{(I8r_J-x^@gFowRgVM4QbqU&jZ)R)KWLN+^nVhL z8P#daP2dB%H~@sw|4Fz|7E+<~f07ERy|eLUoqU;&JjBv^hJgAFgY z4*y}Y-0d=eQ2JrQ;kHwv)qWI3)bbR`BQR2uocU1{QKZ7tkD`bIgwl^(M5UD#dG$Y& z(vwpU^^IMt@}F^~UZI})uQ+sopbF?64oKeribDqws{a*-&I&c?lTuCy)G9!*3+T-a zXsID|7$B%mN-cC)TItEFPfbg|5EzoZ?5QaVscPLLhMty&sA51c1n3L_lJ{v?fvrqH zs6H*Vcz8l3@#$$$nSnqBjHqJh>1l{)Oe&~PPs669M?>csDTgGg3lOM)5mkhrk+%CZ zB^A_Xq~YkVN6<6VV3q>{6)p((u*15;Z1)zA#G{rzGZ_S7rY z49`x(CLf5-2lUPdq^+NwhS=kPpgue0X0*5mq>yK)7EHPVNaAzSU;qPx5@4jHMtV-l zIhv!Yk)D$-_du;=hSGCV0=24ZR;rPn8@mPwCIG#|0YQB(+pIuI&GOvXH7f=EQW^sF z0Z}SoL{%gGQi^%lnNTDBQp(AYmkPtri^FWC9qs`}REf;zrD2$n3hMJT%p}KGN{&B2 zO@BEpJ7tsO&rgF9Z>1{mg0!+J6ss8p=v_r3lwXjpa>FMelwXh%3f7q-?+Y8KTDbuu zs#fk7Hc+*4zp#PI{`W;`#rH`obsQwn^*lgOUzBpu)SE(4SYMQejnPW&e_xzdt_+DB z2?=yVGl`(SIKAM~mA%U)JuGDQZz@={W00i|{QjV_} zr8GnJC28rUK1vs9fV?DKWLx7!x_?pm_0X+?NCdQg6v%k_b?-;oCa=CME&XN~?6OwB zENyXhwpV0c9_I`Y{Q~HV5FmM9o(2;eAk4iy4K}}3dG!@(=~aQM4B4+pTfC1}iRvrk zoCc!GfIg=I$@|JUrvah*O3i7VA7k=TA-{>qBueS-(a} z8fv!T{ z0|FJ$CmNuoF7Z(S2o1lX2@S({hin3g^m_?4afjcG8FtrbIWibDX1MhEnH1xVgE zrNI^m2-P=nCS_G!tE&E9ntmWu6`0(=m!c?M3qo^O9K_?_tT1Qc&Jo~7(ic#0YUx!^un8B zs%1WKTPPI~^VYPoFC=P_2J}^Vtu4}TP1j!?Q$c-edYvw@ieZ}VZD}R4BpCwesv(J# z`nGi4KtPoYd0TqPr2&<^Z%-?GLZV~{pv#-Jrv1M?y&yKCWXRjoYjBZdr6NP#kyaiS zP$feEV>3yHyd&jRxwnL5$UD+2t_rBgkaspvB|`vX=SYUUvwF8ycW%9VAlfyVG8m;eepNJH1F}gz6k@Xzyusj%;Xvv2$cYdr!JuLNC1-eLuojs0EFs? 
zQah$#Smf1@q@_O%RQt&Jk+emZI4hMtQT=G73ItU^rwT~kk4CD1Q2nT=?uXa&^6Dqj z(qE)W?-g77uH^;7i7n%!r$@*8hW&w6-~PRQdZJ)25Dt$9?^PpxhxNiC_yXZ6fdu<` z6!VF+#fQpyV(F8SB_LP=bd~_g`^m@>AS`{-D_W_XC#m#jDW}L9DnOtDMpTKrKTGXi zlcj?CXDN3))ez@N-2HjfSb;zVjHn{~=V>sblM3peyT*!`$g6*qmi{I*Z=IZfmA3fC zp{}}oD%J%Es(?-vki4IYbpeFxr(#{|D)O(>V4Md66)>WzE`Ob7zO^M4)W1&6IFA;R zS3jMWJ{K4|pPZjgTfCa*i=od%hJc_7=u`p8` zz7ZJ$f+0X>2#~zrhztQj^&8GmTG@#0BrW|@pt_Nq--^}TD5~F%RDqxh=u`p8`|U^- z5USsf)!e9RekbUb6(Fh!7*SQt@1!AKH>se0C$%GGWME$XUuo&PfuT+0{9kE{ZuL|u zo5aw+M23K12+*kllJ{RCLx52ImsrisdG+7Y(hmaF&E)*Iw8gDFn??11N2)+j1$3%_ z3>T8~hjH3psAl+4 zYz82j0nn)elJ`fk833XBqu2}=irXK@W&naAz=*15_;G9oQbGN3Y=#Tf4F3`50}!Zy z5mki$BMk>SD#caJ*Dh+l8easq;lxei&U2F5zthAfmHG7WjA{OlK6~rxU&J|pa}palJ^;9yR*U7QZ>#q%Wks~ zL|#BMdkC~tGtww8wfKc{Y9mGvc>&FEAdtMjP;U1-CU&m%tOmc@l>trHDJtJCGWEvc~Od=Qx3aNwvXfm^s6j@hsE_7lfX^n4Hfm2bJeyHU0C;VL1na13+K%0LlCO@=Dhs0coS>ms!r$)8)MSqH^h_ zXX|M-mx|smy~&v<8*1~UvCgnPXk7N)ziOT4ywkMy!x_o z={G~~Z6W8&$}PHYS*dJMbG|%w7Z4l(`s@cJ@5{?UVFje#FE0m$wI5SLx%8?)wV#}? 
zD7Uyu-!H1Kj8uW33g}b;$@|Jk6%eYg6jhChN?v_+x%9TPjR`g5tILb*{>q|?ihE7$ z4JfvGV8XlLF6$@@EHUwHr-6TjOaq{;xg;2@E_zgu>5DS2tT*T;^v2T%gd5T!)& zzTSI8{4yrq(5Se^1fWY563P39C{=jQgmI&hSF;~5=GE+fVpc^Mmw;}eRHE7bTtGp=FQ0a=5lxJW45(#X*81d zv4D2eMxizGzNOse(zT*}?C&@FO&eFh*k*0y{r&QC-LqDkv2lHClo3GW1+so>xCqF8z6EgYD$}5WW%2mbP8C zv=5ilKP#6_GXp01!%9{^Y!^p=Qs$+y)HWdM3234~AbJ0!+~c>20jcMolzFV|s3d-* z984J7t%(4AAp^A3m74-#HPJ`P7rDIKE>ZB&axk6&Q7WKoJAj~mw2WH@=Sh;{qvddF zwO!DkmV;FY2vk6uhcGNL@~7o+R|3#7+g%COl|lBga=-babHYSDtSLq4pwv^2i-Cb2lHYDvJ>95ONuvVAu&}jIZa_UFTK-3!04dsC3{hM+)tp%jOzbSX>rGHwvB(Hv^ zT>4_5dI>o{Q*QAq(wB(pXM>EW06`VdS2sZNezqJ=SOB5=*|M(^WG3-*ks%;Z0i7X0 zOAQPGg8DgUr~)cu?DGv&^&ns@RfIoZZgp8eUQj>ZKn40j5ZDmX7z2!`8e?B5hrJ-F zmN_VXJM-$_mrGv`&9IZ4e;=jMPBp`qB2^%$0y70{^ylJ~3SU}gt|>Q}>ZLS_=b7F0t)paQy#1GLnzvj+tA zYpy9`@yx5=ESJ6=7`lv{-zape~eUtpbF?z0m=K1s{a`D&&p+D=vxg` zF$5S<#n87JsAA|_k)g};>OYlB|0^(bIXV9ce=s(vFBe1q9H|0970{^ylJ}n@RY0iz zbELW}uYR{&`nN!J7dgM{v#3(pC93}#bSOa#FFLxE`tN04f@$tv!5+I@`meyy73BP(s;N__E3|z4sGR<1SU!MB{!v`UuModKj%xrA zg#!9HB_Mf!Twb-wS^R6ODdF}R;lZ# zlCzJz`pim6j+a!mtH}AxO3UiQRf{*eBm3<)rCD!u+@uJg_-)#%|My#lymRtszd4w+ zZ>P7{U&?i2^9nR?B)Cfy=zDks$C@cF^_t_{1RDK@KO+#_wJ|!hwnWmh|@+TEiFHqEj>ozoQxF@|PIM z7@VKTmQOVtqAB>U{3a^)L)pe`BRI&s^qU;2-Cm$m(6T1NjfDKUNp{|_dA=+{2{cMo z-A*dfsr#M2=IqVqUfzPAzeX|CPIO>ntrs?5j{H=%at1qiNrkls@cYFK(8Rs`CuHM> z;Ykl|O7TQ{kw?T(e36omWVpDu_;k&U?h*aYVbsj34n=hbf{j)n08%%uUZ+k>;Ce7N zw?&by4KyOJ%-gfKN7PUnWpowf!nb@X>p69T@JAGYPu$@%je;hFQ{8iDzB`-(zDz|R zkXD$f<$?B$l>C8g<>?rgJ$h;;eyx$Q;I_4aU1#sL;6!mEI&z$JJ}7rZLH$}ko(xGi zUv51%cCXyI;bl6r;c?N~3px-{Al<<4U6OUrn*P0PADgFa?*aLVFMJj@b?zk=!nTNl zTx*I}9lNwCwiEGf6-2G~o^0*Hb4J0-!$|T_;V#M=9&YC#?vXA}Oj!xHMkke_9~*=I zp>e!F>kgED>dN1kb{qs z`6IH9VIO&MRC_lxqO8PjFhTCCv7z>d5nGJq_CUx#5F@E8Qf9)A+CX*%r4bVCY_yq< zhRAU448-E6d?@SY6RU^?E;{8?(pQsnE5N-b7woHm=1+tXTGCWNQ3qc#7L=Vu`#8PG zGPMXSE8s?`8or5Zis}}d-FzIBj%~z=UDHo#bQn$Z#z2zjtT0w3*O3awOq?T5`PpF+Bc;k(O^I zO29D<5S@IG!P!5ODT-Kl-o_p7{|xTYh=gY9hdt0v|OCuG=ljtwlTM4%8&k 
zhlcM(eoXSOY2X#@>g?kk`d0sNM^7^eTZJkLW-dsc&55midMG4{`3nBv z`E;XaxHM}a2Emd0a;ECgJ7U;F0rkBRNmQ{ zh>#221qpQczm*_+XC)1!ivYG4_T>N{P>@@-B|dR-jQ9pijN5qyQ6tzk0_Cuz($K`o zk+I{$;}&BWfnpzT^KXRVXq%dtl1<~(i4)^zLafz!sT1Fr%m*zmFMOm} zA9MU}3n=Q30rh&8HFoavK2$0i<00DUf#DIvJ z6CQ$~IMH#OJI+Ng0n!Nxd|06=5wQH?at5&UAwQ5!EUEHniDe#VeH4D}{emMG9v)p}R zpb%Y-0uApLiv`@*D4^h7*$GpoN6Zsu#l)&B=1_z*YP`D*>zv$*3*aYeaG zxF-qb_OQ`v225FA1r9@pPH8T)D+15za!cUIX?`d1DW`F(%a#QmP)x~SIAX69GSlgN zy77#SB-wX}>J(QDubP`mC&X^MTC&gFsRE1PRpj%`QBDyKjQ-`}zyyCd$8@uE{;>6L zxpJ4CttItR)q%46t`2-S=`W3uxU`vakIDwpD#z88;+taDoc*h3ZdLN?lsG8TPTAxP zP!y@UA?RceJhgWfb-ZdpWV<8I=GOIjy)`p~##8EZCP@^Rd7T@VQ%tsFEt2Gf)vHD; z4pBm-HlkorO_Ng_sRaqK^@C0)nC>3L{$!UE)7nmZnT=J1Y@Kv;Qu^d1R@7Nn>RPRg z4O=yV2SZ{YHZyHmPTkH5vvq4;-(%WZi2aAUWR2a?nzZVc(_XW>OVU-{WNf|lM-z*k z@Tf@~rZJF{&z6y%y1weC$g8!zsLz>Hvz(Xal{K3>x3&%V1DxK2m>a9>-85@d*SoP$ zH^mdRs6NdVgRAQ45j7sdJ&m@3m3D(CJGr=`jYpvxecf8hm=G)@9N6@7Q)RT+%vbn_AE{>r$e)wzu` zxvVIycQ$F;nwte4s#G@G=Gk6H93q{C>Dem7T{&2JHDa)#E)4C(h+DG#nYOfqi}YN4 zX)<`!WWQU#Eb1c@WS)Lx9aooCaW()FF*;%O=I*gy zGdX4)`>i#ew@sq`h5fmMS5mi5@Jg+pbG2XZITWG$Sc`2uJv#fz&Go4EN!(Gq$}x2p z=nkRx40bnY=oY&_tX}L~3KO)(qdF6b=Pf7aDRyktsx+cPy3Vuiwa(uQ~lQ zxXOMLou=JbhLNtolnG7NLuY{*9sb$eakx1J&mPzMTp>I4(ACm4>(*@89OGVZ#bmRH zX>8IpLUvj@)x#UIR;Wx%4LUVFViX@;?X<;H z6qD0=Gm9JAVlHViljFR+(q)H9q-;EyT=-VseIfMUx`wQh76?7|{v3-1?p7`mjPYp2 zZ#Xis;>I)Tjl|T(x4UU&mWB>pHYuQZvU9e9q>h=)eKc~bP)IBea9!xBav`=nn|0{q zP^aR?xSlr(C=$kpF(om z*~kUWI&^Y!HQsC)@7shEPS;4|6HRr|s#>>l^qej!&Lw#*t2dGs*AX_a z#!daeNd_6K>Ao1WS1sM;jcs{7zQ&)9=$xXr`%gLTe5SbgO8){M0h9NSC)}cKrwGqq zm591)-rZ9TQNA4zIp4OMmsb*~sk%e)StJe?m?cg=l`x@hPUy59zq{I9CsfPel+PiIS2Vk zH|tr(qbkyc^%Ym=>51xnCJofcQ(9Tm&5;$iRb~Z^r`H>6H*DT|x~>Rcvtf($fSgKN z_cGEGx=<%Ep3}|9xj$jYYrV&e@nm5q-6b8J*2GJ_F^~;wo|Tei+BVZBj`TijSJ4efggg zB6T!Bb0*OCa3U?$Ha&XdAM1;E7#+_dk8;ZTQ#Y|yPq%GZvtWq{^N2~pOc*B0zSvdG zbkUXxma%BUXqkv7A3mn?Gay18mT*hcqZU8DYqGaVFgf!k2}FsPLj!TPd~za-oxNw` z-y5rOU0y#Vp(GJD>vrr-9skGOw45f;W^`OTE!f9IdTdXnbVPcx@riEn31Y(jbYv05 
z+dtLq;!Q_^i`g|{CrQ`%ozx5VHF@@*L7rnBJ63IOxg%^WY&@Hrfp_Q<3^g7WiV@3F zt8VhtcFcqx!9@K4QTO23Ju&wNPsHK8?;fcML_D^3 zx5RgX9T-OAJRb$3PqkWYc-f6U78dMp^-d0@C$`wwrCZBm#WnysqD??qS*w@ph@@JN zsqYa7bnn@1hOML4qwBjz`ks^UOysZ6>w8ZKOLmBKst%liQDa$sZWVl?N)G~w%6*m1 zq4XG-MPXT(v8%(O6s|JIRDAKKb>WT)s@9qH<^%ydG0{%THn}F3N*iIR{u5TWo?71x z6PY4xnyLM4Nt|l&S=DA`E;nw1??2S{l(B4?GE$m}_g|i$ZJmdCOpc9qT5}JgW(?a~ z9eZ3;cy~NCL!(<~*LR;TPMU#SS*93}#q9c&>4b#G=ka!pQO=H$(+rihKAr0@eJ2ju zY5i7P%QDDm_6mJ#x$EHG3^~AfzOP#gGbB|iEif(aHg#Y&gC;tAw?6Vf7)Mk5aRG+$zCA$I`jR-cXPhDq4t+n;V6UDd3-66!zR)QKi zx<=S~8VMPnaXA)ik5=kQt6i^m($kHH*UPg>Q(JO(s7LatgL=Z2HD|3dT zcGY$w?N%LmUA>k#+E!RnMz0e^$0UqZgB5b?d42Vo2XBoh)MweEN#UBs7%}a)WKsY; zA!u-F(pDo*9p@wtJ*t;_m7hZGMbHz&1QMsJ}|Rq>!xi3Qyo zS)}guBgvDy*V)8=tXSFb)4I@XgS7pC*l8TcxA!LZtCo#xR*PL{xYMYLQ2Faqt*Nbf z^<8>cRcrtH++M~}o%{E6cBykwtrz9>#^j!tCnU#y=El_=^F7PY+p&w`kDkaA@nU7J zZ5JKVvEL0|Phbnh46IzGpPKG5$BmmgpfuBTAMsj8*Jn*Mp>8HVwx^Dq5|d}uhq`R3 z69mjaA1&@d1eup`D5cvPcR0%6^-n$~P;IQP8uWcx*&bqGbaA;G-N0m0Ov@*ku6JWv zC)H=WJ;KLr@j(dEI;y@)59`)!aN3#;Ym{0$b<_E2FPrsJ*Hcy-uhN_CJ#2d70g9BynJ zsN^_?SEkl%I%_R&%ae%oD0;H@tGIL${JaQL(X~F9*LUfGL%w&1*~#r-atPb)qdIlq z7bnh8$Jd8W-@1OiCSyiL6nC~UZ$q#Lws z>8XmTT>Qy>9(Rg6m@~$jItJV2g58`3S4FohAH2CWzdpBDOZMNlK3JWEZVc7?k3Htl zM?B(?M~-HV5l&Mdd-RhIeZ=w;4;jt*9{8MU(=qLoHg>J=GU?${4;jt&e=8%oTL~7- zvrxfuMvvZj*4b-01&)!}W3TI^wHwwsm~%X@K^06Xyoa!H{kfi+9W5He^?|=Rb@N#` z{j<3#4_1QGj;Iz*+)s_z$McfDv+5Ig%CS#K&y(*~pWn3zQG}Dq3gYT{v8?-e3S@zfUbJe>7LLT) zYZ+VD_!;73&wJ(?ay_eQ+`jd_p1Ad_)7ES{A)QKL_SPC@1A7*mE5tqt;J#z?4%za(H*aZk2OCO2v30@qH)mkwu7Xo|Iu4l+NzsXK8;2l08Xh-@nYn#9+=CM*tU z#Ju{Ph?GiBv?iVB*4-1{>dByI-L#8 z(ZMc}jz06$wHrq3X62q!9vzY}lN2>NjAtjGQhj0I$!q_C!wj((M)q&k2dh}WF~E6~ zHK%XEBnIn!oAmz6Xufjg|5$Mbn+sb{JBZi04&JgNC=?dImG&COeQBaJVlH2=&!L$7uVn1xP_n1)7P$9zZ#lN zYwCQ-|E!JYu36pOys^3E%u}~C&pdy%KBckY3#Nru%%1Fs?Y^Q09mc9YA&BtQ}5-n+Ii9Z0O+rI(foiwoEOaxi2j;~ z`DqaFk*o#TA|C{@uLXQ|Q0(+I>adV+{HluqV7HJjt40XKg}dnEqgDpHg?y&h$tc>b zBGVw)ts;}SyH#Ww1iL{dDfryh9@!&Y3O?l*_J~(v1wM7gl^**As|tv+J>xLdSCqqI 
zF3OM;093WO)LnE2fw*|SK8k4&RW0WBQ%R@~Du*VYe)J+q*yN*;5mMP49~>Y;){KKg z-&PJ|rLJ)iM-9v|t_Fz^WMhkTcZrojHg?z}Vr>Ae@`PBVq1fWO7e`b}TYKqJ8IkeL z!q9pg;g$)>;M=QwWGvF)+lw!_Iid#NUW*P(5M+CC(WjH)tCsNfPeu-d8O5HK4 zqJGHssf6T1m|>qq`arXlLAKBG!y_5)c;Au}fmSpC*}mKg6Cud<-Te^>f^1*zXZ6|! z8(}G5`*cJuq5*RE$|rOoTRO{K91vtncXNXb0V{b}5xX?VfF4#~4p%D!oe#?c$By7*eEkk0%YkDL>%ee2@PLPRVB|S4wO-wP zQ1Q6PSt1-%*T?w8*-ZMN{f~>3Zqx_ue^>_=`Xe}@Yc*3YX2%>GWyWR><|bU1Sypmz zUEdZIXMJ8e9Ncx*vbHlPd z5+xv|>Zs^6N`1RG95p{lBHKOcfCD2L=Jsgr zuyZoGs7IIjfU$&>f*w8F9TpH|kLDZoUK_UbnCO1__BUjYiSE}v3?3d6-7gSikBRPA zp9K$(Etf_9a#4@v+r(a*%;vGXy88kVvd12DaI6h0SP|z4J{Jzz3O;A)WU`4BT#+3i z$X3KTqLkA(x;WO0%xN55*LOo}n23dkM=x}PHjow_y>PLap~3Jyy19P`hV#*IPUI?H zc1%^Q?mc?h;T>4zM?AIzi+ttL-X8VGF)>6d&1D@^*LS`p7!x~%OhX#O1|rxoi^e*z zh~t>IayTDzuyl&e>Fe&t7AHhAHkWs7#oAonaX5Ep4G8Pw%6bQe^>N%NS(U^3I6kND zl3C>XIDB_1R+}G(_pV^bA2&-kI7)KRA%7gPK_bVnkMAOvF&y7TF5^DFi(KY?yj>ii zh7nvmzIZ}vm^AqK=)EYD&Q7cv21vtBtQrOk`iWJ;+~b~DHO$m>QY9H6lATmZ1`PR0 zm1Mw3c2Xr7KIMtUsw9~P%@gbTGQWD$e8)P6zxR*ud|dU( z#ZyzmG+{iMnbld_gz=QBVSuoHO4Tr*FrHF13>el=sT!tlu!p~{8U_gYUssaZN7(}( z%!-l$L;lxxb)<}eZ_oZs@jsGe@*ICtPj+S#&eJN%0Ac;KN;02to>oZ)4C|*=lGP;H z-{NsyrGSwCZ6z77$|5OM&YkcNXS3HC3Ddmc-BTkL$TmVe>)VR*|PB=BLbG-{rjq6!sbB_J}851GdILL8A+=xT(9FQ;mS&&R2vt6ge{dO zxv(;9S?s}TjcY%`R<6r&vJq81X z4Cf9eAJNhKhd zoX3qKE+h%zaNcgdc%=*`=PmPKzQ#pg;rvSEIWRfDVlsyYoIm6lIS^rT{%C5z9GIM6 zHK5T5|4^x`LEZmQwYve?KUC^!K=u!nx^`)VXLC=I3%LumJ-Z^?1+r&H2UII|f$Z4} zeQ{YU<~G7}szEiE+MZL9&4uhaTz%~<=R)?J1@7)@#k@xN$4XuEXz4#zEu9CGe;jfT z3L;GYardOIc`*6MN?r3C;R0^|cK-7b`GTtM`H)>Ome z7v)%_lkNuTMY&ypBY_w3o%lR{86F5yz*MU)l)gA0@!LNrh4kWlY_C`=*F;~E`%PFd z+Ff6f&*R+}fRJ92lR@)ZcZc+noYCq6@4*Fbom%%G_yhFs(btMS5aN0HkY7d*2&d=e z^O6vI!0CA%A@<~Yw;bE@YW9Q_&@WG@6}s{H(mdXS0|@D*Ip*a;EavO=9fdDOe!$dM zx(D{M9OH1tiy^(t6<(D6joS0`>Uiz3%P?nih@ zo?Yc!`-J(DJYM&s)m!<}JYLWQL?NKx!34-E=%x9v@=9f2#tdD)ERWa80Xb-jRw1}A z%k9>>g8K_`!cTqG^ zmV=Vu>ZAi8bO7i&0JPGmSwKjyPMWPZYOl_-*F}MJE!C@&Ky?Z9nj{bq0s*=}fZ)C+ z2?Pk&*Cc`J66m!_ARq(+bb$b^GztU=>1&ffLyg)S^6Z)@knX;ELmn@M8j?V7%>CxK 
z5(utz7f_o!sZ^^T_#)Z2q{lvOKBiKUQUpU0a!Mx^Q+I<;yw37ANUe*vpnwY$PL{d)5^>^=0Q}#%qTipQ@sXW4a^TA{=y4C%? zY04haZFskIG^|_Q0TZbP;4OK);1yg*Z^;S7yjCv9dtaJ&M+(AZfypE^RNj|!Q;lnQ z1k(4ViFSlYxqN@}lR%IHCQ{w%{{EcBw3F&q_xI=Q#CxsW>i&V8K+X$iW0!zF+m95w zhMc(0l2h28j0cFO0lGo~!QGxL01&R*lkv>b*!@Tv*+2*cXg40hS5Dz0 z`D`EAfRKJ9jqF)+3Lni2-txrR0HGB?*9stF?wDfKs_-Ia^KpL|zpxcwKN2Tk}z%Cx~33L zpUa1qrcyY4F5mAF353(mI6Tvct8TJl&!5;3h9?}cD7_jjYjR?^X!{Zpaz)#o)2h7dV}80c%sR)8P{Or+|UujTQsEJ_VI&bSR3$JcWb7t#@sgWkh{$p7_x$n72w zPG8SC3>A^#^!0pqEe$0EwcedV7$8UiQ`0qz-krP^rI6n3A&f}D{YDymKo0t{3=q<9 z2I z5Yl^9tFD`ytLx@|oEJaM{kBqIuz%cX56C~sOShf5b}=KM8w((~Kgl^EC}ROMWb$6F z3heB@oV2zV%FY1Y6V5d|yDuMg?*<6zefh2)@6Kz~ewJsyj0z?dQvNK(yYr;rpC>H= z!4=S(3JC7c^AYC?2-lw{EzOf2ev!w!Hi1wupilUKRvNFi1cdY#dAw_Lexvs5Jo{r5 zh$Kt->wG{Np0b=Tfqs)b2@qTXz1sl6{Y~;DfN=dy@+9*m&~H<|0f@E$`fviY(qX>= z6%f+j=1tEx%vX=!->FqS4wz~cko|)C%;woNY$+%Y1YAfbI?e;q<$l17)!k zPQS|+_^eH-(Q+Ehn@$h^KIMkyn_~e?oYdjJ&)K$cPI4^2&*%I7f*`}`_j$ZycD@|T zACij#f-zv?BtP?qbuV5aI&5x&f^;dS5_Dw-x4ni*iAuc5#ti9tBdq z?BXI`owz^(T~frg8xR5kxj3K+t7HOv?d53Ya96H@K|W#Roj4 zhHSUj7v(|{OhxvJs0QW!o?pZ@^+KuP1x3tw0nzz@-uZy&`UORtp8z3!LBZ*0Srs6a zyr76{{)H0$iXvtlf#3v~I7vxY6wx3lHRSnYk&~2kMX|&akPD@x7pATOLI^5FA%^5YBT$FD4ke=gjQ1A~2K5tDce)qs~2<p zvG=|<$`{H&UfLnm%ng`GHFLkTL#mnkr5#e%zc1qglL- zG#TJrPvZhY`tpM9D~3Q7x-@2h=odhrgaE;PT@h1kfC%@xB4$$;HEM4xvNuPr zN@TyW81Oz?B(85tV;YDi1NxW-1ous8OasF8O&ZhMKPKh7*(amC%9FpNh)J?NCD8TB z1b|RFppPp+aIY_7E)Wo|*Rv;OP2E#Xy`d;R5t|AO_6-Gbf~LSdCGU;gB5Sq=guH;> zazHC>9iB?zd}DFsQIQmrd1r@I!wk@qTYH*|d1o<}Naa-C*&zjbS5fMOM;|iC#Q^#q z4Is6?tJweXsnn34aB{2_7xV6-JT?|;k_Plyc~6VV-d*g?H7>?jF6Q0EN*!WVTT$Ch zTtyuViano1$PsW@zTBn9{8qC7el%7*~DzuD8G|C@^?Ns00yHy0~b zMpAspdy4WgkyJheFsVsC1L*7$7>d}!DAM)M~seA}v>Kyry_jX8?Lwj$B z6d!U+r*q^(08_2>1#d!mt>n-?*wOGTNCA_Em6ZBm zN5jgYeXyfpa%dka%BM%ZN}2&4oIprFR4jXH0wMiS@wa~$Yb9y+;iBYaII~hE%>a{L zl%Dx;vET8j6w(hDCq5yP&S{j}JEU_U1>7g%DrvF3*f)`~!3ZTCvDRJ4ZgK5*EZhZBK#%PAEVQKkXtAfKX8mC-8DQPQb6~+6hiv(Vo4g!Y%>EMcRZv_?v2`~i|n(}4QtQw(?!ez 
zEtVVpOc9soK=>2D0jmTM+|LwoBM1<#pDAo3h=JFreXhv95V8PgK$DYEZJftG;zonk;| zFv@a?1p00g2neo#&J_^c?l85(o%^09_zJD~$pH zLi(d5&_0dYeMR;kQJ{Umye|#>eI(FNlR!WS1n2?*g8S1X5FlKC>H-zzzNCVR?3a=2 zzF__=HFIBa{dwXF1Xn=k3JC7c6IVdE{ya5vUp4a=F?uh7XeMAHRWpB44Em-$rI7xj zuuXer8B#$-_S-1XQZRpA4CruLSuT}8zexfCArPQ*1qAmuNgzPD{w6hZzeeqMMfRu2 zbw4nFR}6R_Z9j4Sec}oPS3u_q2=4C_S3tP_J~eYcHS-TC?ggTmfQeMi{6mU+DTVY8 zihI?T{p3vkm{t@(kOC%BG5%u_w>2n*^p7b^u%Dp+O3DR-6flu0I{mMtTuKc&DtG@z zZCl7L37);#AIxnb4*UJ3hUbPjc>tjXK<5ew?sG$&JOJVP+z?L@>@R6A3ULJpgg}6a zRBE^=#PFX|NG}Re4f{(C7l)Xe0D=@Sk&5xfA#P1l3hBkck`YDuutx3DkX;_z@-Q$j z4Y5lfrnX!b;tCZAu7IA?00j54u&XbS0O5LBV1>FjE7YP6~Ru$=$J3i3xj9H1c3`^$yb3^>YEUNwBkjfutlICZ~-kfBM{sdg%Q1B zBECw4bmX}uMF86XXQ}jamVgw#I(RZq zkb{=j5eV+9gJti83-p@cGr1se0j(JVt<+C00Md%r22V{20vFH{$^yZCEw6@JPY3n% z>w=%Sqfk8!Xo0ana94!UF5un?1o!oUx1MA{yvoP{qz)tXG+>ff zJ^h9dLyZF)wKs?CUt;ea2aLF+Py$gMpjQV7?)AJFXYnQ=)mONGG-i-COc5TM5AhgrhdhAOK<}HLq?}F#^GayWE4s+an0wTxF$qPTcQM)B%ABfdG z9L!rnjN2Zr>fRR(wFIJdfZjSlaNoxp`KA~^xV|siEyxtUKY45*NC6Y66!ZQN14&9D zeSe67zcbEkFP!b3Tu7J)J5Zn(XfdJw9p(M~@66nLJr+^>@ zOr#R%!>Okzh4jPfDfyYQQTu4f{%s~dqoMiH(5Dv_OWq4d(vPJv4n!fKk8wc8*T>Qr zFD2S-$q4~DXj_OdlApQFl_S1DA5Sw15V(LoqX1fIoKbjvu~B{^&gxLg&j8v<6@lP> zB2MFg_?b_37^yLUJ{eI6?k9t9a)68OK9!t>y|yUO(#I+U_fs*l0K^hM-D$Y|44}`f z6oUKdG*j_{A`99Mu4WX#gsU0lGaXzqSDy{Ohd?b#RRMak84%ph2H!NGlvLI29RZb_ z0!(@s1o!ri9>`67uA>JfqW}|b9o)}_IjN6HMt#1cBxRofZG)Ujhrs=O81@;vRQBl$ zoqkig2{7qw7~C&}`FfW@eN4LPjx_H8feUEqCKZBvN3g@(Mfr$E?Mos1avUs=0P{<{ z-fO=35i-F$)65D)4+FZ30HkB?4Dk#WAf$JO-F%`2#B}b|L@ST9yixl~$i5aUSq|n` zLOi;~Be>s`^5%jL4&uf{GE1Sz1eqyVinp5q3D^sC;Z z1!og$cZcj-E>NH5xw}I=;M^yHzL8=UAPNEP5NRJE%KJtb)EgKjAcfzI(h73Wwi*P2 z`%OL_VmdgyQTuktej0UfIGEq&izb!uJ7dT<12r3;rv#!}Ju!oz$HJzTEf z`yr;hfv6SGrvN}mzaRLRipwNV@%<3@nhzKBgAjAMK#&6Z1PBP}4?;ZQ1Zc?d6zSn| zBR>pr?+XZ0K;PH^g!G4D)VKBkA^l+-r6>lka&L$^KOjf}eF_7F^xiNhHC8-S5Tt-E5TKPtfdC=B-vtsW z=J~r0sX8Ap)hfon3xn=Pz=ia89a5m*M_&&knI~W(m3jU?#MA_(h8))+k80HZD`bC; zYIqcw|CJosqojsEC9Xhl1$3@};QlFb1%&IL((2}@M(w#}b_w5KQY?2An9nWazS>dZ zdQlk{)BVJSSU;MD 
zb(#HB6zI`lURuV3uaA~MmnE)1a0PU(fZ$$MMy`Nxz0A3m<)bCg4_H1cDUMy&IsF&hgzdKuG_& zN3v0Z3*Y<9+w1! z)2qvPT(T|bHEDPNAp~F&LPPmAX?Rd-$Z>eIRqJazLg*w2U?SB_^V%|=rUw_&*LG?J z_jSqS+BVYwdN%?>`nqItfRMf}ncPZF7?jyHu`gDF`NlH7=Co3M@ut)kASwZLfdIjM zQ)&w!T;G(2`bz2O&B-``&>)~slYmw_-;Dzh(l;mLIGQsEW%jlx(9vMNMPoo`5RSGp z2xalFrH3WJ6kl7$@bGBKdmT%7vko9C1@u!ZfZ$$NE?SyOsr0(?VIEr?EqUKc3N#i< zUO=DuD74ar9$NrH`qt87i=sS+Wq6srGs=4mnC~d#VXtE((Dliff#3@03IqiA`ee+2 zaJ@bm^Dz?WhGfh@=pHbUN}wB(F;fcZ4at~~kul#`#qDH)TTk=P6K+0;ORv^$IbU@j zYrl>@^VAJztl7NcJvV-(j-g4l%hcrI&@wpjro@H77 zihTYv!i5)Jm|tjrvS$?boBw9dsPUZtWzWb}ob#g;%hR~1pwg!{WsGOLyF`T zIzRf2&TF_pj57WeU4DUhUvL3n2?$*5RfIJg&Yk63oP3u4>{B$OifW`iqd&`uq96{~NOYtl;1N{)~V5Z~V6g zi}({vAy|pn8&|FuHlhc)Pnz^r1(GH8a_M!I{(n>Y))-qxG-;T+qOL$x-e_M=|BDNjr!aF*ZnpB z&>^#keVj3SZNYcX{pah7Zm+=>aA!DYKP~E~_{GJ1?lt5?r<}s&^JjB$-{v(WCT^kL zc0TmAeE(%fTvU89zZ+TJlm9CJb^hD@;^KM5rNw2%JBxP}?=EgCZZ7T){~BH!?kHXs zt_)X&oAX~5UnqW6d^7x}_*J;S_(Snu#h;38;o|VTaB29b@ci)7@bd7A@agcX@S5_rf0*B!UtZjje<1&0{_*@1`6u&N6jv8l6fZAcQ@pBpb@8&|Dh_}C zxcEi!lj1k|eZ?ikjm2||&*r!1pU+Xu1I{a7oWB5b(W%zygUARB|OL%d( zHQXMq4qpzh48JR06z&RFgfE4E3m1hihWCcAg-?X{gg+M_3?C0~3fsdy;Zxxw;kU(y z!)4*l@cHnCaC7+ga7TDS_+xQ-ct@z;_J`3ImiNsLh1}1D?QAUP-=bL2%=QaMw8E1{ zvSwJBUvmDn?Z<9z53X;2Iva1-&TZ%8W8|U0VmGxt|F~8;lC`sD)+`r|W$iGQ7b|w| zA7e0UewK3>zOU9lFjyZN#u3aNoja$oOGXWD@{HL&>c6P%E$Rp=MbA#ReHI@WFZwKAUax!9&t{#e~-E$6=T0CcEghN5}UI*|dl| z;r#OdQ#H#k7vn>p%=2=>Ouv)^enFiRnxXfkmFD_k!`orbW8x2sT0{a~YCL)kCfA`V zdoc4yaz!-ci@av^mo}v19_QNRnp<#@E4av;d2cODUxro!|5_xlv{=xoplKCYgfv#F zg0flmuArQ@OmV}~Vo8Vb0S_cB=sm}wO+Gj;vryC!#)p2x8tDHXou=`=pbGu`N!xYW z?ZJCa;WTeb7w%=&^lW_SMY~uCyw`-b?-xd=9qc|V_6rN9#j^2UVj6wkgHP)8;9Z$@ zVV^axPqr~-+}Jm5nR9-6hx7NG^MkiK2y{%Omc z^Og?hKRV}{ajuDTty$~Mxi)PXau4wDpx3Z1?^JPlZi*kYDh5@>U~{l{6~q?PlsW4w zx>#T3tOtzsfLISS2YRy}n6?bb>-@{4r*VIER~2vYDu%3zAyvUYy{j0Sw#->y)5ZGj z&bn@_>tfB?~L{*G5M|xK=GHsby?-!cW4nB*b zXCcUJ7v$D1L2h$FW}6_hCCKdN?B0T~UYMrL1z9?+AhR^W*t)>k|0J&pCAV!dZ`&)%%}oT?1gdrJBy|Bgxe_U+EKrGe0DwtDb`X3UJ_j-mwy zGBgsdY%Wb4{XH;h@Z 
z9af;h47{Sf+WeSo7?dd@_)I;j45vb|qJ8FgtES9izxPBR1p^ci?=<@tsc6`WEUf9( z6UrtuK^th*RPB^eip6dNf%|2wb~(>PEMtSHS4WNLpp1J%KK#4ob9k? zrs3xB z_B=+RH#tpr6xwj*Wv#g|qPdElJtEDTb7Q_qMa{WWjO`SY&XjMmV0qp&`xPMA%x4)A zO?ym%%zR9NJ!igMY#L+Q8yz3|bv`LYC0+Ww8#jRpYhEE^DDaRy_qB>?%aW$#O3;!f zI7>q@kNuy?%-zq{Jq|C*V%v#J3syWXUs1`oY}`c*nrhwoOwpO+P+yFQ;*xwaRvJoT)tUq_mF3@s?_p7=c1* zF~(5{%zgBQ)9g+#;7PA_&Ub&vJfL@0oU|W?SrH>$`dr%Ht8S=gz znmv_!Gc^1 zNC3Q8CMtxc$4-W@AR-KdaAiX9DlFPtyfvoToyhUe6yPn`F*d!ZS7STT^6;d_b^_C>48qzqO_0f8 zm>98tZg!~Qb89<9?YWjq_hcK!3XOSNFXcXbv-Y5Sz+&=wjpPgUX8o)|4tU1%8AGPf zGM>nI(5sVS3)0eptc(-lOp7VRFWY`5VtP~yh*S2u)E?h-;hMH{VZ5|^5mvUBndv;( zJUV&mTw#MPo2T&t!*LrJy?d3i)`wQ-l4OeIaX!pSwPgPCH8KXHNWXM%VY8coyFWxK zfmv>|sD}kwS!kO9bI|Nb(Qr$1DgyLgyjh2VHqe@W)o;u1{%M@AA=IWp$cJt7yAM$q z)v_p!p|X9%cuONBB@krDTEVc{qXYs)Gb9jfB?H<(Y^XK0$FU}xJ_e@~P`0aF22qVd zj%IE24HqewYfU3?n5NHdS!eCh#=4>~wU2+j^DUa`i>;Y9Mzgiv&bDS+bx&)fnz&8L ze$wW_JE3YrURfcNUv32jB#Tt9)1|eT6B?9J4o+v3JDU^2D5uW}mF+uxJr^ZPYh<}f zmne6KHG=_}q2=wveB|1on#$JLtYb_a(PEdHp}5_vuXoMuV_F1jKU4OR&$P5RqX29A zCA|%=Ve!*&8gcb&p*>5_Xz56F2r9DOqF&jYSsDd9;Sg+TJ!_gl0Ybl)c%(I1;<4gm zA|9Rw-^OR(ObA3?Jshuvw&a4wG8TWtwk!u(2#^4^>3rg5eC+F;N*Q@yPmhY+1c2!~44I?rRN!5c}HFIQ?1}_4i&FSzm*-pfw$q`5Wa~Z3(5p1b^+**7c0O9*nE| zX~$VzeGP}1R`<5YTAyll&&mmeJf44Gf~K$Orh)OHm(;=l;nWWLkE7AFO6A9EI?zxc z6thJP-Xm+(#IDDNHCT$ANVs+_V&L)=8*5qnZr6W*y9%_&OilZR6|F(3eXzIM&3{#S zxuwN=RBA3VYyT>CspnI(y4RRZ{g}1) zIId+o+D(xC7%@o>VaqPKauy(^O; zx6m^aa>{c`iCr&;6=iSrPI)SaO%bjS^4TaIQhrc@qA~2O1UtN9Z~BDSc|3I|Oer%4 zKV_M!SDuLhK%C0P4y!xQcHsxh@u3@dt-}`CKJv15{Wy6m(|DmCo3N0u;XR8F#`xla zms6sqQfRBarU?yu@XI~AzV}ja_J7xHF5v$!I=6h-&`o7L%$5vx*bLUa%EbRhBetAf z`#H?y#L#(r07p|Teyh#HLhMp1fBFg5lT55U<-W7_==kW%%@<8&rzwxeYYxmy$*Hg> zSew$cK_z6%MqeSu04y~-N85o}0T_IBEU%97dNbUo>`W`->RcO?Qww8}kamlQeOaNo z(bV<7tsYY2U3!{EG}%7lOqVc{^c*WK%1O!VfzF(2$LrnkjXn6Ry|yr-7xZ=YgI>?5 zk%51iJ>`R&oeC_q;qU4^=#!{A2;C>E?(02krgWoinhd>-uZHcQ(+4tuw0WRZ0Ikx( zRN3~7%nXB;L;*3^44Vf!IPMEO&nHZ^^fBWN(}wY>c3)~oU0RS_c<8Y+8we&3UDLIJ 
zU`tQyDI2ZcGNI7uV$6<)&bWbKYyBx+JF+sG#`x%+D^2~y8Q@ptf8iP6m{y+NnN*X0 z(-xyUIqBz$sW}(&$uh{0Chl>xk1|%%*XyoWN*7+VBpV|rn{Bd(ZV$@LX!>No%!q%j zdL3b$ZoA30zN-0VrY(96`ID4YprhknaDVrLP75vBYdjP?+N!%Y26?7oqDcyc)VY#E z<7${v+oA3iyW6HYXnZJCXj7gBwIj4cL^{AG0 zeV-1xPxA7#JekMTJ8^DCZTHpFTbxZdUEMsr!}MH?M->u#!hTD~xChM89=vwqSed*ooMN$UOTSzb9O6%v zi{j+f#8lJ^a@KubmBvTj4kw~1$R9U?tHa-vmVlQU;<(UROaSxn`dN&=PFi*UVvX0+ zJU%s_W1RVA?~0ASUg$u!PuYz-^dJ4~^ip!3T9FZ|i?C>)cTS5G8fJnNoS`^JmA1n< z$ByKlQY@G}#xe@&ymd4Isqg)d?b|XA7dG+yCAjTCdvJZ(^5Py9X$Yv~f^(J^q(Z4I z-xPl_74gwIEh?#HTXBLl=9=dsv#n{-P3#h_Axy$P85jEp%Zp)F?S0F+#zgKxa@?iL z+80RPbDu?B2E!WWs-MOcv_s+)Q_B+9(oQ9=_lc`2#R0{(9)~haQ)N^JHIz~OFxn-4 zsKgIZeC;YAKomdBEPgM2p^PNT<3Q~%C*;`I{?k}!If0n>OMq7S1erwCro1H3SYE8qGZU-T*WQor zZ*RX3^sy|c*C@jzn>I?YJNl5M;)z_$tKZ}+b$G#9d`eXII-J-BtkceNbm6IO?LRNJ z|D3pJd9m$3p^zb&VC@T+yP#%@Su#bu*o(CLBc`qqBU5&n20M#vBsSwOl)(<CGJy)$C>eO(MmOBo_&W7Y_AzpQ8Xy|JL=Ccfo;l3FAg%32_@|Jlgq=?hq=o_Q6 z@k5Z}-R-ZI7FR@)tnEi0Cu3Y)VfcZ$ZVbB4XO^_!QO%x5b~h)v+P7P3bpl-#zlp$e z!(9aa>R2~{w{N{SFKhezSvw-M{lzdnMj6jY3e3sAdgvt&0;gQrR`nt=07;NQ>Q4)2 z`6D_52jY%lB(O8;Bvbvjna^r?3edmr&}a;gG-mO}M`UsA$edv_rZ_K*4%;?PRceBk z-4jSPDJWK*U%VG|a$?tY-1kSV7>YHobXKO{w+{!AKXuFbV*yrZp)k z={2&I&W029$k6akl1N7;;YfX$^G;PAb&^9nRp(&EU|`tZk&i+S*g1m^!^t}KP}ug> zD=jfVySu?d;D}O9^$7*$OFhatcBCKR00AR4a#d4GZUxg<45O-AQethZcBIdfUa@g~ zq@E~>kZ0&S@PwxFI9fp9XR-jxC`EF2+FUY2*cTf=^w>OC%%_%j`FpdKj(bWj#Q2V( zx{hypCtn$|pfN7cqLn?VTe=aLzSDVs| zVis|knUpQN3ZF1XuQBhf!)F{@A=+UI&^rt=0k=7Y2pv&UGfd7MT46z(LPiufzz1%s zd0^=8cI@&PA;mz~EJw^zuUmcg{(+vVPhuA$6{p80n8U4Q46i49P@|kxugBw8S=~cu zMG>*bqR5?2r2-}wot~j1ZISQU5tD-hD;-fx#!hc$v7eKm`RJecI*JLuK()!2SjUZY z`w~WU^hH%iPxQ)GOVzEBMXF;;AEAcZOrt?=L(vAgF|&$N`+Ia_r0lCGOLE{wX{}F` zNA41>3IHT8z7i*3jneAu{Jwl^J zgThANs$wfpBrX-^;|j**KL9w1(1JNfI{1P(Fq^`5^vJA}B`_jYlM50iu0?-PPs%T@cM0jP2QY zR5i=`EF1b}t>g=k9RJ8Kh~06=j@=>Q=njlN{X=!uAzCc!a3D%Z8sWIsB#j&eQq%}d zP8vZ^01IgS3j4LR(v7qv)tN?)7@tlf>d2kbh*Cm`%FK1WnF4A#0)?W$6=ad^#W+s0 z@wKFPJe7$tFujfu&Nch4T{kvh(T|me<^VY%Sp%!Nf#XK{+IL<@FSQqr-XN|R37&Lv 
zJ!BMOO^1`}Fg>!L^k@Dz`>E0)K>x+9!NbF{=HSZO|3HAgYg<_1$s``42{arY>dm$*oEyS z%`I=8Ng8$XPD<0$?0ZUs@&~Vt*)c84(V@RqYyFke`M29xQi6j%{^-ZmclGuM?Qh@c zyUa2Iy-2gr0gk1A^k(l^W-Ue&vzJCqd%FgBd$j5lA8$v=x7bm}G{%O$k@I3y99hkE zt-=dr0z}HnP-=);5D<8sinBX>Qm&bw$SJ@!f7_+~8eY#?s=WQSFaPV8vlH6I7QNOB z0}l_|B&+7i1>(dsZqOD!ZFyl|PAJrkb9LB-R0?ZcHs-&e^qma8v~061Um_?sf%jEX z!aQBDaeAI|X{v`tkG@5ew6I_~;_VoEr1{jjP25$6VCehpKSDDK7BHc!s^7eIp=a#0 z%t4*v@{PcY&rvbUCMd?oM}dMg9<7n`(x@Piy-aOymnYjmv}5n)?}*y$cpdN7w4ZZM zt6y2^=71y_AdBjM&yIi3w7<1+{x?>mya}nuVoXitw*S=M4qHfn>!dhNz?s#Yf&*~_ zUFX{*uE)yiP=q=j#nsxQC&>Sn$DU(QzGV!1N1R?&;V6YTXFi#_&GAOx zsvf15_C0#memDqA$J+*Q8>Ci-z%{R-t2s zsV~i5v-YoZLwzRkZgmy^6R-b!r7}qSD zic_G@_CoOTbJw*OpTm#E$1U1oZ|K&-3OMx9dCIvDG%fkfpLP7sF}>yht>}~S#1`py zAFhLmrq5T~V1mZT>u>LIZo523U1f4SZjlN&Nv1dMsZPdL3|HAwPc8PhM@>0YufHcO zB29%y%jMLR@Xw-7FWjv5<4P#rLYnq2OH`^Jg)w@&pBNC4Jb~G=1U<0Kr_U0So~`Ux zX1taRIw)0YlVq|q%0fTktoH?;q zG_R`rb7=TIs=P5{!d5>XtI?Y{JoCSSKaTG`(RV`nV$-M+qEz8@3@cOoqA@It4;J(< zqIYr>4iZb|n(qRjmC+Lj0yxMTo2etMq_lOoz(*TLWl3RpdY2{A2pdL-YmM>?I+&k%=w zE=3ekUg~E+PLQ_`%OX$IkBUGZs1t~o%?*dJEY{ZGFdK#erTY^Ktz_9?myNH~2vB^5`E!IV^e9jSCw1gWT{ zODfw2M;!J2M5&}!;Uit#K#`t&(`3N*WdKtN$W|iXJTUHi4$ayopKq8Z2pcuk(VBIe z=WxiUD(=@YM+>L(7*<9)TMnBUWMDO-7RX*n&y$*vPMfCVw zD8I64Dwm#`nf9yffy<V9i@)$vk0U+^7U#i>XawDW!is`C)3uO@DFEj1n zhZTjHtmL&yU2MI^1L*mg0VX?~DT-oHS%djG(0-cf%J(vnDjFHT#@kErsfbMgh`HUb zmZHAC_A|z1U~F_V0I;Ufapt;Oo3?#ci7O zToS|Qr-trw$Cj#=7q<(C$ApPlpmlA;rbdFh7D{1=$YMuALnvlgVQkJ5aO)@`WxRw1ELOfF1 zDl~(T%B3y1mvpt}s{U#cBjB)sw9b;r%uQXpf${2&0=MxgD~U`W#b#G*PgKm8jb(ojjJP5&_U;oW{h@Cpg!n|E@HtMtiV;@9_~*5vG}uX2$7T4Q80Y zGRA+J7Bn}Sl6|D~V2we=0AAI_aiXc9YnD-=o>KZVFa2XIjmY+t()+#ilAoG4OUU+=(rtRYytpEkMr3E&K} zRVVK8N=JN6M2vLyv{QO@EVaMOi)$u=Z-}7%U0%F>B6wW{?eFs9ritK< z5wyR{i(4mxw?xqX;@2mF+aqXyY0*URQxUYk%ZocFf_FsF{?gSEyi0%S>WQWH6m8t& zrT0xlwx^Wd>!rVnr4iYlQu;G5{bMYR$o7=d`@QrMk2MmqJ*9M;9@EvaG$PwmN-y`) zt72(Hwx^U{>7}oUr4iYlQhK$QUK2|rvOT5r4PN^8SQ?S-DW%tW=}oaTBHL3+Z}iez zV`)UTrh-^~yQw!uf{L*E#*JiE?x$E4TKcFeP%seHwe*%5MwoqzDa6E{DWu=!!h 
z9NPm?3Y9IU?zHxkSdNg^ej2fwl6%Skcv@RILQ8Aws}f|d^udF%L7Jx|cRf(4#XY$B zy5da!95)gsCAj-zigRL>1Dbkw}Ml~*u>qG6$CFp?NWQk&ydpe1Z zcpwrfr?rz3(F;Q{(;c-P^W7JY&#K-S)h2~)qZFg>RMu6=RDB8QdzSB>)qdgKc$s#+ zEAMS@sgkjrOaIHvw40@Fn5(fNc7x2d{iE+r++ILiNj4$lh`;&E{w*ZB zadx(q_jne(?5iA*6|r=dO1p}A2U5i$7SFTduJS;ztEOH!{tohpVA47Xp)HA6abNF> zS%L_^HbuqCc~IOxRmE(DACL{S`rfhf&-umVr3d5Ymg}Zj9+C~V_|~MhELrle?7#Fm zE4Ss(0kLlP9VDSw?tW>r_8dNEaKxq0A^ANxG*ic*T(9%+fPd7`)xgvAp{1s_-DQB% ze2zwVb1P`UW4{Bxm50-FwwRO;4<^0~!P7pTlKjixQ9Y^gMA3Zb3A;Ch{M}Cm)FNaU zy+wVVgknLDh(nLgbKclaJ1E79YS+9N$+Txd2m3A=Enyo(o(YRhQ2irv@KpsJ5<}5tV6VRoRH2akOu5JqtLNqB&#Oupv~5_OFFysLUw#TAAQ`}dX&tS+2)w%HZB*Jh1rdCg1?%kP z8trXe^ns4_2_Q?{x6k*~Q{RcUOJEMH9md&2RFm~0Sr%4!2TRxE2jZOa?X5v=@@1$Q zN!F1=LCshrTlQ#HXe}xsl7H?+mz;Nw6zgXuI}dDo#wddFXtaut7HIFaV@t8pynW^K zw{NqG+ZQmu7g^5yFZ@=$*>^H8O&5H^1;;p67X?+_Lmo@k@>Blie8Xq)xEbXzi9?_Z z&S`HT-@}CmD~{*9853*0{fu+kZ@BO{@{Mq=w_lHQ=Q zSKAo^?B2-1(2ysNn6z-?F`Zytpt<71+Fg}*QJ?q&<^yM8>TbXNT`0GGaJ88)vBV>b z9VsH)&y`TBmqSG~!mvmiBF~U~4C8xuo40LvDg7X~X^9Ix4vHeG2{RtFWU+xAT3SUX zm8f>~G8=a)&_}KK%R{}N2AaK=(x)wYZE$D`BBzSzh_m1x>*hlYQ+0;h@jW^4=nzDh|m1gb>^o&(~5`@ z)9aMYf1mkbd|{ULu9Xm0Z0CMh2jM1*u9e2$biaY=7DQ*1Fa}_N_+s zb2lHL)VLQ19Sk zu9u6^YP-n`M3zNc+UnydSE{f_9<5)?vi_BQPbEK>`H~PH(0}c$T3?yv0|nnE(~^wT zfL3FOq9vEI0=z2Hl~r59gE)sY`5CinoF)ZO7G$Y@`I*vDH>N4`X?WFPT4!;2@O71C zAxD)@3vP>+Vo*rLy}Y?rbA7CC`1X);Qs%Y^(Q1C)(q2L3b-A7*$F$qjA`^g3l=|~p zGIrL7)#?zbb?k5Fq-J9nfmxyua1ZTpEH|(#+y1GY)Ljq*robK}dfc&#iTQWbp`c{1jg`WH~Os(eIIvY;g1wb1ceU%tsq z6pBtv6{)7Kurv(a{ZJfNUFyeGZ~DRQ3#U1)y7d41)2f#)?DJKKKF-{OgexA*@W9UtGGg;nE%FwfZp;tSS$eNIW@tCAIJUk<^ zn|um=`0#LMb9knR)H33?=iT1qHd0y1oov_d=ufUep+Ija7?hvlIlu2ww$&6(Yk?7V zO3y-4vQ8&VT=ApLZJ{ewe|b=XQohAPnJav=S#7^?A>i%zYWY%DOS8iaCwIzC=+!c4-yj?jLYw^XNf4RhL zKqfY^E4J}mozT8VG^6hmw%DT6F^MPP-miyah-yBQ*W}9459pE7BT$?2hlUs&qH0_I z!JewinRCMSTm#4JF^0E_T3O=6sY#9dfv3YgL&^lXOx5vXl8)+OZ4WIiZsX)US$tWx zC#;od2(XZ>#u|rbFX5Uzy1xQB*f`rJd8m5Z=;7f!7wv49|8kXpai)ohDHyL0N=({q zQ{m4*v7u$#PVHqeC9jsjUt<=s>MF$s5c(7f~Td9Rh9c@lSagcRI>~^ 
z@W~-HD~iIjkkoA7t1vZBQtyXF{lL^~Xa6;Tq*a}3B-iblW@jpD88c2=wi?i~ZD*L4 z{Xmdx!K{wIckI~f+jm;5c*uN+d>$&2b=tMmZXM)R{aFes00~w6g%rs=HsZ|~%UoA? z-H*PcN!fggu5p9MrBk9v>K%dHT3XxNIHZrBiDLut)J1v$^ z0%%d^J{+L4rTnl2s43jJ1>B1`GUZ4gj?CC>>UvE z0BwJ1x4Sf6P|bS4Jj9d6im-w}?i$lUp?HP;6)oQUoqOZxU!6-7$(vwj+#_RilfOo; zpHX6W?KYq3V-cXn4`{@R%3oe^8SPb(ZV_!I?`8J7ZB-$!?9f)#iM#f10qAF{Q_-Nd zlPD*H%(M!!ex`Ew-d=T5bU{T)^XuMo?!Bifm86m?V4u_F<3^LV`VBf{M220vh7^g3k8}^7FVEo^FMs%k zdABlpB%xlJ*Nx-?0V4zq7zHIjkYEBNEg&M;J}eLw=nx7;LF7k7d`3uP6uLzlsQ3BK zwbuS|&OYbfs@%%Y%cXM8*?X_G=bCG-Ip>IDkU;Ya~WYv~gg2;ScLT+Dv=gW@?`T<`bma{^yPV)HT zNa=nkMat|VI^jMHjCq}l$TvaX6K3-1LCzCH9r zIsTr*N0IDXIR0=4a(y@GmJXOgvcU!Dl6}47Z+=$lG}dQ3{&>elB*A&(3a%L?`||h! zT(9C~&h_e&eRUiG-dAQH!#@ij7|k(@12!1``8vb!Km8tAGu)fXWZzbq?2CLj}3a}7d82;0+$->COb=f^i1k1F6o+Jq8?XDn*31SBNGQBp% zgaN`NPo?`cKvN|9&VAj*1o@w99sh26+R&+H^!2*HwcQ%R1nCi^F1 zmS0^Q>6ba*mKUk8mA2ZQZq!2#%4zF_FtHE%VcZnrDfpAW?zDIP5c5@0jq-6w&@nk* z&TNyFJ~UDOg*Ycd4-O>L`zmkKS}B-k{aJ%JszBWDZ+ji2!HQ#4%(2_$0oGZHA*QCV znXpIz;TRC6)TwL7_*>(W1LKRfPXgac~}10 zFCudDRz-`pO>l(lEo?Sk>7C7aP+%I$103&}t6m4%dGWvPs^3Ii4vSzSF%qqn0OlBa z!%*scfMzKTj{AO)-)#40$nWV@Z>An;3OHd_om6OW!C)rczp8l6G3 zl)MP7;Jj}|61!FReG$UkLJN7#pJ+|)%Xnm4t2oSP1jbWuDd2B-#Ryte*O`x{YKWHm_IX{gn{Tth1ey5_eE5Up_ z%&%^`dSdAA^TPaCp{lDAR_JcGLWg06P9z%1mitGxLi_o2qLE4Vu-;v&K+}3JlAz?o z54}w%L2a32|NHIreroG||Mq&HFjYUP*1KHx7dhQ~e|^0dD^%?taqJ^ zA!XkD^0E3|4IMfW|GBU=(2@@w{^3;KIycW{c4;9PAchDr;lmV4vxrIxS1qDxXY4^BB3yK>lz}W*tkQvPmW( z#mwIn$1Y6i7{&fHENZFXud&6vD}TME^DI;rwbdP2s%+q|pf^G!F5v?;oHre$BZce+ zO&_AdZad5tb)Ql-7<=s-Tf{6?fQtq3oBxJ4TlhYuTc@J-ZT=t2RjROn%^9GLI3`yq zQDo=IRpl%9P)b07EXrWrQC(*{%5}D*si==fLw%iDWR&$j&k?q{bw=(}lOuRb*BK+9 zMrGqx*Yb)^^Ezv98XAyaU1v_bn1Bh+u#P#y+A8ojl52}Vu(G0$>YcyGiOKx-PfJ`@ zS5{#HBQm~hqnx79=1m`0o2m;<^>FPyOBl8nhQA%wupDop)!o*r>cUzMt#EvmPBb~* zLYp^On`)hXm0YzqDMqs9cnfWQ&qg+^@CrLrY}xY)t$AncON)^%8`Q<-Uwie?+w|ri zQovbZE$UT;7K%HQPJUAViL#rFX!SwRIQf-uSI@Hw2Xa^hKveL-U$e2TxUp%0RD+U^ z$NuyzM(Q{JL=x3^{*ai9Yqh{lBbjFf*PRXyr)kB(mme!)U?UeRTfND<^(G5}rCe_c 
z&z*VZl)b6xC_mNj_J#@zYN~?U|I$_@ml1(%havpgJO6a?QVL*)^8oawBqw~@orS<- zkNRN4I!qJ>w6V5xd|rScA|;gv3~vSZ@N$5nQMIHE$$jJOPotE{~M zPO=?WDR$Z`qC8JuT&3!gmm;V*M1|@?g&3h=c1)3zH=Fr$Wull|7dD8Aq$}2%|7Eo4 zQ&gG*G`CMFG9AxWR#DDTa?Q#)I(=`u_`&Q@BPg*+`+SV{RU3yP7L`gg#}9l;`q|U+ zzp&4$_OxX-$TufA8hij^t<+ggt^oR_RXnTcrARCjS44tTSoCiA0RU6q?1xEiK(ABU zlFt%&+}8?IN|i6*RIAjqB3IS6r;V$?TBXHy7lD%EKI-bdVD#_>>jXl4`{?>kQS;Ri%LAC=8m%4~&U9 zmt?nOB9}OU#==IkZIazCa~emG?0lUe*#&L=1xhe>uP8T~X_+yB=|CA16usC_(F;2s zEuEJ!k(4PW=sg5PL>ADDyv2`y#YR_qnm)f+-N4X_~#~Q3dS=N8YkH| z132bZltVn1*V8BWt`hS5G&J{d+? z;2KSzVTH)+s+^!IGg*b1z)JR$uTxbhujH_*$-R};RpH#KkPl|-w-!ZcnMvpK4j)Ch z%2%He_9N0NwKLKwjQX1K?GpR8&sLr4337RLZVt`#900r3KCwDHVuGw4<wlx#dpfa(m7DT9k2Ji1FTP_|Wv0H}mwhOV`E^rv+a1zJ_kVIMk2NhbnW za+M0nvHKbFBnR&F)RAPmU5!UyERfWu!&ziy2uNL|ek3p5 zg$DoOXe3!k;ee9aF>|7zr5V;XZpF`Ii{i>PXqS-O*?;7`uc_g+!%%k$QAvhl22Z}f#hoz%R zh`U_M9#3A11+)#V!;ltTg+wc8ss&E5O)it_HyHKo=$eOAo>&ETMDkFUNZux7?-<(U zL?eqE6BTsqFcpx9HUw#`sV7iCfq1UzaGpxqDpNOno2X4z87qJ}SfZ%h8(wNJV{LR@ z7Uw7-?^l+j_q~ie!w1xaUmvahVmTjkwX9zzE1bq4UR2?A3|^e=A+>VkN0AyBSneB4 zF}T3i=;b^1s!;AjR`laK*mC$e56a!Y&98iKey8v>{ItLNxwrAlj$}&=Netc&2}5qi z*E6{L+!e+LZNWaR!y%>ru}spvWh(ngcvq>bdx0d6ua536~o z!t&Nr3Hu9@)zvb1Ed3iXpn!^cylJU>R?aLwr7!@W3FNlG3+Vw+=v|B3MLQLyU(MZp zd~Jc}Mf194spVUD3cSuT@}7}@$J`v%sDPqqXU&m-3YmvC@3FR3H^-K3pZ~05)$cdE zfew09_0ug_$q|Wk%NfSj56{xiHWi3DlZ&#*PQbTZL42zJhJyGiPptFt*qCQL9gjIG zl_S$b6?}APSFWBWs)FzC7FF>5Z5y1WQ{?nOu}!|pLw8w!J_L58I2a zu6VvM#Y2It2W2N$^%Hgt~|ja89RHk7kL4@ zPeg_)(BBq)sa*TU5~j_*+x4oa6^qgc^CS>`ML--|lKi(niB6bj6K<9iMpYAiMH`8} z(B=X2yH~~KmWjS?&&<3f>eSM0&&-p@GgDh%-!+fS4>YXFEn8pTZ1g;&UPPwNwVa$X znOn`v?U({76R=5inC+N?yzo3c2DO@|V+s;807WR>*nVUrsODK(tbyDVcWVZ9KDWXR zKK|#DgHO-@#y$r=E^KSV4c^~qa(T)xI#W2YwppDt&W~dDES?8@bMX4{b%xjPRM*~u zynYMYb<&{vbE`F&Fbzzozw8N1^*1doKQKP39${>Cf3yo>rAZW&foW z%a2=J&yES<)-#2ZnP)#@>y`jajPJH?2eR#|FN#lU-MR=&x{E8_)gv&~9kLSZplebO z!JO@sWRGr6$(n(-|MgR;rc@P{PwHAEe!mzy|6r^$VDXYztoU}(q$iMuy6K`xF*cS$ 
zUu~iNywdd5Gbfg`_KI_P3$8`#&uCUO$hC-WDspo$q=pPLW-#!={~Y=?|l#K)IIW`oj*1t-z^`b}F`Pix>LTeiNoPcb}2hv@bxhLi5*QZ0Uh zTZQG^EMkKtHScqolqYqHVQ6z2Pchtggt=9}w&*;sBd2EFh?4O2K*Q5(` zC{8(ixID|0SR2w+cC+~)JbxHa%@(_I{72eQDZ#W+vW<%{80k{AD0{2bdpRBZDEmwJ zYLS$RdsJOsg*aYe--di!wQovo$?O|_&pW0fZrWL4d?a|(9WQFf9Vo z`7;K}vr|6@4K>D>l-s_vQ3(^Pk-}j_?u-yCVWLU!r4=*@FV$Ks4!c|9%98RI^66p) z#!HAuj$Anrb6F9}gk`P1o*Z0H`)1lc(YT6iDl3HBu!<@%NzRVETwNK< z!6}qx%T2P@YK~N1u=Oj$ijGWE;})(AxWhC$8>x{5k5)bn%NDMT=_o;%T_Y@DKCBGj z_ab(@l3Rug7A)+kiy>oM2|%nl7HBCdj1*RulNe#wII4Ab^r+Um&qA0G0LJshqw|}q zCyrXjT49!yX2}7HYqe83spw-hfR34{wIf9{`UkR$2&y+mCa^zm?tYbs;;f`gNE9$J zwa+J=tZ*z}B3Y+EPg$t#bLgsLBENOOs2$^pX!C8(l{|4K>!NqU8tFuUhJMgXlvA`= zJtmb6y0jB}szn_Ydy6F(l~1bPh8@kl;(>5%2|YyN3STi^bVmL;`&_IPL4llHxg|YM zI3;jk#0OQY$>pY}Ol8Ts&=7BBNK+)_!TscVTO)GYs$r<&1Ny|s8 znBq|_%;YEv5>%|RJE66jC;MbO3?R-qW{{k!IR7f2I=2Me;e(6w@9?RrX*b8NY#J5H zRW*%7zGXhLwb2S6s3t=`*xKyij{imv?BFq_fI;3yb~1((=l1M{v2xba2CU;4Q!OhcZ+2^E>Rb z%3IC~Z5FC+$m%QFywlqF!bMpM!#Ojw!5LS+2X#bV0%gzShRq5>o&W(F{ z=uL&2DyL{DCpz?YhxN8p!2yX3+GVHNnT)5P139@vn|rOzS(P@$$v|gS+sKt2+Wfw? zsk)1clY!1Q=Vv;8%vC9_FcS!KvJJrWbvBT<4WwCYy%og~~)Q1!wtlg5WvBWl>HQ8t+OBbuM zbg`7Bi(4#9`;w*oZCT0++aUaHmY*H!?AK%|Ec zqpi#HW;;<~!cErMBPjLWI(sc&r>wI#m?gH!I!oX4OoW#oKS zvg%sQ?yTP%=2>Qsv%?V+{sN!cJ3Yv!_D-BTuB{UmTP(rGo0r_KRHoXRL29XF(@Qhi z^b};i+BC4uGu5uhRJ*b%KFwyYVye}Qc!QK?q;9G$r=u{BZZHpx@kfnDAUH@%8_x+? zyg_OfEHzW@2Cz(fer6e@;L$5J9u4y-xpbdhP0Rk5&N&hC&s{;$Nc^iK9UXsa$YfgP3jU+C={21#{CrM>Qy4UG{VT`P=e(=+kV ze?quim}n-ZjA%v}&WOe&j!vlQm9j|0)3G;|dn_sFB?nrb~tal3y+;E#!s0PJz`c#3?r=maU@7tA5(AjqtWZk5E^C65-8Dm zDU3-!DC}f_4A;g(f6>f_*G|IY)(A-P{q(E-^wp1VFSwS6e<=_>q1Xfzq;dDhn5Zp? 
zM?E6x*~*W_g%s((Z0GcG1`kZS{jQ`D0(~_I^zaESR%~zxr-~fuHF)`cgnqmnfi8j^ zVZ!*Kx=IiZ-&Cw_Xx}h=NjT_ePX0sxCoN0%SKLh()!a?=R+S|OoGiIyo-DzJCQ(Am z(#>b;y(9@%HyvF-c>IzVD56)x!l-t5LEXVIlV&j!=cC{oJm1CS+D5N-&|f|+#&nd}*%w>Mg^ zJ1Vw`CG}PfnZLyKR@hpQT~+IdEFGb@KQsG76*f7(q=Akic@B+wp_W_#Y;m;L^|JXdtFotc$eSn znwaUWd0ixO3JQ{E=r7s!mqXDfD_|BHE>>~4#9exEae}!`*HZpr`>fiy3mdAAdFYnM zJZRPeZ<#*k0VeNU#3ywu!jG_TS6t3&!Z-0c5ucq}pl?=p$5+s^hX&d)b0ZvF4!QzF z_t-ejbi7k*?tyYE-T%*xnY*=3w8TtZ1)}qc^7EuFmbRT6FlnP`^uO8O_%xpn)2PNY zLc{KOUa>S5Q&2-Bs+u$R?yj41;Z_kt2yI?z%db^YUAD{=`GeXA zs^A%m-+i;=PVD^B2LwL?LncALzv|zV6epy(2WeV4(PaLisd~|Ya8?OZ?Ow|k^v!yc zUNq3Pam+V6%4GO9-4I-`e<>w&>*ukwM2}`0zUEOsj%B-g*t{eZ zpAGKR`h}`DYrAmflcI2DVd&KI`-bJD{L6;(!3vJA0Fk1NIDTj|t+;O)$N#cZ*V^0h zV$}CkC1iK5f(?cj+M0KXsm^NR zGf6|6uUp&Hi)EKd>f4Zj>MbokM>MqghP4Ujy=`rd=yrjId2-pQ+HOtzG|yfcJ2WeC z{8ZB=sud2~cX7NEryM2uop(!G>>B=t>_~9+?R1IRia!R@4T={PMN_2re#lOb$WG^h ziredJdwo5r6y6QiSIuzr72CfyDY{#xnr`na=dG`8+wOGC#uLM0bSnGGe=?KL(qTd+vQRyEfB-IKBInRCd0aN&^kvMga(&@xqN*HC?;--Sw zzf}vo61siRq0sGv4lUZt_H!k=d)(%r*A0=}ZbI^EB?ffA9H*-GVpiX2aQyai4mg_g z&4NSp_Vk=Lf>UwU)5eEfaad}lwn7m&Q`A=IxhXM9?-wEA(}TKAao!gu8k7$SpCoDR zo>v`u^V{`wEh;?!zt#i@N^lli%+J?tccB6VtT6?Jv;8>RC6m~D6vOR^$!pLGu7#jR zdOq^qDRBECb)+CCjzA@bIzL-eqZ$UP9PDZq5_zs#4*6LIp@I*u;xNcB_+$!A#~B$nP@-=m|sb$Jyl7G`Lc!q7){!hfwKY1JkCZX zrEKF$O18(QHbW(xTJzvJo91DPvz7Buu#ye1f0CSS8XV8r=73Y!OoRB3$~@p zRM=w4AUF3dTV6h^UI=058S$>0voZ+kD$xH#M?*6JoXuvXFT?RvV#T5XIP z_6Ktz3>1J2<^SNY%On(i=xHlxb6$F-oTm#;!_g2zO6A~eV}C#S+@-T@MY#Z| z!C65f=s@pi?t33>cyB1Aa|-WeOfJ~n`bBD-S%Bhe#4xB!910Hbfgrhv4}e%FavXh5 zxLGyKplN7IzA6AmO1uE9E3GCFQYF*`67jn1Bae8i;%8NW%BRi+A%ygPS3Lnx0i*Al zS{C%8Ny<})E}D{76mpX!#Yk0J4p4&7DJVbXp+s#uFGaVAvVmr6*+&Bp6b&45m;6Z5 zCF;WjdXe7i=h|A^T!+>RmDT`T!0%r=IHQ2Sb*k`x^Hkuc%uNA5!D5?(|BMoT|I)$E z0{*ta@6?ONW%9Rzb6lTsj+LIdOg#P#gHCdVaBP+nInVBz#sAR@m~-dg^iH;?S>I>R{TcD zRAP$}u+6Qz;g<+ZY;K zb-$LDoUANv5{OB_s@G+Oh?6Q z+x)QCF12*xY8aax79W1Zju$i!lqP;;is`WzGyALtd8*}}(!_%-Vas~oC 
z71zP`DV?qD(Je5*|EchaJ^eJ={=XPetQzf(GEuT8J9_8mwtk#s3XSQ&`_BlnTcurC8t}Qvt{^vh0ey}6|h#h7h zoYP$=iIrQh1(J9>6m?#x+;*t<6HfMS5Q^&ZFT9WWQ~yN^Y)@X#rI5hgGNJalj-7dt z)^nJj^4rjDp%xa_6K4O}B-AW;ql=X_6bI-kc%z&D(igR`c1>uq+mdT{sZhjOBt@PL zb)W<+*u6>!qZzlnwR}2x$n4LYwBppo>FNO6lB^5ZzLiSRhsec5>1Fxgxt=cH;|^zLHUksf8k)^Q#@ zJ4uyzoPNZK1k=eaI+n&x3QP0jl{&iHzI9s{la_j4`eoa<9^}pw#Q_iFMV2e$fxJuvCnFC``W&(Xrp~wX!FWH zR@?1;+jOClZPVrU+Eh^lwgs|J6Uccorps^MuXT0ygxuUxr`xwyECah&Eg^B7!>;DI z-QbHwaM@#9n6OcyFC7D^i+UB}El|9aNM&5N)b@)k?bh$$>Y6G+!8#ybMG84e9`_9a zx|WU28S*+ikuwtsVzvm;e!06FWc{$aSs#fjv9yL@6hE zAt6(@3e%8*eGxZ{3~W^hR7$RLU{k`HB_TIeD^TIvb-&CkEmm144bW_9<-&zcLqn)I z`PYL$1hlIi=wqI`LDnfeb)2D+kV3j{TI|xqY8>DKcZovesU3{|oyC#=vm`6z0d`j1 z$6ohAnuQWHNV8DZDjC#iHYkgK@)FLK(plmqsX7AX%qwSLD59N0V6r=y(=HFP;vhA? z9$F$b!fM~0Q`EjQs+jDX)48^c>^Lv0X`+4N|JIP5k{LEfcFO5G-DIasiOoR+@~dPg z@~e`KF~#2h>FzhZbvBoT&O`tjm!LjXy~FBq01H|^WhImN_6v&*=NU>#uQ!F zPF7QQ`&{(m-oB@CEb+DOpvI!R`=`2_I$CYNgKjl+M_!q_b4R53RTH55jCB{zRugoC z(2R7Sq0=jA6IVd^qO}PTlxo9_f^hemdR+BFace5%y4#;N*eGYq3^tZw<-Ro*EwRN| z)EKcdl&eLOQYDM43ac-YBog;EM)n#%RcWM>vu1Wp^;mcFAW7+3>hnsBY+A%AMI>;j z*BhtuQE*xV6^qo0>{mbIk2*1y^>2OEiTSxHMyy44it=XWCdKFhONyyx&GfG>j#$|p zQfsWs>O*>wowrICtb{W8Y4RAE@?aQ)yxK?qlRAt$F!))Y)_*;BYk7IFw7jAuSeLE! 
z+$Fe8k)Gv`%>k#a3c)W9C%hvf!N{4gXoB?s}o0{ul+({q=Q6K?#LUR}Us zmt3__y<^roO`mtD4-x}?5HrAr=W=$1bmhHk05Rm`y`s<&uD zNN8wHJz=x~1ncL%wF2Andmt z)($4BdfLkGh#N}!H&?{I(<LNcx8Q^ zzOt-;ld;xHq?whPp01j*UJ>}JFkAV}e%5(s*JJ0Ny@k(3JSlJBQ=;I-Yoh@_za6cq z{#kyEPwKv^wDt-g5AYKQUH%ZCJNWzzpF8>d7@uckPQl#;#P`tKnOwh_&$IZ%x;{GF zyuV5=%XS6duUyG?;n32`lq(JcXKn0mjO40Wc{UZ{4)Z&GyufbNYumli@dB#dnNQbe zo-V?@*XCG`imU9KCVn?9oo0j<}MYcXu4ZA5^Zh0ix+T+>;QA{x%KK z@FXB&7Ixn?N~=Ui>);M<6+XFY36Ps(++M9`r|Tl$EWJ1i>k$IOeY?ZqtX^E5m@`MY zMfxF7T43Ed1>ua-?3_S4d_A<(nIanM4L1-C1s+eHXz)HYosLFv=B6pyVxl?wB&LWO z(M=F-G0~iL5)d&95Di8qkc3zLHRUJ3KOl9 z$jTInpjjG;oLiDel${HSaQ)mS_r6WuRL4M6kcR;c*o{1;w=qxI-8>o7#uv)jq zQ!Y$|m(56zr!`GPsZO^_bz0@tH^ogRU@^hAS#b7l8XS;cWhJx=Y$Sn=$gxkM&pYBk z43eyh0l6b~Zg7>X6~j%ko;Pi=zt(BDpRnM{<@}8wIkt{>t+Kx^iV&R%CDjssLYvQ7 z+u(GUDITP;eX_UI{pV-mKYv`P59e$e<5nvEEQIWsXo=P2CR>CHR15w&m1WIzXQZQ4 zR+IIZPFqLTV6pIFNhj$HxZe%ueDO7=#Ra4QA=1EALp*@0dV)a0fhNkjoncin7b|AV z+axA<(=EjWyRxQ*@kG&9fAI-T>RI`Jx6j4ONETgk13eOrbhYdFD);M3K9FvgxL=rJ^QVE3?CgWmMvw8h&V85`dk7(S zeuB@7QdL_*nI^?vh$pY`wA6T-MWpbxiLVh=)X+IHwcu-yR9C{|-^5F%`P!sra#Fg| zt|ro+p({~OwF#mv7JI0*o7Uc#g(`z9btd`+Zll|?zHuGS7=4NTG8s4tyRCaZagI|^QIzNo%p$;&0DQal_}cV z*il()M{GqT*8~W57z9PlILN=~?VZ-!paOvp)=IM>5EP{dwA#a1-#-QQN_f*!m6dBn zhc+L!Hq{t=(X2|C56hV_l_biqSzB(YAn}Y7THgJeFrC^7EqA(YUS;!F?D$Y8@_XJ# zgQi1T29-lvP*@ow5oz)8X5+tceoo+|DvOH%lvjsyv7V;#(NX@>oa&LMml5#=)1~0I z2!6K(M(K+r;b8bRO?s$l{|4z#Qg3vd&0GB}DvQl*^QvrpkYE>7>z5v z?Le{aE2XyuWkH-DH{uMThbgkt4iAu?F)1kf3d%%?R*p78rs8TRT5&o|Pn;i^T;!ks z=BvqbO>^k(Hi+tATqt7q90gQ^_w0nj(`^n>Ocs=b;M>4Kq@c4PzyI-Lthn6AP$+GH zao!P+6NM!3^-ju8gF+wdM7!TLMQ<_xL;Lhxv`K+^ykKS8YL2GRq~=$#^7p1C`2()N z38t|%?|zLeuJMa;+zMv1?C>~oHy!UmvGdU#*krV2J3<9X@xA!6J!5P@C^{|o?Z`+! 
zSklL$OY`K&sBIk}uSB(2bi{KBLFJU4U0g^}Ea6&OD~TqW-C>eZWS?Kdwc)3X6=Df$ z$;x0Qi0B4Bp`#~ICB+78De>I;4LbHf?;%8U`0Hv32T$1XxN-FVeG!8a0kPPqAfww@ z%rwqpjp5Qg%V`?pF1Ii{4OzJMF?Idj(Sim!95&m!%bLh$Dxu{-Nsvs zAQX;=tSnASG)83U6T1lhhZ3tkn3UBAhh-KNddiVJxqckoJ8H4qqI^B(tUk#xzFmKD zG%!AZXo-ifvIS2ahMPxrc1P@-f#Qm;cwgXx`7U1Of*Z^GWGPTn_x%hy2sdh_xe9v^fWMm2U`={`W5vcWO!!g z?m32R5B!on(Esu|Hj$miiPVhQR9-watb@_0l`o87sZLLA=L?6%ou%kt0y^5dKK+-o z7_!cuF;?AZd129QemY$;JvyizrtNB%0}`0srb*MW6&;xK@K?o@QE4~uktTKr?3~_;(xc?4#e!!IJ{C%q-?CM9V6}9I0tfv z@nZ(LTKR1{*%Z0!-s+i$Kd*bsVa457DYOlk1J zcu_-GoG}DDnu*6;C)DxpHBTN>)y)>x#$7{3!|YlRZAb1QD8a4q-D|_w3E^|bty%1{ zo>{aS)a*&sQg`cNJ=Q{lMzj8?6&8EnFZQYB+YgJquf^UP@dg&VnoBc>Urn|Ee-Voz zXdBbZpnfH5FE50Z&{LoJ;`D0cD)DlYZ?dEN@}1K+Y%wS}U2Ce%qGYi$Q1q;J)S(R_;ho28sK7YY>a2A$3a5?HEse* zDsVe6nSfWwKolpu&R%P?@7d^}6v@V3DtU9VB-luO8E~%Helq|5rlG*pz^}2jM*+Xg z9VcW3)(*D?RZ0p5<PH|e?BJ^R$ z6KNCEC@djMusvI3ROzp?we&aOL@IFbkp`CaIOa^Rv)Pjxmw%!Ga$`-S9(Zr_uM?(1 zbH;D|(>3l(^5X#CKy2F4S3K4mFG$ucaHebHG@3-~%$~-Pb!1T$P^0WpT49CYoW=1` zlv!=`+cv2PS-scCC=~j5Q^B)?iHk|*G)WywNx~K1mITwKy=NhogT=|d^miyphDa{K=h67-f<&y zuSv81SLMQ@W5SDjQXHAZH1AmMzhm2+HHe1KGw!DisSUiQ ziE!CCJ`&S9-e=I#hZ~cV3c{e1BEe z>=2e1y6?XtNl1Equ@4(#``X%vuDLEG728*~6<$QM*|Tr9eMRyzq+aG ztL~QDSL1kz>0lk{3Q3kUuJkebX?UH}PxW|uK)n_lQFUF}h$=3@Hll)H+~KfFw-Ie+ z9Z{a1u5U!M)G1)xD7K!0#p*oCHSfv8>7G6R*bF7qDes6x z5CJRom=`KK<(|A#pkiWA-U+DaxIH-*mXqC+zp2}jJtNr&8LT>(M7x-OSU-U(PNZ{! z`nY|CHo^pr#5XlqC~Q*>(g>8yb*{WxGgh%kVH2GoHxYu}S{f$XX;DpU_-fk-DNLkM zPRO`F_nuUNdq?K^H9{6*d!q$MV`rFRv z%9`fTX?3=%)8bF_e%W`J)y+)Il+kSN#7r(&BQ4{wrXnyy@WR|UFf-*9@nUC#{Qxo3 z<(VMPAVHhHDO;GKWolz8<Qw3*tPiVv>GKOy9oR*rZU`;5aXkF_Q^@U&Rv4H_M!P|V7&JUUYJQJ62VPK^_S z6{M6$hn+drDNo0}Hk3jxvaMBTHqH|087`g-M6eaX`kq$OnQ_MoS*?r1f3#DU;NWQ(}o{2rl9!IE-5sep$ z%d_`G>?~*QmAHqRwk9hh%gFVuh2*w1gCOdHon?xb)lGea5#4zmVlb`9LoU*K;G`1f zbyeK1PN%pbf1-GvAL~!nvZCqNWrZVrp;Ayvc?``-|q5CbjopjA);byS(gzh-Ee@Q^6Iy?T;I_c>yYF z+jTVIEkqQGsiGu@zbf40P!6snoZ&`t4alD&IgflC>*BjYKDHg(N!wOJaF<(&%)7o? 
zM@)c9h9|?<>+MW91^0xzYXM|Za^981$&D=hnAwY%J?3uAjwII*l0EQ*M7PR6d&tja zrw#g|XC(ykG-$Kt4fn1aU}#dz9)m|l(4KSWYlKioIZ0e-gwjsg@;i{H3H6bG!I8srq zeNh2@^l!b;*iL&s+nIta9=_3eX9DHVVLMZe=dzv2r-fj}B|EQfvx~$sj9?&SHgS3> ze_e}V?hS7jkR}q`RGl^`Z4<{NG{VOOvErP24Jd89$FR)()-}N=$1|YISJ>ztn|;rG zaP+jH2^rFH!tpd?!VL^D;UuIuWzh0(H-05xNAlhpG4gm2nOqMt1HNMi%J*z5z%!7* zEwSqpM>b3DHp`XbdFxbCmPg*;ACc-7D~wS>rT&Qa-cLtYn8p6mvT_Btbmw~ zz*(__r^9O;;@MMy)o(fbW7s(g@%e)e0Go1T@@jGwk5;XitH}+n4{S5sa1jcSdl{Kk ziXh{uT^IB!mZdnYmXr(a)AL_Lc2BYhE;nCokmk;s`_AU)o33{A-G|1}Qj#3V8GaTy z>qLGw=$|bd;fe}Sh=wn9)Wk5FR<>zv+>;~FIbwpUHZ{anuv`?{g0KxnWO$r5|aCQ{voHjZkurW8^2(eR>G~mqf zrABSb-WjbL_Vu=7np%TrJ4V?Ql@JHgepD2I^U{kPb5a;;U{3Ilvkom0w6giXth;=c z-=}nY3yXc#upI8#dB(0Yjk#=|Ez}6=@wHT(GxOR52c*Gw0SVka}r6U4b;b6(399~E7+v$SilNBH{4rdg%O7tWUgUuM0X zi3IO^b|EzLAJsmCZ92+1+RdYa0DKd}H%ZxaX!yia9u4P;8vc0GXt4Rh5&yDBNf$N| z|7M%8a|G$1qG|hj1`}nQmWC9F>9OZ*Si^qXg$%!L3t`!L>abW8 z@UNZ(ELAr1xZ0UI^^}4=Gi?(MH2Jva2S&FuwnGi$Ven1FYN5O{><ohB#Q>~uedHY`^jiaMiQ%L zN3jxZNnerG9Vt0w3X3NgAVnP}o>)z;b}QMfu7`uWrs^k?>p-K?JH3fq4lzYPOonv= z6uFSE*|hFF6`(TL**g`HQtJ7!i@f|iFil1@*0!FGz!YNR#^X|q6o$)V7Ir<$eL?=j zOxFeuN}CYJ(OmGw3@^w@Z9Qj2f%t^t2^yK)$^eqZ62%rv5d-Yf`B9&06++N5=SqSzyKhP#|vn9^AFzP&=6}ZkPzMEJco-cjkQKp+b>1CL6j4zblUz5b%h3tvH z>P2o(dXXFazf7UY*wBkf;E{S!CW(B}j)Ll$Bpu5nVWHK7b$>IB+ZJ-fbSDfY3S7nA*$fa@EdMMkuwbC|_%%M6zHLE#M*VQ+W{1a4Kx*31XxRe)> z5(o3Vm?|z16ThmU$#kuMUwbprhm(vLN&L!9ONK|x`mFA77<_Kx@k($IvU->>t4Lft zVZ89CCZG6rMS!9Tt|r$`K5>)9jW`BKP~K|tqRA(2tvq4z4mZaWkDQkdox`$Iaablm zNbMeXgNlEEUTQ4orJ1{-YJK$^tL}!X*Vb?BcQ+QNZg_905<{vjR+9s6SOdVda?K!% zCrZA)nq2Ol;50v;ps@4g6Z!3x!BZ1@HKBgv#4NZ%#;zmkMXMsyfqP-}@sN`=l{AFogkT@p@ixpi-#;kZv0u}*^1&~vFn&*68VMT?1Qz!0;1;KvwbbjB7 z|EPMwDp^em#tql$^U^&dG^G4RYhz2*Xmi=+9;r?EBVQ@K&X!t!gB(~?)O$Wb;Cxvd zlkd5jJoJrY>k$QrmHoMzJm`M4GKwOQHl8QfQPV;_j~CQ})|YaDkHe#wtk3o2Qc4c> zhIiWj)Lj3%qyRRQ(ChuT!}&|rsXz<|#weq<)Gr!7pAvHXW^_Hz5$MQk8MT%c|@Q#kFU$p>nY=+qG*&JBv=rP zrTm1Ph%Tf!sKVvpJ^sq@?QW_7u0DzVS4!n{pB|p&c_LqwkJy<7DKlXed!)o^95q@b>^}dIhiH1V3Vc?eIyReZe#|6 
z8<`Pn7WIEWK|)Kq?GC}e`Y~XZBa3Rvcd=A-KIYiY_)hnj$Z4_XW=Tmc=V^>Mfx;;7 zB?<###g}F&3oAtfBqgy0DD=XUx?$_ArDexe-p~sDqKT1M&CD1jvfOOh@{+w270Zu( z9V&6FG-IiWEe&%{*;}QySafI)-aqC~I8Dcn~ZtGJqa^vpyrcFe@}=ow2fl?4u0`Zk}$k-PbX zOT31_L9w71)aAz?(h6{7rcRBEGd(f4)1ybmv706qr4@z%2nY_|Y|mvx_}kh9e;M>B zG61dagK_ry{|i2^2wU|9nddB_svwgu0(UP*;bVG=S{H@Ur7R!1Rp%STv z&?u%eKBP0=U^*lC2FuVh4P9l+IBy>zojLXZ{hJy*jF~NO1ty!55+)d09VWNkk|N_x zAx5P1zIW|{2>TVt;&?i{|B>7eP8zN0RaogWvPbMt1MX)_SC?2|t50?87O@ori;ozS( zb`O5!uMh#TI=IV2d-?dz%a7^GZodAUyZJ17(DN52nSE2?=Zox{N}KQJo=$BDT<{8S z+Gd1fu*t%d&XgW@xuuMsWq+43DBako)L$O;|9euhRdNH9eYPNMm5d@sIz~K-C6AzX zfY%8=>%&XMru1#y5D)v!p?9mss4Y1b*Q1n&wc-}~MKQ&2dnr$0R#11fjck&&r;1LB zX7mKlP6NN3T`AXdFJP}1%N382FoV-n zgqdmo*a$^mi4$fl8OG~{na!S+$3<&0aJ(>M!Az9LRb5EVgel`2#s?8!SL(EF61$_zul1?6`a_ z31|POEeuDnrl-eNwws3}P)oTO#G=aIOM(g!<#tGG84}S4Gj3yFDJlUo!emuhjsxV6$dGM{FfZ|>h1ks_tMs(MXW-0(TIA>#7x*j zDQr`@6=!|U%d)#k);~O6$a`9Zl2JKKJ6jmO$A}!JH{cvCK6)eNB=e4CdR&mYU>8z* zLAETpkuS1BV0r^h>O!{6vKTLONv-H|v@D7rs31IO81>!yS_YD_J=&Kd;UEM)>oH3& zh*s69I_OgX1Yyb(KV}x(MGb4T!V4F(rO|?3Ot;I~3cDBs)Lvj?Z0S*Tk23;e$ZE2S zQSz}H9iR{a0N3FWyL;y=pozTYp9!*@c@=Z9C%*a;Jwddw^%9=o0a!jrAYWJg>75uh zFHD{TZY?Rl2D>8HIvrV!&>3gYV2OBd}dVzq`1B0 zr8o7gx)hyIv4E;W`nAV~lg2(D8Wy+%qcVMW{gn zLSu0-Q^<--dYK>=lo^Xw1F>uwS@-Ecz{4d}i;0IES3qrv`a58cf-#1Dyj!cV|LUaV zeN&PYJ|xLmpclDm0@X`WU@|$%?wqed;uRurV>t>X1G?y>)dw5EAAMSB}1!FO9&Y5hj=VYIiP(j?l7XwJrN>R{44Y_ZdsROWqS zX6StPJsTqcF>BjWh%bot<}F!YlKf_;v^|GeGR0fF4KA;;ZZFafVi#*7(Wyf}#dP3{ z*ZTiSK5oAkpmpSjp1z2uuR_I>Xu~z{hJnd8k*ddQSM{Is;UEQke3#if){VWgbZHCy zQ#9}!=Bw&&`?gS|w>7;30SvpV_dXo_cbi0ZP!AkNv{rS&WER z?RSG`J&5r&07!Qp73(i`z$Lr~hEhd=FwDm0N9Ys#@tuOG{=SCCejn`hqO*{~JAf$2%w6}wUb`XQCc2gk4Dmz+M(iEVcftN%U z0{~ya+0QIQYhG{e^mjYza!+HM45F+5<2CkBY7T~FW_EZ&Y$Q3rfF;StNl>`J3R92v zk(IuPQY$%ZijVLfjp11)K$!**CMzF^NrxX)$L!_Jh)%6f#)z83dr%)Qhpgdz((0Lw zs`itA)J+sEhrgp9u<*vbtkk6hYd-L%XBW&MA*eV3(;qc!>0xAo_@<1=1|)J7nWEEK zG}6qD=L7K$0@)EAJg89>2zbyV#cA>VNBJ!`L4tWZe^8&p zPrJeWnBXx8L6D&_4hX#ECWb$X-Rn$0wBqT9t6CyAnMAPtyXvn0{)&E&G-!GHKoiwF 
zC|t+JE<uOw%zzZ=}2L$D(yi#K5`-KI^B?B=D3k+@eA-z^6AqPPJ$|i!QbxZ{bEOPmc zKZ;KH(EE>p>K}k~QCe>nMdf$uzr%N`H-X@$s6yNQ|4`eK#vXOhsaqB|I_l+3i97d^ z>n&6B4RGI|?^m|YAeAtr25@B2@!-6#3Xa=fi?B5iQt^;s?4O~vs`;%Dn$|B@{G41pn;Tf0En@{g z8VNzXSnR~DmI*@h%tV9+)DR&XKdl&1P*zJM^#;$W`ae;l zoUrINwXBLh{Q}WUGCzTK8YU74k4+@!e{FIiANl~4uZ8!nM(*H<&Txv{a9glhu;cYD zn8LX%_D^a#Nw4JDtxTWK)p)BU&gX_lWJt>@<-l(#6Dm>(sbi`xqc)X(~ zJjO8I^>?#Y@*l{*@OD0fjLu?_nPLz0O)$hZr%?7UV6A63KPMH%cf7v>yPHDuH2{^k ze=yc++%neMUcg|M5`(cjf6y6g8C|%!&xV$13Fw<_J{oI_3z_v=4)<@9RJC6VBVP$j z{!H(DiVog-f24Yft{{f4qkOBfsJ6fzf=D>Cj7QeT?Sl`$Cw*}iz346!^%E!X>wY!* z_k@bXNAj<|0{pD~U=;mOB=z;hYghqpzwsK5uy7AfKGLFFb}iAt6CeKU5B@lc0_RT1 zq+$^;?YnsGUqx)1Js9*i#-qjc%6mKWUchQ%8aw%-GgWCUz7uroI=IdfU{6pviq~|| z<}u5%fMG92Tc}GOANe!0*5TowiCE1Xvp=ZZt^R*$$6d2rxT?%QkgutvXVeZdq#-k29d)2JILL^d8bs5B|6rGdLyORv{zFTN z%~uOnFkWzbYN>|M+Jf0f#X_#&#- zM<>}mC=|uOqSByn-eaTa*~Wyft`ZyX<=5)NgX1AHJ0+0la(J;Uxy8%|XEJbOd2-ydqAE~M{u;LDwVc>Wfh)6^z)*x{ z;P~JrHkif!`H@tAq{R;-7YTHe2o{L|DOrC-S3DDp_oA$BkjEem=u#~4<$IwZ*$%h^ zEu0Tgne@>`Gu7M0#9x%nC^-r>FQQVIjeY^^9Kq+Z&L2Z2@*nK^iyHX=!NqBtOGe9! z`FD_<%Qki+$TZ_Tog;27zy(ZIVYx6dZk>qmUzm4~LROul*PA{6I_{YoKjWSY&1h`L z!fR~QTjPJMD{aiiEJLUl+GTi$8DomegGEHZ>e2kfOTN>+j<5E)hSuA0~maxskNWkuX2z6T>hm5!?u~)T)8zz#iiCGT#l$yF>^NvD`$R zhFWtrC`MEKz-U(9bm&cFs5B#e)oOs6@y2v43x*|Jkf8WX(1)%67PCWW z@`7OvHyaWKinh6|bZSq>8V*B%?%)t9Sf0D;m}wz>ujae~=Nd5!8pVX)uJu$J~6^j+iWJwUHr4JM91fm#Q)fUZJ-=#m9CM6|Cx{Tw{Od!-D;v|APBk-g_ z4bd9!c@`xZEq20_L3lbq#qW6z+(?jjZSDdL4PA5VM$jf^+P&019Y31EtopkmUJKmD z(OSdLzhr$qnlc~^YvFAJ9TmXmq{7BATPN0u!w+-h|xjz!eoaoU^SD&F1k05 z*`)ocMo3}{MV2EA=apF~KY=lRfBsppkgwozWb3d8ZpB6b<^?{H(QtC;0AKIX5n_nt zE0Hzb#j{VM2V(#^4p0JTU*bat*GZMsc53)J!O1=Zoh^S*SIB@fPGfcf^geK0OO3HIZ9`z6tEz*NXcV ziB43DtuGM|dY(II3y8&C<$iX%08`R@HAigisAMTy11U-9T}&_h{P4JM!55ywk(cHj zM+8d_^Kp1=sl?GpZ5Oj>?ShC! 
zXyAfI@|f#gt|udyuCzo+O!{a8tu&%mey1%;Oi<;DFtt$v9)u%aT$qi$ScTa`co~%#x%X^Rig&3(;Xg5=!Ome<@AcWjmx7d4c+9TbO|ux)h1M zs0n6LkSCbmyYhD#)OYK@gfO5ikGp$vq(8n*WYNS5XKxo8f;H1Ah=eKRQ+YT4C)ugs zP`sn|1PT$FQq^GzwDw+bANtPkHK2HO8UXdU*9C!MCz46vY(9u>yhUxaiEL-R^O9Z9 zP8AN>&d)Z(m|g2VJI!>Da3^jNJ*qfIx*LkI@a=*qyEsyuL=51GO>H`sQi65GauHpIaH$> zL}MU5g@b4sr{uH+(QGs`zZhemH_j%@p7AyonMFdTnr$aY$0F+KNQdoHHG2`1?MEP6QUzjlfdr~6D{(jpa%s$$^feU8aYTL{5r z`}vRwK%ttw7_&WN=ETa9G5?mJz|fEck++M=V5di$x`g*?~ z^Hghq6I4h&Z2|T0UTn|0gaT>2HLda1H0yZYDj$R6t#-|LEBDZ0VDW^bJ&O;zGv3O# z3mk8SrHi+!vE!{V-&}-I03UQAK}*KTk1FGIQExh8O)^(bYo5=KRe0xzq!b$J9i2$@ zv9qmHN^0|C#3M9;sKh(&FNnPEWfSGXx#875g~wBqBpt>|&C|#35rc1_6A5g9B*g@j zm@(9zzw?7Lo)`V$L+XDsNm_N;?n#Pwwg9v$+xe|48X^Klz6A>k^I@xk63J3h6Ty&^ z^aGS`^KWg89DI`x_ysW4ge|&Wh77a*XAnxefl#<+guL(_eoIZZD#=xQ_MeYSAtF8L)~-$nHHW+ z>b;I;jd8f%t@<4%}0UcpOZ)3deVF;592Lm%fCWs?cVMm^J0+79sS!nltuX*_Ty zY>*{$ngynhmc&f^-h{o(MNHT$G#NM(_KJ+c%h#i`Q(BBxdU{#cXyQ!RgplVccdU(B zvIzn=OSW}w4=+eNmkE2Nv?7m}@?gT|*d{uVb!ABgz2U=}bpxE4!((hVQNHrT1<~SR zVK>-GyO%D(g{=OGK3uLKA2L82mn)3VcDAT1A;X@)3=XLc5wyc+rwXDN-eo|p%TEOSqh&u4%l@)9E{x?|_Hlgd zc|O}oGelw>ITzF~KA*gQy~b_i>`+stRETwp1ESRS2CsceDM=l`7^vg9!R)WzyE)+J ziYI9%& zWU2^k=295tc&M(v%RZl{tar^Z2XZLYjC?d~w0#iTKTHyt+$9<3wJ2dod(*}IGr!7O z8Gc4{&rCzlYLY*w^)mc>ca=AF9oa*N`r+o0CrQ>-gTo9Ih}iV{Md7CZ_XvlSNtFB0 zub)|Ce#N#+>g>1}qMgy3hg44Qh7&*-lBxjRJRr8d40ougEux|7UluQ~wtN16aoX!8 z;YaneqaR&{A9G3z-W@v?Bwug+P`rp|KYDfy?D-cSjDIMOns0s8ZrR3Yveg&tmKMcS zhaa$8#@#1dJrL<#Y~mA5CC=rzq5VhgA#@)ea!KIR@3`NdaZaNeV$d0URA&E(9gmyj zBebcJa}T?Pli!HYjgKlH7;ngt{7#wA%!}7QGL{vWse#TQc=OD<3VUu#%!+%SgsqsK zv@;SOP|Wzi93*RFxKqN!C^ByHkr32VdDoeEpmkHJ_n*ZkkUzuXi1HDCH`S=OmET=J zNGq0*5T}E*tRO>u2xPcM*U^(X1!NInFv^cuV=ZU2GI)6*7eCu(>1w^wkywAiD#lxK z2zEg*b$<8lLJMnEY18e?c%^z`ZJR$O>uSfW$O#i{d8UdX$jk^H%BNPURtaB7M~eA! 
z>vx{F#(({9TKh_0}hY_6@cb=_p2o5*a-r5hw_B2*Soj(ZKQ3;Jh z5P8o7uc2Xne*S@9(C0_v{5!Ym%SY1TH^mR(oh;cn|Ilr^x|+Y{m-P9TB)|9d`tloz zdyE)5W2Y7{2A>8JVY&q|<+s(SHh~Ktvn#<){`jAci9%!Tjy3*QSDzUwc1R~Ha)zP!cox!Z3#)=C|GgQJ_iKO>-Fdn-MoHCd#u{DMe5`#ITCX1!+F$$3--%%{C}Q zMaob?gb_RLB{+32UqTb+hN&uqTv?Em8=*qdU^xmc*FMIgPJ|p(cF84TPOxG~IghSK z|A9AvR4JTP5y)c%j0t#d)7I(kYK+Pr(>80`zG7Q zC0yB$JvjKPvPi=NeixY1sca`Cw-ga$!-l?X$k1|7Oii*Cim4NK#GnXC7VinJm?vkd zwk-dHZ8x5W>Z5PK&CkoZXI68KPPlnxd4;fZq2n`JejB6X>H-}OF{=e)_7#XxnCcW_ z4E(G^+6iWOnVSfMP2B*Rr_Xx`HDiq5K-}Dnxp~rzxp@+ZQ{b>V^P45UhF^5NFvR^b zmp(5CqJzx&S1_7yC2@z03Wzc$Acc)=zWzNzVtYrOPmkXav)kBe6`}i3)_HBDXiP1K zG2aIX7cXF$;N}y1;Nvm`^nm?te-vjdMr}Eu5mKV1cw&+0 z;v$d`UDFjRfPxT-EM$LY4r@zI)}H!!b zE)8xW`nvvWV=7bmJxu8z05EZ4S;g@0e@V~_oz8xP3Mw-kdUT?|@m+M}BgV;Yz@|sP z@>VDL8$zVD_O}U2kI#vW)G?j7j#bVgDR*0G|EmdszvZ?n*=L3%2a+b3ccYj;Gk_!O zNV~9(u#N>a^0Z<|NWdzrfMFXLh`e=6Z8nsf>x>J+FApw`?ZY%X`eLqCn-={{RrR5$QDB#ksZRuk>#^z-B(8-gGEejxNORi0>bse)M=O1dkdkMQ2+gEZyp<~!CkqDBQ zl6_r!u`YpA8F)CVmY1OM146BTZDfRz@J@%)pq)6=De6mYkNwi57OU^EtPxrOz6@Xg zRoY;_R3bX;mj%6)bR{D5!CWiq6!FO%=O4aLtu*2Iie4pFIQ%OSkSODVXfTQl>wH}T z;kKxiq^cb|QyjZ1CaAG{u@(yFgPDG3x*n_pqm4Ok2UFt;w8Z`AMApJ6MkKRd_zEAlu`tNk90Gz{qz}QA4j0k?|*ox~8f5tcR|8;FSjndeNUD}1&K0p%F z>vPe>ReUq9!=UKVh0|C_mjJwi0H}OHJT1NV;NpwPAjodM3-l=xlwGX_*~qHyIF#FQ zW-SN@*Xmuw;u9IGNHkdi*l3i%`WJ!h@*PDt+Tkoxs#z=BAXji6N|)PGXm2Cae7!?{ zgNE>e{@rpY_$6J5Eehi+!-XFGSRV#e4bge$W+7EP%w4l0n+^_N0GEsowr*yrD6)xp z1&h3h`EzkeEq z&3XvjyiCu1V^X558u$TvmCa6Hy!+bi!>E0Nw!49^R;{B?Gf z6Q9wKF%U)ehP5#ezS0R|MDX$LyR%08* z6}rme{=W)CRn1!|uE`zSh21mpQY!mddU0G`8!e&R$I zqZi*~-R>Rz>2Y%SD4Qz)jgK78f7s0aa^NT%Pi$8MD~c6srxpGLgH;_yO@Inv=H?tR zO;~bBbS`+N>>ed+o-Sp0*E{=zbe_d&Vq+&5?-Wj;&f7(B~KBn>)U@>|YPlJkgUCrNXT zk_EO#pvK7BLMTc2f{7+EU$ouMKnr{5P?ZQX9IkAO{bcbC zUni^-9m)ocfo$Wsq_1 z!SLXRZh!x6x4-?(A9(+bFK0^3=MG#4-+%X8|MUAl{&rkZ8-|!=V0i_H!d}tXF6%?If<(#>m`R!?~A$g`>(6TEA20%RaLXr zEHTUw!DyPONK2DfFdDp}&;c-!36N&2N?o^VSNZvC`4^;>U|xHNlLX@5v{BpEzbroT4s9-0RMEt-luk>)XMI~8JTd=L8B`uTYFlZx*w}1MtKmI#kzW+5} 
zG>#e_{OfmpF&Q8>(5KzEw`ei9Y+WfS&i(HVlrXG3_%7Z`3}*o!k^@iNl&R-C%V#!hOd&8VH-#% zUfPXlo8RaKQx+`|B2jOqyn1bSP4;OcE`?BPnFh!80UvietYV1`!c_@Xm^JUyjbL4H zZ?_kQ{;~p?Mo5Ezf1l*82F~8n2F_O)mPuv-6ckwWs0QdB{&iYQQ=WwGw!16P6~2I* z?5g(#7$cq@@VoEYOqU-`t?r4P)s;x+()6E~y2FMDMVmOCmAcSaGtKKMa#q+5+jdD> z+POor7y`Rx^bhJC^9Xm75Cvn2`Fa7FFHE(lz%Zkp5-+e(`NZ32oDMeoURQiZ6fJ$% zSblRKI#t$Q>q>f%Eo*uLucfh)u-suX8QEv4bx3A2zUORVx;w3DwAqooHP}zvavEW* zCA<*ngS$HB_DPQe>NWFE%!)~mOt)|9v;U1}qZ;K>W8=4UYM?<{OO1MNpPY+2pr->e^OZ4ad!-uB_dG6_7Y9M-byUK zWm&5^)1Nj^Kbs~^KYeNXftTs0mlurHe^>r>HDAmJ*Q^b16|P63(e8Qaf$>1c84sFQ zm-KLpm|M`(tI0J1``!Fou1m9Pjuarxi{N|&Ejbu1ka?so`Ue@}ey7cu8htH3etZQNYX@^`r0s4^}(}}IP5;gw={LWj~zBi%(*S3fuGeGOLzy9>Xh|OU~$JR zEW&~|f#&6>#3;oSx0Fo`9s$~5XF;HZMgEBR*5)Q!Iqh4Dg2k;xfr$qf=g0X2&5Y6L zdFDNqnku2WrDmrxmuPnEafhwP9TM9+vK`Iu*fIB>VQ^Kvw(Q%T+51SRkbDzk%RAOG z@sCEW!Zf^ToY(v>MTt;5(@;k@M2tTou); zTb!{&qS?HC!6wA1ZL11AK8kN9eK1>@#W%EsY_IaHWQIXu-eOQ>g-uR(i?nRP;yPru z>l|ZCUg0=KGdE%RFjlsgq#&+wN)OSZ#Hdm}Nyop;REg<`;KTwz^^uYO*2RL3Ole6T3h%R1hh9X%|~YgLwG zVbg^NC~#S(+ufw7aFBwsV0&cIjr!>U3~mYt&lY6`$Am(6goqy=FT2CR68g!OvO%`& z4+oR)n*3Zoio?NV=x~)Cby`@eE(S1LIwTR7P!FP?bI$3K-CL=|gd(nRNiv9TMfDqDhcww$&+FEhmMenZ*9*q9~Zmt(IWBW9v7;Cc3b3&L( ze9@P4D=Soxd&>9YT|8z_fY5yD5a(0tCG?Kp%0=wGua_=kVAHw#_$lX&tJ))j`FAW=F_aXuQvABH;k{9jOZ>zFHav zU#!lp)wEr>-aST{&?71X}2Nsf`FNQI|d9?xQ##{)PZjFZ6>5y7`^ zuCyM=)KSHN%*dQdG7;f*KyT(zI$K)dEM`|^wg4U z{k87BmhF}-#Zux(j^oI>9p{gUA%+Q=0D;LcVPe`EuoHh+4wCv_4#z(#4; zefL(KI(6#QsdG-9s^VPSh?sgBrQ07E6#5-#GVG2s>v-X6X6cE%z>l~oPNUa$jEOPL zc*wNyp48PSI_aKtJeLFCWL(yIl(Q1FL&RAv;(INVG`KV_%Dr`$Kl9FXGFg$X)DW%y z!_}r9*jcL`n2w&s^J|^bN?OTE{k>;>GPyomm5e!Zu42PmvP#F<=@{5mxKZE{!;77I z8uWriT5NyjwH&^-yVxIF%LrZxPylf!jnH9Ov8$JcK{l>>g=`zJK%aT%gqFrwc-q`* z5#M907{Wud2h?DT+k0VIjs#PMR6MGbA>`*D zV$CL{1O?C>og`zW3@FV{PS!{5kdYqXFV-CRdVQ}dQ4Sb6Q*2gu5Sc?xXm+mND`Gqg<>Po+E;C{GdnbT9`@$H$|DP@ zP~%N%8YGvJW&K*o}MI4+39|#zJvfU6Nf?kdzgeuO@SgjCL+_ zgBh}?*BzNQ;F|!;J{;4!(OB{j?_frt|xAh zjs$^1{7IWGy4eoL?yDf1A`Llqn7-}H>sXPGT#Rk>2Y`HgAxP^O=>c 
zFNv&GG+UcZd$wWNjB;w{pk?ttqpdwB)j~6+(T#&qBF#gtp+knK#VTIM>Tv9=@x`HD zsD$7N44G!ql38;UL-VQS4T?T3HRFYB*E4dYn<8azq=T)^<=|XGW85_7E{W63+H0N2 zdx66I7rADe?%bD=9Pc6MAhkJtz6k8JWZa);OE$Mv#n+)86t&858i1N@iGxGbk^M&8 zz;B+lF!Xf#dDDq=F&q2D{91Q{@a9YrVlQ|f{#k(Dvhm?zC%uPb-&fB*P(7*_3D<>ha}pG96%M)Y&zU~HZ|$wG^g_}AVt!UkryU< z#rq=LM|7?GLYbq#+M@*msRu7O!R58SzSF01}! zFy-+$x;$qi%?{*wMc$jwSf2j#g~=g2kZiki6`_XK@=NYn5kpZvr@B5H;0)-Nt6gep zz4_{hsi`A9yCq(%rk-)GF&XC)bNor=;8_tn!EiIv9n=;xhb*C>cocJSDg13NzP=C8 zrt}-|0s2YiI$8&j%g%04RQ6yEP8<@H=)npPJ9prQhm08_U68BEV4QIg{B@}`866#H zhbPE|HQ{L5;+-y>mtGB7C)Q^ZQck$y9j{6s<-`^#$ks3*n0XVuNskqY(@6)J<8JDu zG_?F6n4&fl{!#{o`VFcNeP>LRS+B*(o9>+JEh)o;7#~V#+N|ZXv5-JB^gdbNWst{Q z>l)>XuVJT7DO%XJNd1yeFn ztjd3TVPUrC-6ZH!aoCMY@H)GY{GJ^uxfrv@rp5OQR$Av#u@lvTS^eQ35!rDd%8&R5 z)jYOlRLzrLcCfgj#qzOLM6kY3YfByZ5Md79UsmCL|Dt^Rb*_`@{p%{c!(~3d>sR71 ztPdo$McYTUaMNqA?{{`=poljEch*f=W_R(QvoRye(e^h*TWLz&8*FX=e?g(&RX*45 zD|&4Isk%>=UXCq0WXn0}9oiwXrW{kN!k7{5I#7 ze!AHAf3{V<+84jLo!P1FF7^CKeTJPnb*(iY;t?cgXW8|r-?QZ>WJ2}RG)JW$U~_@? 
zM_$q9W+c!>Ql50a+zFj7igL>qd7S!sTBFqzdrGhm+|!iSOjuq(uMr~0G3SWp*Y$mR z9O4>Bmrogp*$^1k?Ne~i&!I(d9BF=6NM9zsne;p<7XirbOV^TOU8d_uPmpRsM4HA1(xapsNe_|ULQ3sY z(l+sUl{6vcl)E%_pV;e=W<075x4KufaWhYI+>QjPZsYMJOT!T1b{<6pNOXsPB*T<{ z6hXH5=VT_`>L2f(N$=$G0B5dk(<=^!-tIqP)TM9W`6@ovyGZx5y0wE;)$zEK$MY0> zBk3W2zln5?l#3D!4SMg*JSsPi>s|T^%3#0SXJD1=9v+#k(zlRaA-$LMFbg_wC5153 zi|^y{8tL1l>~#LCcm3}C>V9bh@re=bE+5MFAo&do2mMgJ{XgzUlCP8ZlMI+MI@Ixg z9qJgXbh3xT!6>r_orzU8KHAZ3&E$S9iRxD3h}>Fx9_47=SwaZ*@QKnxDzdE>`>)IN zweoq`eXje@tL<~;`v?_lL_IU}V!Bs=KwV%*dG9M8=1sNY`}stUMsmOMkNB@e#s2e{ z`#kDDmlgZ(<0{<9@)xIHjh;=X^RaXfRLBLHIdf-iEPMNO^nMl>r<1o&kAD2A>DlNh zvW=&EL=%tl^oEZod!I_Cr| zua4E(o)xmg07VyZrq|83wr@;Syl&mx%&(qgPR^ILf0FuH%A1Dh15WLeYCH{142NXzv^G#Kf4|lvc{e@!Y&N~SezghhM!tR`O7=dV z#d+hs6LEe_TL<%1MH|5$VTGh+%eQ!%ZI8z!N6#qVE-1@0b?Nyf1Sh{3hP&cXySg;Q#+QH?j=AaQwW#`9`~#2rchFbQ%q(+WdfNwrMxIEZiS2(%dF>sSoOT`=|9WiQwHGG8q^L7p0_H$4ITiP-}nEBo*&C{^@7MMm$2$b`s`-yS*?= zZe`xory5UfKTY3?z;+Y%t$yAniC?BCr>T2(fgcz219m#6l;3BnA5Zy@FZqursvlqA zhbo-&>0zH9@aZ+|DSf}<)0cgE-lu1Ly5Q3jKArdJQJ)_2>3*L+>A>#O;9=>XaZ}d@ z=Hpt?;VL~%V{kVARR{cp(b!2HlmSVd%t(EL)jLifdGY-d+ZdyJpMNynqj|;68~PL_ zdzb`rL<{}7=baN<@^dyC*A}C3hidL!m5x96l}YS4<|_w2y||=t|HeMU>opnoZ(hRi z+Sv!O*JRw^y#&PQn=jS4uZ;4f*0r;+&A!4_t8cn_&DwSAH*CCRQzAdE7)GkB`0sxg z|49HOaOeO1pB50d&HOJd9jOR_heNsH?sA(d<^I~0uatYQ7aSilxxrw2#71-dLjhjq z;N^c^Sr4Oq+HVp9gSaKf%+o`b*uvQ zJs#?z0_qWu>Y)OvwD)eP>nc#^4eIg=)UWVRj}}mmd0dYcP-Po*LtR^e`ZES~Wd-Vk z9_oAn^|;4%zJMyrqZ=yM&=#Edd4oDxf%`F=`!r2-wI_?Z8GcbfMh$Te&LFy*-qZVcSt|vQqA-m0MA1 z{j_hz`JxpUJx$LSt&nBf-HH_zs4p7SDoyi%jqGAy^u^g(C&Uy$85;7lRc4Cf1j9fk~akE{^>C zIi~>WlTH=m8;S}0EYJdgh@6(ne=3#;Zm~D7T1Qn6V5oKL0TFrt^2YRl`%MpslKac) z|NeJQOfH)K?^g}4M|%pLgg6|9Mq0a25T|2j1<3eEUHmTfl9@a@P2TK+dEL?H)o=g# z1Y8CL%5>QU?!Bqv$Hj?Q4z8v_Hp1I`P01%YyQ;7e7DlvKCmZ2D&#x~R{Ce4sx|a)n zl}E)h-HE_-gs>M(_gltv)zPyj@awIvXek49WOiWI|6=8;66tLon=1u2SN-U@QeY!L zPd7I5VF##}4Qh4t+(++pDy3faGu4#HIjPtRprgxR9gPEQIHPwt9;&>G80zL#>8!AQ zg;yB}UY#|jZsn#JN_5!cGPxZE512&gBVcVP#6X{-wJ{9iBsx^n=9RLugCUU 
zA*S}zDKglv6=F(mX)mS<9belYf|!~!sO<{Ww*+kDAMe0M3v8j>aVxj3Qtlp)%>n19 zK&&5f*whacJSt~-H#S_bP(b~>L9OZyyFJvy1=ORyjfV@U^2T>VB?!8J`kxJIRTS^? zQ0EG$^B&i^0;+ud-B4K#E};I2L9HrXML%jqL6kcarYS8b2Fm?6E4Qjbxw|FiI=mH~ z6N7v&_$I7t>vMG$jVs!V44B`vdSY!75{q8)-21X|?^~+edns`5#SZSBv#V%4_cGZm z=H5$=dvS~YEz&S(nvA>%SRX{y} zzAU@c4wP{Mmn{wjkGMWSbV}p~h(A_P-H%?^ZR@KyF@k8_C z3t~)INrq1k9j;U^ufGVg=N%KfSW>dS+~XEOuu;Pz$R4){BI~PI2Qfwrh*gxz8$hg7 zE^i32H!lq_`Ud^dmN$!u2MsgQ+C{zQPjlO23ZGND)q_q)LE+PI`~2smo3!%3GF35n z47HASr5dJ3aYvk~>pg?aR5IuFEn~3;I^o;mKcuWz>grx4&+jd+*X1 zWLp)n*OYVLvNXi#n@-L}0g7ZD8Sf6&W zAumdgM4xeIYjr^|SC-AX1@j5XBEMAb-4XM0P2lw~Cxs(RUzZp&M&66PuQufp^D_p% zj&jDh>juYN55#OLyK%%6*Fx7elIm2}A2Il)I_@fC&O8!y1}gS76ZvBISu}}4#9tNr zNVGdqAHVF<`W2;QKI^|9_vv|`E+~#bQvqFSQ^CA6BG^;$NWG^5jNe}DxE+?$RPc^& zc6>)U6}+R@RA4Kby{p0<7%E#5OZE)r*n{ywqh1%5e8oHZz&-8xf44bdMgA{;S#v}1Cu>d!E{@{`XP*?) zBG`{N0?3LRjy*i>e=B7(W42Mmb%6X*hbK;ucZcFzJw}CkHC!oo#4sAJ2<}~>+?MW^ zXm$(reagx;E9J~pK|orf!K1kXRDUL*Dvd4_owuT+m7;h1iE*JI!f8(gMySoTQ8HCR z^}Y$&#U<2EyI2GW*7kXWD*15HLp@(Wy`zNcd|8k+szB{r2o_Yhl?-JuXEhUfg)FWS zS>u~>d^|Tx6S3l_xlOy`0}80>C2!P4y@detQZIS4<;LVqw^M#k%M`-iRpChsm3=AX zO~*|0nofaB9M;lI3PJdq%A2<@P2SKqRe1v&qIO!iS7i!3FqUs}`V$O2=k=$T2B{wu|e$ocv*8GQ@WU36tpryjWUQwf>BhXAU=^>>utk5zK=ovvZ zD|(XWxE;GH380uLF99wD37}CGDxJ4Vs}g`d88E80;fr$aF+u(P;HnCPZo6Dny%r(0 zVrlj{Eth@%65~ItTlQF(GzCZEmt1BZo4$bh#BYYoAT8`|OY3Q~e%vUOJ%2U!dcMCOU>1Bz%n^h5eO{bT)E{*Nzo9j<6#~YRD<+LB?FNN{0lZeb5bb-LsN|W0}?p%q!Kvi?SyGfG1?B&A#uFo z7Ajpl#n3P16tieFC13tI$DZK%qoV(}=*raO$9$N?N&U+%CW^zt|MqO4og^FER?Z0Poq ztazsLt6>euO|Ch07jHk$4K3gj3bMf$tc-X znOi`Y)>tYDOJJs+KCig7u}_~>^o0R^R<7OMP@gR&bf6!Xzb4Fd&k~r4z6nYKrM&&g zMm|mgCKibvFQ#1|E}%_laMZ3i^i~ipWt!|+*6?1kaqUa%6{@%JQwSU%I{K`))>uX+ zGqmpv)eFnZ`Dv92xAQq!w!j&~I#tEAg)7u0?ctJAGz)y!7Sm=1oy6GxtBO+b+|;i4 zk}7jt$zWwGY-y7z7)m=!JKpo#nTDDs&V<~_RQo@<86oFpRphkxp2`DXaM8{(j)#wGVL7%=3zYV77kGismbwehxrb!| zm%CUW;3p@PE;Lj-BU-U7=zL{ti#9Yy)+S+P(1{ZE*ISTp0kwl;L#Z#;p`dZjA=q$@ zj=C|chb~@VE>o|Z)SSled8Moc5$Gt~9Zd!Ox2wH#rR*J1^K78ET5SF8hG@sPqRmsl 
zRWB8#)C@=KpI2wuX_vvn80zI*%YxRUMr-lV09yaX4b$3(@!iA5x#YIu$C}U?c20zg ztRA(_kkID7OE!)weE~(oVb&}3=pEX>tN+9<8i{69ddzU7$Lts^>o0fMiPNqbaO#<} zN@?q9r8G*KIIEJb*NB31)a1YFr9Z0n(p){!wrkOPvl~8!YL)-kg|9Xl4^{H56P|JV zZv@Sq46K1usemoBMR$xj)>m;(){KQ~E!i1RW%L_=bz}Oc9#b&fLX3(6DR*lG?DRdb zTip@vEH;rqb0WNDVMyE1aRqJYgo4(OMZtW``ivdEu(k95#}-GzB1eSk%`}I>SZl45q2e}# zx7K)>Ktp6uv3%k7?YmYU^_u??)BN8GuYOMLQR$;W^FJ)jU#F}A0vTL_V1|j!DR&ei zxd5`#vr|}4{seL$Vud%I&PG>YZL9p^fE7DJqI2u1!?nK=u;RvawrdDSGxzeGq$1bhe(mVx7tAcf*$^D}B>bXB)q+&UTd{Jm?>q z+jtb)#g^P~*-v-7bbR3kbT_7Ee;j~@#ub$bx|;^kal8^HFvMQJ;sqUJlre7Bul%LI z;594c%<+a7roGGM(K{!Ibf>iQw1$#63@M3Z>sw>aTu(GRr8`S96I0TOqCRzUY#8zE zQkcxU8|q}fqomZIeyz-z(1=;5bZ1Fs66b1*?Zg9<3usb=#7flm5V7BFOFT{?DhWnY zg370zK$KPwgdjqGYKgL;Sr-iFkaL{QuSZE-){4Cz5$0cHlMc!5i^H!MV!qm#>*Anj zpc}ve7E#ZLNvtAt89R{EXj{?!dk|;;7e*Y^co1VcFNg#E9dTF?_rzghFA=kfKul){ zD=QQ$mGvHo{n-~rF%)$Wb1H@FHRP~EjOoH72CJ8dSw$cgTmv9BGN>z``@$&31u!7y z2AM<5mb1i8EGl@!7)d2!RuPEVj2`;en7J~e>tp_DKcyeFv^u3D1#L=SRhiN!{kzqb zcU=8oRAOyxRr#n{$Mk?bt+hQLWwHAV#beyMrQ3x^*j30Fx8$O=GThsv<_7DT@bq9k zuU!m4@u@aKzx=*a2%4d}AZ%{PMMk(_r=M6F&7#IgZNK6Dqdm@*F}IBD5g?me%^U*G zZk!viOiN>uUX5`vX?|9nwS(|6zTlkvfHB#syo1>muc-cQYvsl-td$*Fa-rK3_qyzH zGwHh(1x6z#KJIBAbnnNg_wh(gn2JwxL@{BcY@TlT5%28jeE*ns=-Qr{eAb@bcS>!k zpKd%nF(Rp%pSSr*M_Fj9xnRHa9QoXq>E(`0mlkEBW2fSiMv>5~ zaBkR;;!o~$ktv+O1un3GugeNhLct>xM!G z_RBZLB-m$jhlT3Sl80QSvlyAg??I9X|IAIjmZXkX!*=CtwB9pev?u>Hiw)SA|5Y`3 zX0b5(+t9d`UvT3tTjP9Kp_f6|#3ABzwjqY$^RgEzg@z@i)w7l!yq4iVzY>aF=`6N? 
ztYE`ym>>1MlsjhpB{QB2pO9?+2uu%YcFb3EtrL>`p8m@zt1KX3oLYXou;`-#1;xhO zO3qf~m0oQxIr}G#F0K6MTIIy8f7#ye3|rdBKyVcQJ=kcUlELV59SA(Izq98EMRIe@6#Lga z5B#FpnTkH@GhoCOr}091n1il3_dO)nAaP^IL|cbC;ja*eAa^ADzET3ii}mkgzwdoAn5x zbNU#>KmwYhZ;m0_Cb*mR6FtEW10qN5=1uO|owq^TGp z-){$n_%S;>Qcjr?-2Ui0c!+}+E&E~1?pTFBGEwn4&n7wmxm#4^hYI+63fNQe5zYt| zj`p#AIxtIqw8&i*9LHSl7hLXNw(2BxaD6_yFCCh8*J!ZIJKkI8FJW7`9FMim^Da5#GLOun1$k};G*iYM zd}$||a@PUQomw)ot;W>{`Kz_ZU!;=zAAj-V$?*PETikVlmdXl+5xZ!T0Zx5U{!6;* zQP6c`0;@%KOWy9fB2Vms%f+amhU6#gILD?mrswn>-fk?Q8l6YnGsG)cZE5!L88I^c-RT}n!ip%j1#)bW%yvo zz|7$l+|CS-gbWC%Z`@d}{vANIdjR{pnjx@UUC4a+0@>8lp z!%m!VE^s6Wy24Bbgf2NnlJLuA};N1X*%VU*U}HQqlb2&GynX-Uu$?oy2@JOcy4~|K%^%2q#zE~ zPkA@Wlg{@xNA#NhQT4b8#^4T932JUPJUDVQsJ1rhN^@Y?#VAN^q3WqLn!q4%hZF2v zBgT`0DLN-K+DU}N5dF!~%(2#VHzT}3 z@2b2R<&Aov%!6Xdq~&I)IGoSSC0x_Bui;cKW;*#yblSX|!;@O`*P^H%u6alv&0_b# zMgiBJ;dU{hpy5)_qbD+^ZFnhqkFLSenK})7dgf#Krtk0So9qr28oJ>la=)J^Wx^f5Kg0n7Fnnut1F>{vg_C&FtMi4R>9s z@wc#jZ0NA|efrFWctHEe_II;=Z0E4{{m@_iN#nsh?&?2QT3r8;?eAv$2&T67ed?2+ zs}JZuvi;p`A5qQLzEAx8p`iioBirlVzK_R+(vP~bREITZ5wtaEH*CW*83FY zgNIXvQeKm!Mi>UWE^-ljK06=eOYQ{Iwm7rp*gOW(@*ql!s#KG^6ujX2}S z3HDmPzsMINC9U|W;x#=|Esk>Y-=7WE!Xf(-wCKY{nL-i9!sy8I!lEpy4RoBN4z2m= znf9xK@7|iPuGTiEk@`{Bi9i>05W765*}M{1vtl~}@8Pnsc4uiI&{X`v%9pA5kwEj( zZVW{Ch(TBP%9DjEw*R$sK7((U zWvQDhx!qNC5xF-DR{KYa+({4EQ!}pb{8%x70U4{U3%ae>8DnwzRp7;U2^-IdcK06s zno7S3RrP0g&tY_S_S~-)J$Ip?10_FD>7V>k^~;B=UvM9mefn7S%WPro0qv>kmxI+W z2Lcrx!g~W9N>S9A;h~U$`+T~3&3flZ75L$5+5A}b(_Huj(O$3*nYzyz`xQg{cu^=6 z@RaphvxG*i{%&>EpAN$>!)`=#L8sin+#jj|J;MEF zX!gEg%IjfBr%@;lFR}avH;l)ZUAT_nhoxW zQS*KW9A(T^akNF*zQ|dn1x%q{2#ioES=8~6rY=tc9>;f0DDXIc1acXIBZ8tV!>;v1 z0q^0GTM8NifviPG=1(Xy(2O!p(DCa@2V$gbOd^6?+)@RUkXQ^&^dkt%0ht(bVbFa` z83}xSLc|a%@$-!_>&m78J`y2Dyv5x_T4P6rM=K}E0+4Yaf*rUsEz z3|kwH^ACsy@&O6`4?4&K2`_7h)4jPX$o%?VsKAYjp{zuGD6YAyQ^bFP z#eQfS4}1Ldc(BpSi#4tCOXf#2{Ut|q>Z%=_A5r`JWf+?lsAGRaGbit@El(mEwR|RL zzoD7kY^K$S%jz!%BR8KlPxde!7BwXllHuoh_t74Ohq>~p_I~IzJUx0K8$o(;2^NA2 
zd2rvNs)X>CdfRS12U)!J?ZX9{Tk31v2Wsp1fd1l|hffD;pSKx@97l_y=rJw_@p9yN zxmS)yTn_Lf$CX|=o^UzLmzZDcm18z?S%@|P$N`hHU5MZomnw;CL|G2^%7S0qvJkyV zmbqS8aEx1)am(T+#;%I+j9V7wA)v4@1n+fZ(KX+?T0U_F*kaZopNQD zb(P&x-{C$i@A^O(jt9#13IKJ}Uk5_oy`I@3DvUZ8!#1P&)V_4IzBebtoZLTX99h?u zfBN=;nQT<-WSoA&ae65Jf{Mra_i^^{B9@NxUT;SOgO4tq!5bxmbI4CW2DhVSue1@(6Ml}{6gr9e_1g1mij%83WdGB>=+u*XHrbD&>}TT=n-D=7gZz1oq|y79b$peh+iR_?iQh1( zqjm-Q*OyNJ8x}BH!bY}?YB&f}n94+j6&sw|n$goHQ4|rFb&N zDxzb_E%m*wYi?fDHLS>3x8kU#YhDRmQ_8F@^>@3nYZsMet;Wjg?t)s5dD}g-i93j0 z7A~wI%fVh*SgkQAc+JUjWO0`Kj2lpvYopjhx1vF>A1HXwFWVR(;~VLS`7Lq)xBrC= z)ge(+Vd9U`zkx2`t6tcYi29ZfYn9zA4kHHbY;U$>=gHz>^O`e_lP<3~Vg%`O*$F57 zyC$79^+fF-_0kDxYvM`w5Enx^pRKE-9Ia^W)i_g3mkf?{!UkKmxOo~w{D~b~=)1-d zNSbK>KYu>I|6}>szWkSKGx^AONEbnSY{^L^mAEAfMzxKL(6v;Xk)*Y=mYs~R$G~xm zq4t?D_VL~WG{m#NUHwsoXI>AYN*#A1Dx#+y!`d(_R9C2b!YufhNyZO9jaJ_K zX;Y79q6}@zxG!C118rT;fi|k{&={GDA94eu(f*&D<~x898&Wt^)!L9KHm?EhxophF z-Ev^IAk0Nb} z?h)9E&A`Pp$@?icC}d??VU+s0oHZew^w?Vpsym%h2oG%$^QG+>*tc|0E(k+txrE|4i0wdZ^n z$g!p_I2gJBiF1`HP1K?66{m6O(Iy!;#iT<6q=^K(AHrQjzOO!PN@JfP-pf{xZngD4atOB}h!(-x|PPI&PK*}+bQhl?*Hhvnz#baf`5HOVG-Os;2Qy5+Px8%S-$UtP zO#P4}^J~v>quw!x6*$617|7Yf+<}y|EImi zbU)w2ANY4evT%GkzBG;?>>8(B!D+QsB0A=1@GOie(DoAevXR7^O|ccZ*G<3Ze0zYt_#vl9Z7G z&7~`5U;Zl-rpWeDKT-9Z5?pZ5HO#A9X9}>% zA@I5=8wCEc!I}Xqb}_UAx44!39`pR)msYcC-=o#6F85R<{=T%DQ1%|JR)l)qLsR+t z(rWgd^=LH__&xQNzb~yuuJmX%A}7v!X)V0&B`n#4z&(0BzVyk@92@7qal`rL^YKoI z7;o8T6m=XyMf|CY{?XYc6n#rvGtei$euxwLRlM4FOc`5QNG= zYE?e>9LPeS$b~9_#tI`NA!tCRD@c;zdu!lkTN`8Cfb#FM<%Tt|sIDjw_HBrCoVz)b zo3+%wtVXS4eG%t%3a*M+wOG_7-d-C@EsAPb>#RVsSSQe0mH+I=9WFkqvx18ax6sUF zsQFznp}w~oqIp+CRh@wS?&k&UrXCGbcz|zMy=<7;fo;xa9omxrxs?_PXp^l5wEs|T zqW_fdpvxEK|JCx1yL|t%I;11tk&tipSA>nH(V~2+lRM3AYrSr3qE1^TaIo5cVl~UB zUXY;H{QcFVE1!v;9^$WVf1VsyqGGd-R0tDD5M-9sI8!QyQ7ZVLtD6XSq1pp}MJTVe ze?zvII^;3+{#5*gbKi^-8IZD7OskCJIR6y;GnR00Q^d+08)}WG$(AJ($#Q?xp@lZK(ut&%EMs>M?%38$!g=F*Vz?}K zPb@aJ^)v!OXo+8JZaNPn97=l$xM^{`q9K5aN;_T9O*T?u@ZofYF!&HFjxC}B-C>Iu 
zAPxfYI6#0@lMV_lEuhRfY7Lc8KA5f)ln-Q+Kp6^9@>@JTz@?V0%5T9x>fpc=zzO=b zx1rkqhamE#HVw7sQ`T*H~?<@{JDte64Gh-Y))^tja$!ldVOX ztj&jT84k5jvs<$0R^@91Kn`O2b+bIXH0Pdlo&CN^UnlJAVzAwBl2yxYD)smAX1hJH zY}4{h?G=-z%b&J(S@0drGHLBvrNg*mt+(>J*Jn=E8t~bD6FWw5ouipWcRJwyGy57N z=a(VfTvAkBa4AZcDSbIg*DJjkxe;(7N|)<5D-|1*o{Q3r&d%tI>vfRPNupnPQ*_kL zpwhnmS?e7Wt3qql5}u79)j>YwnpDd+!hKnzV3tYBre0-G=w*U@ryG&<|hPQn45 z%~_fQK1%|)UiO1*qr+KCa*GjImA zlD*j`dMok0m9I}*baB2x;@&`H%WW>)qEyukX_DOXJeaIXp;q$Zbn@{eeQ|$!3;ES; zk|J@=EX)I9TzUit?S=RAo$s#_W@Y)AEi3t{xqgI0rcX!Gd%3)d=L7!vB+pMuL!ac< zx`-5={h(5g%bZoZdN=x0Em^m_eh&%C%?=VyTHQu6zAK(0LHF5AvR<3lYtnT#kf8jm zA=#i!sAjkDRfQtaOhkKnW*f>g^#OSGY%7o^Z6R z-tOm-EzCS7Yj(v?l5qU>6C^k9iXS7nX;-|DY|=CWB|QNlAxG!Y@OSeaou!~?_4|Qn zclcq4mqG{ zxTg`(A%r)$@MJp5*^@A%7{<7yo)??se=CZ{LK`rwzSuSheP#nJT5WF{_fp3UVQgi! zZ7dmgGKXx|GDd)wf>H5dKAto(zTkERd#U-U@%Uz}ByS^~Eo~$i&L#uDl!2vM(S!Nu zq^48Dp{LtJ;I-t6dG$!ex}wySL@nkOEvy^nC zh8#q6(z~b~w>wN8g|!e9&|{jJBcUzQk5e`gE-n(PVis&h!v~clQ|}aJfd}Ze_sI)g zndi^A=5$Q-rq8ZXd`CIadl+67w1I2qJ1ozC^E1D>^*tOGvOHh2&kb3e5KhhVhAJj1 zUw?~yHilKM$Jk|tB}i)6;9$blwS8=o2?faMN`}vhgezF0WTkA=l}Ve})O3|$WkYVY z(aBp)46#YRtDkNz=fqVhM^q4jg3Yu#xw+I}9?|X6)=#!({ExmVz#yEVW?qxQH`2=Q>XmK!?}wBksvc~W8~^Feu`p!1O^ggA{dqZ%AgFO7PKmTz{zEGNKBQvogAu{(a)J?-5ce~>4n>?0(r+yu@%tO=Y> z*94B})dUXRS|o6Z=5g3ZVp(oJW=bSTn6cS<9j0kHA|$mO;ayJT67LxG)t5=&xPYoK z53}p?Vmf0cJGFdh1>$il8_K+hJYErzCdHWx)mvKcbJBbmhtcmiHo}MEc5P0@A9jIv z!{pnSYF8z*y=Eb#dpge8#ABYB|fhku9&7By?QM+1b)anvMxbt5!WKunAIuW@t5Y zXEU`+=Gx$#NNddf(+=F)1Q7;wy3*R#^zj$Ycv|Inl`-FR>$TbH65J{c0g0YfV7j{C z)s?2$PUzEP&Y`o<$*4!twaLoo(`B3@)MDpqUN?>1QzTnwvTnKC(h58DAkAh+ngr6U zD}*>2JKepUKO8`ma(h}a_+fL+m!2%yWQq9Q4y_&88W5ihh+AnVU#+yKGDzDb*#n<# z4xg!xuA)6OPj?0xzR6hn4lJdSqHu|Si0{3LPMad($SxNR~Ytu{*k7aApTlMfnc5}K}jT-66Osx1-yHU1a zkA8f}_2auc`N|ymZnzm&gpy$*6?;3ZT=7R>!cn@OOQdV*R{lErO8n>>pWX>MBpE$w z*|w63A56EaSs%dD+~Szqs-L&187w2dw+5&;z`}Q>lgSQ-^iDilZ)93}6XWsjbak>T zeRHxK|G+&k(Oc4u$-U_Y*KsnXnz?Z#spGoP8hML(>3EX|r$QB9MTeH3ic9YDY(TwU zYxM?zdYqBD} 
zFKMT5gQd1`fY2W0dL-SdU&o^K{^aKL4asDB7uj}@Z6}uX8uVWNo@BS(7H9k|wu9n>*;6+i2_*6xfpDl3}xmrb6?bCwruNMMj6M?yb1= ze)pE{kP)p6be9ZiJ>EByaIvtS7>XF)db)cid9!;$=B;*5+h#OZ>nln=6T6r# zbtwsE%g6cgFrznrl}ir&$)U56I{_m?i@?H%zuTXd-OEp`e12-^be@@U&=)c9K+ha> zqit$3uR_lpH4Z+yMZW!7*eVh8$Pg0(<*_Sn9N279?u&P89(LfSU-c*pIHRmuAl|E` z%zd_cOBHBJ9-(lmjxu<2P36e^p-@+pZ|n#SqGwpQi1Ub{qjih=aKt&RDq_dkKx?N} zP1Nnk1PSqe|B`6gWYA0+W8=-q$=1Z=GT1YKSl(Ju{#e$s?LPjSM>Cu>U=6T|*FHG) zBs8H0r&)uS1KFu45%IoO#wM~ZVC3?kmhounE{EG{#U)_Yx4=ZDfGy_qMmOXVi=fcD z>L!|aEC_C_lF82XV!=W3M-@ZlGcSx$s_~j_|{(ofy{PkfCjLoemj(=upVM z4JDWF{W_1K_mQ~;dV03-1O%z)vF1+b)!UUqbAB*Uci7vgFde^BQ*zW(mQPc`Z*o*8+1d2qK~7_hCsMH8 zf`$~Suq2}O2^=$$_Yhf2Cwe@_q$*a>e&a=5hWs_wdm#Xs|Z`qsm#KNC{g3X}KvVYk7u~ zFBTy|(#RA*)G8QG%N5i<1Q4A(B)A*0<^WDOe*H`R(G02wa^bRX(ehF99^bKEB-?rCk`9GhrL&Rg< z>RE69XGs|KyXc#ZbY~dx8Th%SB*1jg0ikKH; zLv>6_%`ZWj=1DJXC^CWA!uq&e`w{OMHCk@hx|YVf@-pev5h(5-d~X-QXv4mCYAGiT zd1W%uhAvhPYVYNSBA{4PUYXP*=PRg^WN3SBb|p~x=GtCkb1uVq7)WYZQhPKFAdmpY zaNoO9%kG*9^8%@%a7#?K{PYXfZ)eG; zRrb@$AH6lYNIDJWxx_wxNzX9X@$y}LK0MFP4(Zx*l_GM4RBFrxpPtnFS=F8&;eMch ztPlsdq(*1u-;%~R+V#=R(XNvV5R zzZjm4LgytJC?yFY3LO>{()~WY0)B=XM<9|@GQ_AQ5?r!of|>@Wqt)fO{rkea<7Gz-&nDJ**hGyN zn^Xz(g%cwXArR$?h;@E3Attvx&aUEjU`z@fQ?+34xCjnG)Z-=uZ;ID>mj ztw#Or=d#i1XP%?ftAF#?fAweoGy79m6z3NLkT{F) z-A}Ut%9@pb`B7as1q}QZu$4_+1q2z`rq;Y`0;&N-H$6^xAd%D+|MH_4szXRJ-RA-3 zRUFfm($Pj~Vf6859~OeOWCdDd#sC-*ZR-}`u1w>}%5bL*k!Y-SRf%HF2AFzxOnNl& z2-eX&Ds;gnIXK|#^4)cYreI}k_As%YEPq~Ca%DB{8lg{Da+d7l$+#)y_KM6Rr4EZj zLIGn;64eZ0RCJ~|c*i)g%`D(1KK(q~)+F)xqc9;MAxJsN@+l@!%mLCXE3#Dz6XEgN z3Va3to}I*LKzd*juL1Ajy){bVy|qfuwNrR9zruwxu0{DJ9wmkr-0x{-^NTHbZavvF z2Qx-cCC0K1{9YKF$u`0bxRUL7Hx9(hU*g>+aY&MEpl3FuDTFp9btu2gx_CyOi=?*_ z1@fep#MeO2bTe1op2NfG<{bO$h|kTz3UN-Z;~+&TP*^J%uww^J^#jTf&( z=~jKX9I-S;dPOYlgj&j(kU^qtVGf>)_syi+SVn2`w;g!KfIR9IRW+G?b<~W7Y|TeH zyI2;X4QGna7r?w&l{12!2%(ChWc#l8D3vE$Y+k#=l`P7!xMXwq>7hjJmD*GkF0=ut zJXgoEmFFH?;+DtVI>RuY`~2z0^H{U}FlTHLw4^hq<9DLnM>&>w60gthq5~|{kDgY{ zAMf9g)Yr3`wMJb2%yU&nm+8s%UBjtOn#YEbMw`=!#iuw~mrj_dnaC!ABHXyusP@%V 
z%{bJUHp2tP<)$XGi+35cGJi8sUqY!`Q#Q)6C-Cf~WZr~6dnv=7cZOT^1hJ5h+o+J0 zthHFH(kxv9gIIoMiR-fq=*?64yp)g1HFRVe%62{oF;)^|x6{cC;$RRkL`Lz)!1pFo zUAbE1;?<+fiiMD}u$D?Lme>~MG8K$l$#I&69PgIm z%Nb2kojoE+Z1TIEEWf_|Hl0$^j;h*8Te7W6S>&7x>=EU$-l<#q;plhLxt`3~GP(va zmo!YCGXbu697iyI7APlf(s!H(unvuIa=V5Sss4h7l7pxQ$&U7~t9@_+OmnFvp~w!8 zmH^xeDSuBN-6ur}@$I5cf5c=9x;FaJ!A)FGo~hd zJCB;{0BA=lXOi=CKD8y&6&GQ9#E)4Z;DIEW1g=|b7IiyciD>~o>|v!Qy_ zi*0Zj|8)m&ZcCbv(v9EN{=d|3IVP!lN>UgBRko1%u;^CoDCr{zcq@aksh@XZ0xCsJ zUy72cM0?hVsizH7sf2%W3j%^k=iAzUyc4&{=C`y7Rh%eCLDqEbe!qbzc>5I2wJXW5Hk?b1-(7FiypUF92g9&fW-& z-2n#py(Pez5BS{cV4XBr#~rM(=Dtob;DWs^L>3UJO;@L}!; zpK}hz+e;Y7;`9N*n2WRff${bL1AN{Q;G7Tmywl-x#9&=?u-;X|IvS_%7OW$2_D*2E zE5HhHUasJC$Y5M{Fdi&n9FEhk5Dbnueg`lf3^2gwJps;@fX_n?p92Q#s)O~FC9H#S z`c;B;AkH2F)>j5t;>U|4=}iINwF+Ld25Y|yBztcOYk!g~uH)rr1tV;%K-og615*C{6*9#V^@B4uD zbtNoFLL0&F8))N2D?V!?C;K4c%I$Kd;KeBYMkftkj2hcU!3in9c$w0{U-t5 z$jMGu} z&A|9bH;l6Zmv42rEEt^g4$e#o=TwwFDmV*K_N~B~32^jo{}>a~kZofg$`88k-B%WU zDN6sDvYm{wZ=>iwUsSISIY$_QQ|ek*64z|W7{}S6>louilud{+CLLp}r^<;`&K_e7 z%=Q*u3iAMHGSYIQO!=%<;_(7e#;SBaV#fA1ktS1=Z?_65mXe!N8Uuk}7R83EKrcMU zcvkU@Q@I9cQk&agx``LlM(Cb}5idNx-AKz!IFddFZVE*NjngC9<0W#(qV%JBI~Qf& z4v`)&=*ybb{`6xMJAmc@eolq3{&ADM_zCn23<^VZkUuHaff*tRqgmBu-v&`P1j}lO@F_mOh>tYPl z+mcKzyiKMgv9HkVws3^{ouFJJHrDi@+2u2QVI0X`xlQ{V3o+$OvF^+(2yTvxQ+7Cm zbt~Dqy~+qjCt3~XT=2o|MwC=F+@8;ICFpx_r`)b1wX?12%wdYU1I5$3tmIa}-<}tj zhpJ?SE?a$n!u~0)c94&f+fB@FO(I%pAnr7|jJ8pc!e5aKfm`-$O!zKG4jj)K}M*1&I`8;9w*#isEas?Eoql)A34ir;m+baacKm^W)*YoU-M6O&eEM z?5;1!zsvLKHp>+}GtW+@+jzq=k?tJO#Ff^PlosdlpNLhax8{~h)2J(&StG%ff_L@p5N**ng+lKvJ}P{OwMl(o%q81g5k=B> z)3~nQbrVxP-*39ESgLF}(PMZCqr}u1FG(&sNiqo!YYPXGWHSFjF5i_TnUtS`h+#<* z%O1fUlX-DBud-LfN94r^J0I|3Nqpd=@0=3Df`F6j)9tPImHN1p2AKh(Um&|3cwR=ioHl!N3#wQSRi`_Qwg#bRVRH z$5Z;N*dE3oruh?jadq!VXY)g_4vQo#RMxHe6V`l|J%jW_vB2X+)qRFqUDOF)CN8ew*=TPH?+#p1 zXu`LdA6u?p2UgCCf}qKUWQS&9EjKK4Z0gjJD%FvA3?%bmkeUyJ{e3xEO zsue7yL};D!=~<=euJ}xptdlm*rYX0OaB3_agUpg2vxr?>@=m+ig0ARxz9E%n5#jV5 
zPy3_n4bpqI;P}~>j;lSi1n)gk)97%YbLfjCIP$mqw~S7w3@GzQ)8MXQO<=3f!P}K= zPN^khX;jxT4W76tM-Qc5`nrvVqNCFDmc2nTcz|jq@_GUY) z6Rs7%+wxAi;`gYuP4U(FQn9F%z9(zpp6Z+}>wb#Pq_ou1yCiCcB#KSo35%(6;=en+ zQ*d@eK{O?4}FzkDFse!Dtm~C0qGz@ouE1a{1G^ zoP?uiQ@U5Z$}DNUd0Z-;c!4=O_nqsn@GF*C9?54P;Q102z#e%Ai3S9~j=_r>!g~z_ znR}Pm$>PdP_`T1B2nc7d;W&$=N)4!}MuMF@Mp()pK6pqStw8pZ?%0_6p@3bvfD zvyW0&zB)P$Xc4GeYZ$knVp81MAr&dUPwkqDA1P%xPL9xImAh!=oJ6lQ`NK~7e~5Bn z1Hw)zNUkxi2MbdK)6BTeT27NpM$%CxBk4$MDowPe(qYzA2r^&SLe}*sTCnTD*`(=Y2}6l~I#ac< zN`_M*f5dKRDjRm&__LiT_s|LMRv@(sR5-8Y zIIn>Qu(L4O6$#S~0=}12=mBDt1JUjT!N}+aA;k|l*8{``2VzYp2)M2rgq+7oPY|0O zh@=w)9MTO!@_wo(2v+_y3a2_j$Y#>+aP4%GHRd>u|Dtvly#!l99OQ!Rz79_$14-{n zPb4EwFEC6EQUykAHN?)|8%v5*k zaWP$z>wwKXCjN+v)bxh(K*K|zuR{F(SbjgqkvxF`pp-bFOG+Qz6~C|}S4f)}+tNU@;Fn+Uv8`WW~teH{FieiZzb zemnS+=2x4@%)n#$33!`>S-e7?Ks(V8|I;i^d8PZ6ej~J2`c2SY=?CcxrQb&1DE(*j zmC}9mol>@k&nf-b?)aEecvNcF6~Xp%)(d*iLiz=z_<+tU#TZq4H+WoWDfMT8vwx=i1VvILHtYp z?I_~J*F=vJVpi_ySgTBHbyllo`iL^EQKrX2Cdf$;y&GF@$enQ3oucSVK=R_p=?d+; zb-{r{EGn41sovM zNk1*90mI1Wp4XD^ws?#w1e-eiGxz)m<~h)pe>@TA!^eP2lk9iG2+ZD?Zkq-)!49pg-6f^oMmaE74(` zQ_{2eAkZyM9@?g`q66P-9A&R}ZI5LIvn{!{6(TO6pXd$xDTUeL5yU6BWYCjph!zFL zIvcW59-H3P9f&eTbu$5vExo=mH3i@Y`vB)K47(R|JQ-g)@NqR}C4XAsa6`48V`|0scaitrYN;=z~iJzEX{O1Ap5)8-q*wc%S-M!`-g> zwkO+{Uf*^#X6KfAGH?XHL0IkNZ(Ao;_-9}0gVm`h+a|2GCEJ#c)izi~F|;_uFt?Ev*Mp>TeTP@>2$JmQfQyKvOv7cp{|e zRyLB(w7*AjaOp-eD@^|MV zV&2oAm|%6M$LeAqtS&`^nuU+MufzrTR$+zDs)LwBiQSI>3X&0fwuuRPrhUMOxt7eg ziPRYB3^DX@CSVYNp55#JR$LeYytcZ}hMOfX! 
zQr?pL_ZDHLwR4?WR-xx=T=R<2c(TLyoei8EphI2AbA(4!Lgi(|IQc9v}9;pu#wtv81HgfY|x z*9OAljXtVJZ_rY*^%;$G%2Sd7{DmHu&u{iLXk-@ky_%B!o4T1ur=B_uXl_>Lb$8Fv z*n?dJG^YroYM8q~6WkJL`K-jU_&3I)=}fW(M+NJxEWWa!$ztbBatF~7Y~N=+8E-6W zgfq!jPGFizZl6i+B8Z!%V~%E;N!D?s6Nf`_C_r*6r$EgRgFlmSx)Z?&y)h4i+qRmX za%w3~8`RF1TKM%wXfB~IGwmOfdCL3zY&Fm6kVgw#?fH=BLXn3p`9NSfX{PGZBx>uUx<>4Ea@}l-Is0>J0MMV)?28#i2O9SmC6L|JdvXm{ z%xl82hu+VLZ(cE%xl{@ZDdvMO)Wpf7gQ0tJxfu(9_KJBo=nuQ&p8HxXgFp|kFuY>k zUE5>Vu`mYJ7Ay>}n0JGI>N*z2AkYIWjG&lzSBGLg*v1%CU$8NPV%`lL#eA@pF$nko zD94Wefs7z{&`Ud3Sv%=7X(_LG?K+<9dqu0COQI z=G|DKm=87=24NMKXU z1~CUK=4LK{RZz^ku|hE)&@3jAL0CC+p#vY=zHw)Ar$~lkerrWBmz8s;h`HrDin$pR zln;t|H_j;L18k)0;OvYEP?3$$Ny)*AxtR-K6%_MstWeAcn+t;|+0R^9S1RV+^`V&W z&@k9R^iR*Jc2Iq0WdJ-V=H0+i%m-T;gMf35NJ&^{Zb0wVUGz4^e6w1xIoaH!1q>;o znjtufIXU!%V(u(Grq zQ_NA=iFWIaxdw{Va8y_91QcB<-(Eql2L(M!2>t4)zOb-#MsMnk{L=P z$M3BjQS6C3{zmZ+1Sd+9hlRvOv5*KZblKq$AzIXrlp;0{lvUN7Ih2j<+aH(NIDqIP zcHE2DvSqGEY?o&V#D38o=)E*$G;Ls))WMpQBY5}Ge#F3&|LUu?_roNk)1wD6 zouj}qhT{H`#(jMV2-&bt9BJEUWHPzlOj%^r#$Fg7dO4D+LlvBv(`aAfhs~VQ=3nt9 zQPJFqi7jPD1Lf&**Mn{qhYXlz20|R-R$}Hnh!DYgusp7W8Ht@ALgW;|73&qfAs!)A z0IN=C-VC=XJS#@wgDsuvQ7dFl%INj+R=KLfPwRJAU~W7d5OZui6zJTW`$Cc9c%K|6 ziySZY$#JU4ak@{Avqg?`eR5nVa$M|_<5H31a-STp6gjT;$uY~J2yVRXZ}z6g!6L_@ zJ~@sQIgT#Mq2ADXqSiC%pZ1T1zRO=J@>-Tkhvl=?zP#3~e8QJsR5o7#=-c2iuUKl{l-5Bt92geefKm}=CfnPF7|U~!d1x^=9VyoUqTunhU)FlnVjgg z_lT+&s+hf0txW5|-dWT>9Wcw!RP&#%=GVG%?i@$$U$!d2_ly%#%yb;&IQ_KUvJK~7bH!Pfz;%K;L&A6hlD86KalaX}uap9%}j z3t^!d$xN^p!p-8@7Dn>R|89Xv?M{(#go}rNM!zoTUHdZ#miUfx| ziR$@Lp+*#ldccAdX$0SShKz0j-+IC|;b#Q__gf(4$toHCygpLr>f-q6m_q^MXA&Js zO_T=nm+pSn(@+v|#+;oUVuyg@xo$AO<5tvU*#0vikB^+!I68H5&ZwS;IE8AMlSamp zVVxK?-hI}HbXmVl51)?UF5XhpiBZkxvSFPVHQe{aDBGY?-^*wicBhVXo)|?80++b5 zmhVrD8vafw3&^LE;b${0yiQOr?3@wwhlV?6WI5<2Zl?v`qlDizn*6={X_-IHNmT)M z*c~}#Mbox)jN!_0PGhX|H#lxpfNA)0Zc~U%rx;1vI?gFup4Z+D>F`EF*y<@M1~zN! 
z$h?$Mr~JvdJE@6*QsbPiww=_(=pq=;UeN<|1UGNlu}h7_e!C+@xI&ryoS;G(F8zRO zbe9)#WNafr$+<8T$IlFf>xIs8*md2FcXYRf+RU^?HrJKhLk|-OvO~%AUwr=8f93gu zzw^_t)Sla2Pwds7{@%a(iyu65;b&gft2Ml$%#@Pp-+%cB{^GYT|ISO7^`&jE{_^v` z`s{DN@Vh_r7kZ_8%4~xmBY}7+HukGww(%(+YX7--3gWuMH}r-6p_AMPvc1Y#kR4?r z_sLXVnOZjv)&(}+2xMKDiH)Iyu3`N*$9tvG>E>j#JbY);TXp(1O;eb@C+R4CF+6?G z$M% z%EoiV^(XhkodkR$;2w2HL4o2kirE5xVrq4`z%s+HVwi2%QPaN zc+{QE_k7lVJY(}ftwSheD@?xSjWj|;f~?MBrmZuupK2fPS;h{svWiwXVa4bnXH+1O zG5~`Cs#5|B#lR%a%guls7yN+!AA4Z#}O|uitK~7kr4}9liCU$05*D{Jj3wWWX1HW z|J@%_SliFxt|EZZy1cX#sN9SRw{p_eD5P?&RE&FD)Zk_NpqOKw7=rl+CK%&u`)`~U zByEc8>|B1BWD=Yv$V2x6nIrVFWy~JT0hr6$o;#T=6OSNR+B`Wr>mD<>VZ@kdG_VgH z+)Nt=q&a|%5;84fB^aOM2Yaz!V5N3avJyEftkmwuN^t0-@ObmlkLhqmNEWGm^n_+O zV_vZt4ie91BabuXiH3SbJDP}ijVPTfC=x8sAG7Y8=l}vo&1B<%6rAy->A>-h?W=_N z<3niKRKXEq;^k0@e<#>gX4W>2f`eE3zGYan2owWZrkpM~iC#5<4Rc4R9CYafjULfq zO=4wqA7Y-l3eGdGjL2{gY7#YnyWTwydqGJy=3(?8x5voM#HeAKiAlO=V!i5`iHF=w zOwv6Q>s8lGYytkrBtHiZ8F}6d5_HE<{%hPC=x0|&)M^SO(G)oRW=0w+sHQ+dUNr@> zOJ7r^4f{t^_BCn zvSHr2b1--^jkdI-p^J1Pkm8`u%oGpNiZ_}nx=EQ|sAlS&@S=b_VWiUKr+XDR3koB^F>4XF@a3&FJ<}9em}c#?4W*V zwQw%zROxk2aIq%Z6|06~g^5{&=;i}T>0u` zLG>$JbF&8UT%ZF_##3X}ER5*uWtpqganzox0N~uBZ8+3R?4ca`lSzL!JPX&UdRe3$ z^>7ly(fmvS$6|~ihzuxp0piEOKjrO1q*hURC_ry8b{hX5b>9NuMpf>gnI!veyDg;@ zS|F`ek;gWZ$7E7K2{45!LZtCdp)XHoN8iFFM^!a-QG$&Ue1|5trYx3{f5Z_c6wv zOgC^>0uKj#>ty)WxXlC|AkqMnHFdd{cX@xcOYAT)U$0B>V#Ke3-}sHhZpwgia}VGPicx$JGh@3Rk(~-V46$XB$R{STVJtUy_=-!^$~YOH!qhw`HrS? 
z9aF|L=>WI-c#$!M-8b)=h!h4GPhFS@P_UtD1iXb*xFhPx&XL}mI?xl1f_1j1^5nZE&y?nzGT-Ki7D52fs}pZsfoci9zx=%J|55T5h>hV z?btsaa_sOx%{Os#r_WbQ{IjW3KCB(=3I#%n+DTvE3vyhF1-&|O9EAs}Ujr`pdT>FD zyV(zHPLH#X{hs9mSYEAE-y73RU^s$;dO!Al*`qx0+z=^TUETa%PkfJy_7G><)E=j`&AENp0CqOD*9%VWr0On@Pyhpr^0ZW>iWUgv zbogFCW9g`rX{A}K}P+o{TZ-nM)qN93^pL|>)Qg^eBOt;DGp?kLW$km%|*h_-w zinz&!l#Wk{Ovc13K!Zx<5sU8y{Gq_^=rX3Ad^ieyAl@H9(hZb@>#|IDl7MNe7N!_x zxo~%!rbS9w^#2OvVI~bJD&7%?7=^#P;y5(%_g+ePj=vAbmqBNQj^dD<@%K@>g%^Jx zk3-zV-zVezioZQ^s95m+OdN?!@%Py{vhT}eX?Pw3qkQ>$;{aeMV2eNOv}M0OgJDp- z-yRuX!^j}WAfGBhgLxm5RvH;}78E8X?uE)C(Xnhl;-dWj1M#LGA=vOj?0WV65(|(c zC$S%-bm)xcom<%T8Jq-6(~UUDLTrog3lt+ZoZXXvwj|QDup#~vn!>_FKhmRPh}27r zTnupR7G8BO%E7nb-hF-ms_v}=X_wbTDx9P zXo!?Jank-1wB_X(3Gjp&5CjVKPteiMehNKxXncJ~(jD-m7>>j71GViziq9t0t9heaNv zHx+L(sS3PIn;1LNAJG)LpuzI;Y{odWo{uj_c6qp1#7V&=7xrRo3;Un*p!Iy5q9pM9 z-Z*5;h3t+s>3E24xUvV91?hO)%ace*l%#Vh{$Sb&lO<>Xm5#p%#dagU7FXV%glrihY+Iawls z3frNNc*;Iv0?`ihTkjF|NJ(1dzvdEQo8$cw#ogZ?qt#Epcq|zy1pysQvu<;e|Iq;y zk_$yNT|YOc|277>9t1^Db#Kc?M8E(5`3yKqiq7yC#l{f+O8dQL4!PtpPi0)Q5Iqh} z9KI^Dl(&R`fl4)>B?-goLvbR^%R52CgyS1?ecM<#v|Q*OE<6Z*QO6eMv?MMZUMgg( z$paNx39)|fmzG}j({VVv%@ah!uO8fMh?v%3rVS74-90*WqZpJ zmkUrFiKvUu5*UFZw-M^2e)sYf|%feHwFBQfq zQ0QW&lKyLH9H0i|HyMaL49}J=L_d_)4OO9Jr>YA@)t1r7SamEcE%}SPqHL+yiY5}I z0BKq(Z}wO!@5gJr)i@GiRG8+%Y8lW!(STPbLAPN*v)qZ30;z`Sg=i(;;`p#fUcMiF z@f5BaS;AVQyGf8)m0hQZgB*QLK$VD#fS+V^1JvZk{$H|>a$%z594^PrtGNp7y?xh2 z^)3_Ab^FiGe^t-eUHt4UfYwlE9X13;k9?vps)8tEXuxG8z>upx7o2DXIF0bxjXNIp+P_~^+486hc=kcT@#k`vl6F&A4 zD+A9=;1f*?I|esI5Tiu93Jwwv!}Cvc_U9ag$eQYwS&uOb&3I{ktt8-}cVs zQDU`0ZKF3ras?KE!ZV2!Lh{ygmqQ|gvlr+;eJ2(EMiBtu8R4$sNf|y7OJqzYOBkqi zgy?)9$Tz$ESdem%e8_~1w&VNg7IJ-o8g&g)9ChJFYT-iw%KnF#96P_`ov4nZLrci0 zrO~kEp!ls7qi)L+WD;U|GlUJaKG7%0}=BLM>ye;e1m{ z#EJQZe05Gb5`iy6v~t_UiOwBRPvAbck|JBX0Pv%zTF}nD?cyDjDv{+V zNFa4RO%f%#=%2+2vdX@Fc{jcEFYh7mq8|DTx2T?rmiHpUHPK79T6Qaaq7QyX;i5vU?A)ZoG6_W^biFa{5vlOk z2dD>V#;6?A^VOBR&MSA5zZ|nl^K$*a^;-2iU%a(u!l+Hj+d~o3LhhhsT5OJw{g_5T 
z{9WT5C9)`s8A*jFs2m^X-Y?)||9|nGQN-W%(>@-v);Hq=sbAce!#*#I6eMUC*$8%C zL?e&_MnLcrP>$E2EDLs%-9N_&vj=uQyqyf(&(iP?0<6g_Ml_6qSV(Y9$VWxsDoaQe zhd0Q%00c3fG`fX_uUyQ>LjJ+{(zoKVx{1bmFA2sQREbb`)Y!M~qsDNJbyH(gq0iS@ zO@Thw*U%?;xqtqm!5s9W9@d|zhc!+ooG7F%q9~_Xg7h6zL;4p#AV?q52BiPVYR=`D z^T`^d#YXKC`}EQuQU}mn(?0bT3to`Zu>S;_1IvI5OF^0YfoXV?3kgCwqQ8Xm{XZr= zg+hik+o77*v6=$y zZ5ZHSEBeY8Ux3Nl3u$lr} zZ=4GJX94`!{&YUjQ3E?AcebTXzx~1qMgs^@v6^yaIeaXBi*eBzl!z4Wt$uF8rQSF2 zc7urw>+H7U(Z;RsKs)EZdU<3?f_kh{9qK^p|C>j3`0KA2)d_*(*L69PD`vlWjo`?wtCJ`pb6+R?FLf)isRyBXcMA`DbcC z5U_$H1!pGfn8cfF<$~!!81l(3Qfe1oS5C4q!*tGUj_r27dy>wb)*S!S?3~}P^&-J2 zO&mXXeB$_tA;6_(flT;gAy9%5aM)9(v3*dKR$DH-hcWKrkhrC`+8ir5Mtt-^!a3qx z>(BOvU3N8ViW)_+TWd`PPi)`rvWcIz-etd*;DPoMj1QBl2^R%8d+<%&T9P1zvydcW zb&>=l#XzzyZe+wKlF@L<4b+r=u=V*4N{Bn#N%Nf#31r?_p)c;c#cI zOz`0lhCnQu34z!<30({UuD>!qp&e~nNDk{=_tSa^4(oGMfonm8sW9Nk1GT|(Vf6&> zW{2GLRAHbZ0T}rC+5r6wL7nFCu3DM)*PJ|%+9peTNT6z=DhPvcy))^2_aq1xf+4w^ ziNNhxE$(ME#m4)>REzM`i)OWeMIu%Spw3@pL#$HgeomanUToTY({9>;E2c#d`Oavy z!-KIt%-a8ghJpkg)1b|KD8FKBFF27eO||x6_CWVc_Q1|b_=4|E&6QowR!SZrGf9P8S#xl$CqD4YP0ewY zkyN;c6%(YMWlpNIHm?w!-o)l0B(Sg6S`%p1z~Q~9`*?8>m-RI^#Ogj?%=hu8S>4Ak z>MWsrl9=*|dC~~w)7|y6gz|}4NlI2yRX)wwsQsd{%yfNq^EAI+Kg~VDdcSjat@k1H z^|!NKaCjRuVBhHLAD>#E{17uSOi!5Pz!GFR!5bXCiYLr0~d&ZLy zpJ3$>d_(l;o3(Pm2h-Ft;_jKqho@>HomW|aj~_N^Ky{F2l3T#eTHtk~RZigJ*EBY-sMw_+E*#*peR`Zp0-Qs{<4 z7pCW?MGs@{_%&P5cFY}hsPOID{Da9*2vi6!62e8AP`1u*?v9zE!o&Z6p+X4c{SGr! 
zO;gq5iT$k%B!1*=Worpu;1JfKk29Z+gPIW)CPvw_AU0SoIB9Ani;1Jrb7+`+R-lO- zYrew90m4fM=q*!WZiuDWy{|nro@E^fp<&7q|7~g+>;k5;oSx#VfSfRVIo`)=3tfcI zq%D>MiD5XPKgh}n3wg6hR29UoXXS(-)}(l(yf?hPn^`$wO80?5^b2;Dm;0i>-)1=x znR0l!dssQanB7}L1ph90kd+Z+{=Qn7V0rBr@Wczs+)M6fZJAVh}MR?%tsD5tV6CtSY` z^@{|Zjc4B^TNOTlM1keP1CtMWp*TUNm>WNjq{5@DHE|xb@Pzp}D)uo3OyL$34cWF0$&uTDjnjF|~urDdA;5 zej57%rglxDfqDl>(@QWsAbZWFu`X8YCM5fw2}`welHNlc&mW(P8Gt$_*xT?6tft_A z9-3+i!myT$X99zYEjumSuk2Mg65P0wv*5LG5YfcrX7)k@17GfyBU_(KI>}k+wK9ng~ zgB9nzwe_L-`$(`rDByCRj~jR`l@|!9xkLjz47kae8(q zWN4avOZsWv{r)hZ@6N_aGNTjq%1KU=5K6{>vvcvKZIG{4JFtPp{v<&$m9r?)Z|hH%y-{PtOVwVkm!NRPKOTgmLih_{-Efsedz$b^f%5QpkwiF}Xhp z?I5Yp>WC1_?THgpXqt9;obwd=t%tpGAr)Lx1haC}o>chAw|w-mnJ6{zD;IW5=~?{& zX40Of@j-|Wa$zT{Ev$$>w^spEWJKq%znPU2F}Tg;kQ<$s`z@QE&#Ejb8FsGPT@?z2|v;_ zvesJq9C&)vcQIdrmYVk}+Fr@Z2@Pqp!k~(_J6Sm)F*lb(&UwBPx3O}RbubXZ$Q#eg zeVdgNjCS)%AcH(F_myuGf{PVXUR>kh9MB*fgr1SBG(* zq}R`?Pf~~=il076pq-}926wm+(2=2!)ou!pXHDm~0=n$fDcWqJ~ZCItWY98(%o{`>GQ{T%jFm|Q%aVJpxFpUELc{K!Lu zG1KH&`Do-qm@@Pl=ZgsK>{Ptk{IO{;r*+-gCvOzRn>(i)D>Im{fW=s6O z=LmxQ(!q`(PxMTAqW|t5TC{1N==>y<3x8pig^{%R?DBZxe~3O>XjcT$kO~)ni^d`j zucl2-_XlyJaXBj|PMhX(RiZJ)A|Z(pZ|~!*y;eHm!i0C))bSC`{a)sk`x0wUoJ!5* zuJy`2#L7|9Zk&eABW zb8{YN@_tsVB`@J|CfBiI!aKXU*9W{_Z)4?JP6Docd~}l^{e@e-au2ii#L2OHVgfNF z{;XlRAcnU$g;%)8IsK9~*K8XQ8-I`2+_U~^+|tx7Zl~bgKF`Vtu>7E@T$K~MARmxjp2M=j}bj%C*Gh9o{^C&dRl1Hr#8) z+xsmmC+6EczI(lLe`n={xP5&STyPTxZ|`#9z{U2^4;#zzGOaj3v1ZqKZS7=j2^e2i zYb%(6ctUz{5+^XHb$`Z$&#{pS29c5sV;;yugOu)7r}Y_UOn6b*;J`qlANgkqI}T@q zXE%`Oe}9&bz6i4nq1m~O5`?ssu<2W@rjXQp`#G44{9;$lGFP81o2+y7|Eu{^&FzUu zax@n2=m$I`DMTMT1Ny)XeNFSYgkV}|Z_V!r=z~&GHsaNXSswx3k|E}? zE1(Zt``gsV&8(01V6{7-56Y(BIFDVdj{v=Gh1%>u|5KP>O=Igx493@ z0#?@*ppFyp3+kQ3$D(D&o^MHbAB#zPX$itv?WH9$Ouv^9M873^d%d-^yG@VihZsuB z-TdMYr6uut@rTlqR!=#Ur^RX2C|L%d%9G%I%VWJl0#`7-fG1gaj8YBHL$3Nl&zdDj2~Jmck!-PyzDEK! 
z{1DrMRl&XF$Qw>qrnhV=H=UL!!&`(jEg1WDmNY+#x5DEW6Ib&%@nvBAAnO#Kp>J@# z5e~U?sC)tA+uZ*X#<@}U_E~9%P9L-_j-NllK`u;=S3-zo9SBDI z$4qQsdW_&LeSVG44)lOSV&vl^r%yw{&Al<}La%mD5_73+#S#d=aXqE#UyT)Ax0MxSqeFQPde`51!Px$(0Kp#O&@(Zkw09V8i^SF#s zd^8|c5R-fzO9j%NKyhnx9}P@$2tMlr1dO)QXSX6z!Q>|d!E6a{@62z>eYP6PGKq;k zp6z9N&S4=&+!70CYs9VihPGkca{tfPh+BEd1dljGwuP2dagq@igpz!!b5wZ8P37Ce zkK5^a$A#E>Ef17QX2y<-mfS%wkY58fk)5i`B;tiA3vb=+%{MTSI$GF+Bp}41LT@ZX zZ_F5{#yg^~>Xzcw1nW}aP+X?S+nkVCqm0>t`wXIg__dlcCHTd+bQ>jIU4(lnp(el! zeT;jzn1Xpc2N#t@y*n+yLl$cHqkH#V^PVn~mh;=23$^>vW3>xbF+bdoE=ttyNB2sA zWjusk9!Wv0G$t5I3$QohWBIG6#p#={pjBK>iJ@*v{^rn%9$ge$LUDPz<=O`(h0nG@|JJ^y6? zOGtAUc$`#McTewv1$}cC%*Az=q6vM@{64xTgEy$p>F=A%IcU}ai(^HvK5+9mF7^W! z5rgn1frQ0;Zu8LEQu??oHd$jR#X`dLITQfKnnHy+t$VLknB%J;Zj22Yu2+g+hO4u9 zcXN9CLRKNDuo2Gb=_4t}15r<3*QA4FQ=vX_ACzdIi!{pB2D)&iG()?fL9-vKO+>sV z*iP~<1B0-KZ36PxY_-)mjFx~3AV);uNP3ym(bqYnj?qxH+YY?2Zs)}5hV1I(;!L>o zU=@Ra*mA0!Ur~-4)P>kXq_swVU%nG7neCkoqE@A~pdj}I6 zSb-Q3GDgF2k8f3#@C*|x16(yjjCF6bSV<%tdKg~^6D+-OdmX%+>Nd#643m*Sl=*N7 zfM3h{Xpef`5YPw1}Li`3}_FXgRnD);OnSv1FRrK zG{Ndt)<=8##$5q@%n#^eH|rxHmqG|wJrmGJe?TA4vp(9xtIK}f3@eCOO|ZI-$_1(7 zLd@fifIj91^s$Te(cU~B4d`P|Kp#)BKHB4K{~XXqUqBx_X298ANp%}=7xG-wdhBF< zw1>NQ1@zGq(8o-;mfZn;1UV}0Ve<%xrVW8te-7v)NSfL46yfgjw$78AsBXi01Q}qy z%=!pOZx~`8j|TJ+B+Wd<`e@G?YR7My=Mf~$TutQ)0V!`o%wtzTA3-*l-K>u`X<|Sh zLDJ0gtdI82sH;iBZdh|6&D2R6H!w*&Aa*?rtOEK7lxA2T?cvq4&3#~(LlLrK0{@Yt z5)^gMhhPH>2ar}0G|0zfx4gq6#N_uhGp9nNsb{kHG6uLkVm;B!Xf#az9$D&wx)`b# zxiF7i+P*AG!@%Y7Fwz&I@E$ma-E|MO6uyD_Sz6Ws|AT0v1MUdqYS2O6%pK4SlOsSP zPB)$RCdd$kd)PY*Z$Ec=7nIhl2fAK;c{kdmrn(B!$#8VQYh`&yVez@kWPC>H9?X83 zgx)(g)3>oi4dC~mRK8V z3DgNLjj?uOMAUl%17*$#O!p2wJ0{L%Dv6m2zwP>&!qXrwK*%PEA#F~u`{pHT0eUj} z6Y~R@DcoY1?6MBbGe$t}+`Oy{MLG#GRFB*@sPh=<+hd8i7(twU4>-g)m;sPP=VrhZ zfQxTl7DIFR(}|X%n9cGo0qaiOcsfT0$N}p(hjkZ+_1ihD;|$hO4r}VSgG~D!SjK3D z0~`ywMF5=YBw~pUf_0R^53OS*6Y%Vy-)hxsVE5O70}6vw72#s_2p8oD7v%^C35cye zIut+<2nS_515FL#qMI>eK%$$$8$gf}(Q|-<9O1e-!r`@wa11>g5KaJraJ(MjqCp5p 
z>%b6>wH`@yPKt11)o59OaGePZl*pZqwLwA*vEqpabo0%Vftxnaa!CioyAqv*V1UqO z+~dw{Gpu`oG_`hsOcG`zx{vgJL4v`lehnty^Z*7nKJE5;f*oDM%VY*inKqzrp8|6T^_MYqbJLb)b_}eE(B60jR5^k|HVpJ+aqZ<>Ik}*_C42_Hp zJ0s3$#i$GwM-wHdGF}>W?1V9za7v|ODe^|tqyHp4Z^F|&7I&nAaom8u7UOpleI@Ys zY&_mv-i*(0!83>_!T;;&Il&k&qa$ttAw-3K*(SFv?27IRfs80iriO0fY<5_%g ze}rK3bTkr4`QHD6_ceHT@VIz3;#qk{*(sG*y2i=@%P4Iut}HnlhsqT|l3+MAYCBs8 ztddc2$^%2iqf}SP*{PIi8cH^qRNR&3HvDpwGLRZjSOxYzI5JMttwMXP)X(5@3Fe(c zpWgcy@UG!~)H!c(*cnY6n>adAU8`dgkw;Ol2fzOUf9Lq>Ed|iJ@ot#KQe~DWfVIQW zBa!)Xf8-4GQNlxgd24DFOAebUmUQJv(MI8c0sQ`cl*e?~JpK5v3Ewd&CW_;gMA1!{ z#qm+Qyed)INoUh)I+xbc`AjmSWKx-QCX>l#)J!g;W%Ai%R>`Ka>1-yO&8pd4R?Ftqq^hVX zHLYgUtg5OxRa5i1WKPMYa_L+qm(8iUTu#g7wWOwKDJ`vKw5+CTIZe~@c?>a+?(=9m zk7{{*6^Zz1#xUkKf@c(uYYdm2N@<+-!&bnQ=D!4sy~y{z5buZJy=07TbQ0x?Q364r zU6Jq>l>Y*%kth!jSq`l+)^%fLQ(|k3uid5iyS&e7E6TI66Uuyo^HSA=^nL4r7EW@d{e44f}WWM|#m_0)JnIzpL@L3x7|=-wynJ z9=Jht)&yxBO&oJZkzg zJreQyzZ>OgZN2hMI~H_V(45G#C`-?KPd;8b3dltOw zlFL69n=^O*f>$V+Rr=~RuRirO+d2D-Us|%Xv#Y0f;Q?xX)lHvy{HNWyE3dk#v*+kn zxy`o6r5>jpm>K`NF#OAO6U|%9kH@#z%Kv|H&I}yy>>D-hFRJZ{Pk0uX>gK zx=-Ku@b`9h9(d65m%j2#iOyKqHa`LIWTd-!BJ)2(%6dbL*hroPmIky zeB$Pg6|ohu!@4rPr=GAq*S)BxYw^)~K5BJ!Cl|$+MVG{-sF(y;s!YdU)-{%3X1!O@e}zvzmaFF5J?uUwGtJUn)G$IE(F_Z$}A?}9ttlU>bEZJTiAD2_kk1d+I~{> zqT}c7zhmv89UT)tJ|h0gWzyJD(Su_0_TvtnzbYI4cgk}+CO&-OJQdIzLrRFPBCjq+#Yn`5_h{ayY?{9p3F zqc3#dwsqTk-usEmBdC{HphT=FYEv?Y{dT zeCQWX{pN*8j3IPx)zQbUdG(ujT#g^_`s&y2f9U&id9GJYtDMp*>4%No%de#8C1FN!CyZ0o4s@9>uYRhV#nw1{>C?d{Mg?AzVM>= z-0e(9U{-2a`&<}W(nO=thl zU;q8W#K^h7_~qO~M~h1j9(>0;KYRQ6U%PwJ0f!vA`lM4&Bm95o`Cqv2(H}p#_rL#A zDqm3<|KQ68R^E8~m%jO(M;`m-hmX7F+T<07{_xT7zi{elZ+uf{*S!9huYB$gqs82@ zuR318@~U+k#~*lb_YWTb>C^vyA(9wee!(we7o6C&B-Sy1`>k^)Ziye-y?sgaz%D7a zGM0&TMy1Y<&iOrS=Pl?wqca*?+S48Figre2Q2oAGJlfkK&D}qKYUh&9vpVID1Nzp+ zPKX`_@-)9=Uf-(N!7m+5jKt1)>BIx^3+{*>)N#STqHpM2)V;Wy5bm6go{ob$-q3kO ze09%}G2lj2={+)bP)Bcc;#T~$Qh9B3;)bqcqVuB1bZT8k#4mVZ{^G8c^N)%yo40J< z#AUGyt~s!G|94*cn;_-=3 z_iz78r<6Fn1HZmEHt~(GtNuHqdqp2N- 
zL?C_uteDf!XnzLfNZvVn{B05MI}`6O!K06iRkpp+DHYcmrLwbTw4#%kvu1RQF+60S zIJ9x7vbI=0O~_KGIvc%@tQhJ?AZJ_{OGK`UZ-~5kznzf<2P6*dOKdpw4@d4i;_zhR z$l|B>94X(l;iyCYv0)(c?}YlX7dEK>l76L1JiNtGjbG|BN_jbPVPq3zp`5;M_^Eek{GH_EmM2CQkV2P zN$%|HG9|gYw{wlW1no&uZmxtL~Zpn>F2q2N=1Lat>KQ;#+JEVCMU>`kLJ_P?e zPL?{mB)PX+!j6%~<>k_rXiV;wI-*Yi2pBEA#L#nBM~^He4^?7GyvL=(y8ECUL4l}- zA5k&7s!NuyjY@N*PUIHn4p}RMwqooD%5mu$`Je@J zq?dIa*gFtSVs5hha_Iy>UY7eXua#07t;%v7^FCbelAa^5Nno)0`$>e7el2|<4z@;) z#}13eq)(y!h`ctsx>t$4O;YE*0`u&NDrmD)IySljs=!xDeR8H72Zl5lB~St=(nqCe z*ZvH$lC(&g+Zm02yNf1s06`UNMdc*<_ZV9T{w|Tv=%R1VA$Z_dCyGUlN4h2XFIahC zgmfkPi%C$|b+DD{kfQ?tc%%~mkxpNPp`ewwcAx{m9N0!(NfE4WCLX6xQpdaq$Vfzb zRqS>6J~AL50LC5B`dwXe=OMAHqmf)J)g{f97R9A`XmvhoF>XscQRmngAkaC|8QCzg zH*$)x4IHG{XtaMx{v+O%9^yG|?C-bYxfst?c!uztY?L=87!OzzuRY;BGoR}}R<j;ThA%N3hJr&5Y1vt_6_ULM|-FdYarW5uCz5jrlc#eV&f!_fxbj;aieP`#nb5DtR3a&6d% zv5m=FrjrJVq>78L7+{)WVt(8TL7jXMB+B^kqegIDjPh<(#FMSOkNmnw& zPb#4N#sto+ac5N`>3z4^+2(y0(H8bc_CtBwLl94JceYxNV>|ZvsIzsEMWK^e#}3-dLsO^)sLaURezQQ0=;RC|gGcR*jFIS2D&1 zol@zrS8PaNhR||uIBoL=K9xvh0(EYJ%t?5-9nS@L@XDSG@plIv!aaH}!sGRy!xsnm z-oK1WBx_^GV&uj!SEa53obiqUjgK;*lHi)4EngltF;mAFDX*-3JgQO}+PKjv9pw%U zqZ->_KpcX@^ZWNlR-%2PEhHa%?=-%b<2lmz9gXv-hWA(c-Y>>y#0OW~p!ef_?+JV+ z7T*tiOM(nbA|SD~FRLq7;I z7RiEs5M7ymyQe#r18OVUJ@SD1cE5v3QoA!$x%$=*s|~&UQBc)Dy9PCKTAU%9oMBLt zG8S?N#xRUNiKfwddiZ)8I}<#7^>8(@;ZzVRfvnS?KGV1J$se(}KXNSk^I%GP`SxIX zCQ%>(QS6_Fc6YVi?rWeZgHjnX*_2laMb8NxJp@T_~LJ}fK6Gcbb8S7UcrD4y} z4zrJkY4&ht)Gtwj+h=Q?(#TM`OlBXx)gs3n)X(<5*Y>9&>$Ha#1&f)JM7!6b zU4oO>?n=?_+4#<*r$l#V501%!Ea2y-ncg}2Y4k(5@9nkvv%%YII2XX8c?ScH)8kEu zpV=Pf>jzr>s0fWLG`V4yAl0vixFMS5fH!6FmyB6v;1-olu(F{4~bY^aVDRv^+p zqud^}w+w$hUGr?il8fLs8W7tm_8r{r3cm*2% zzSgO1DuTq6i^E$ui_Z(3Wt2zOgY%%jXN+zhg;2qoIk{Nr>p-wNK6*0Mz>YxoJd z7CZ#bvT_-E3)?>g@jIh&&|nlS3--85t&I46-O$F-fL~!h6ilucS`PifKCCYmPl556z7XvP!iLKG z`o}R*a;TTl;`qi*#~Z_h@h4!tJ$;g|{0W;N)-?+hmd#u+1ocXYx!Hl4nMBukeo#gehn85}R;UpAzZ*Li7Zsq4!~M)EF(69rz85+QpGU#(3Z#!q{SIaI91uE0$q< z<%1eA%9}SDWqb|mDdRZ?izUcX#&D%rF@^`r<6~pP+x&epQDqpEanPk=gS--lOyk$= 
z-7dE(S^o$(crwQtYsXF460v82CmdA3oy`Q=&CWW^ z0}^KSm!a~YT|{|jka|9Wkd>o~z*`Rk1Zb!-XYvO9GZJ6XT2?l3_zha@$hO6sx8!UG z0O4q%$R~yVGTFDbtR?3>>=Ki2<3STXG8N#^#v)kH!8*hf2)2f-=uT-J?JYKHQD_j~ zauPo%aIh?jR%!of95-p>F+wv|X{sX{6Ezut;LYQ0{jxV8@w<;IP@|%KUj;D|MjdTm zX@b5}qMb+tk+q5y$2%u6lZ~89FvhT$f%t>XTaMGO<4#LskQU>6zqhW?0e1<~VU z8DnTP2t@}5jbnymD{F}%BON{byCJ(w?XgDiQxJI9*udnXTSKEEyRq1Caia%LjH*nS z!4|(#-0X~s&wP2X_lN8ehtGg$(|CE^I6J6-*W;kYV%>rEwV%Wk*VOvuKYL6C|FP-l+ZpL}GA*7SpLXpbmipE7&%E z3Z4+JDFzMT0RkJV&7T6LsyKRA7fS?M&Xo})#9m%s0&}vycm^p9H`jg@UsHp%FV~Eg z!QjFp0+N0GFC$PKj*O2CvPM>qkFq291RNyQp*Un^I@LK(thk_c9*aW*B8aa6myh-< zVm=J8DANegx=GWXW(*33xfk=3HZO(z@fT+tG9Wmbea8* z)vm85SeEAYFa;lz*8IB|5o59FS6hsCkP#bWGyhxPSsgz{m2i53ATh7A*RsQGtx?$o zh2f^!+nS0qVuPz{&nQvV0(kV2LmtyS$rc= z0~-ftd8opOCPW`zjNu0VC{UNRM{{9F&VS;sVx{?4rZegge!-^)fadhL9s;%pC8OG8 zZIMTet##}|bqN3@A?a7Ys%=L9G;7}icZ=r{&+40VdG+}4FmIt&P`6Xrtdd{nSYuFe zZ&uD!^erO{?N4#s%IU<=VcDE@V7KaQB<}(5>X-j7Yj}Te!#n94{pBxjc)z0IegB5{ zD;wUAX?VY?;r)FL@9%GTzq;X_^!5JneX!yEnuhml8{R+E@c!Y3_r(qGA8B~MuHpTo z4eyDD_m4Haf4t#+C*DEJ&ngWuN@aF&4E7>si zTN>VPZFs+};r+7>@3%L+-_h`XC*Hkbcth~R`g8c5qg?$Od>-G^1pQ_2!gqqbs#~uw z;CsPf)qVd(eD7!E{B3=y_M5=VHMalvFXMMUE=CaR+WiWC6KCN1t^DV@-^q3&R@~3| z)Rp;at&E>>sw+cw!@2{nfwS|E_wL5BHD<1^&9Ae58<;wL=Wpv9cxP+X$ksKK`zE#3 z%;43vwX52efC!zipf>*g?m?LVey^^bZ=p;*!&g7%Z`XcjXSl!pd+}Qn1BmbZW$wef z|2Xykc0Ya#;{W_*zJoGTvw`(}K7cwL&H^^|{9xVq9u#G3EJGt-Sl{+TP30%!66@=J zx2bMTyXBwr!%gL?REzBjf7$OfmGyIx_5FXpscaqhSO=F!P_}N5;N9QvZq}BcL#+S( z2My(ahO-5=xo>Ko2)!?Xf5$RBN8vdJ&uToU;vrim>C#ExPi4J#`aBoU zmGB?miDws{$MNjJvlq|(hoj^n{miaUyv`y##KSGY^Aho%!0*T5*??yP&rUqM@I(&n zXJu)O)Gv*14xagVsDFC)!$aTG(}{*BmVBi z6Gu7nvcaF!vZGx5=_I zX#{m>h60ZeJLT$9szqg$YdKKINo5eNlGAL{wUU;V)aA=0R!JFLamxA?wSEW5PB=&= z#DRe^b6^0LLvA|cwnH)=4*1Tb72~Po3Zu;aKpRr73=BZwNh;cbf!Fd6D^6O!LQy;w z8}w-yPS(gd2$s=ILq*t3#@3@BWy4Xlc81eT{@K9 z46y8YG5~PJZJWSKt4Sqqsg9;4b6Go`)jQ7s&SyEO#>+5q_#5Up4_~;_zyQSfp$dZ6 z49h|g*uc8+5yIkhd3*%OtGYQmnMvhrRZE$cYwKOFM6(&HpP}k1udUH%QtE5aA+Qgk 
zzJ(aAWY#jWPDabw*{t3@z4|sH4J|9DsE(Vo-K?wk%mB(h^|&7x)7Z;y*0HmIM#^># zCyD9w&R{s~c;ob?&Ece+Og67Lxm4105%*^4eedOzF4e{k@#|mO9FMA)IaSrtrUMG> z0tx3_!*^^uVHLM*` zIU}dfo0a`)Br!Q`)qGCLrqhn0_s>eRdChi`In%^GcFerx>hou%*`#J*OW|nSuOj<6Na%Q9M^DF8xhG?*38-Z0XHx#N@d#^N|{znxEpX2I&r1K zV!(Az?GDpM{I!E{>Wu4{j&16TKhB4loTXvbbZBjkP=&_a%qDYg#xyk5&guugk8j6z z*0jlJ+vcF0bkYS|l+C0Jk}@p)p!f4ZwZqW+Pq`T#C@RDYR|Bu*TIrObIr@?roV|^d zixeem+7&Qt0Ls8|rz*CSb(490>FgXHSt44mm$ zRC)c16L2#n+&k2=}l7RyiT=YtC+$8fF;G%#{Q=N3mQEXQQg6W54TG!9A>Zp}8ATgQ9k)j$Y zHX=f-si=9I&9;fk&#=%~JCw3e}E+sWPpVp61 zt3#zCDusO+6(z<0g7U&AiUyTWnt3HTt~vfPY*i6GrrZx*(SC-#64Rva(O$aE9vG5sXXSRift+mVJ(GR zIVL186Z%xJq#hIkrjrq9Wf*GKfIwtJ^`RlrkDjhUp=oPWJ5$_r61jH(H9MuMW=7A1 zsij&*GVi2q6N*U-DS&i9$s1y|FQ|_<4~@Z_(;23%=j7A20d@?yiNrwQ6V(}QqIAwe zsv*VAsp*VmruE$PP|c?7L{L~KvuG!!W_4{=RzIEct-gsR0n29@Y48pyOVjg755w}+ z*i!y!kndgLB1C(5z zGE)I~`YX@#z)U=*mTj8+^bXbl_McDEc8r{oPh*A|?C-pyzbeddwY-xwGAY-{X>LBB zHT2`c)XFJJCYuCLn1!>2y6zEs!B4 zok>}C(uJE|%G6hH^8*Ds@Rvfl+jO`2@NEXEuFLTdDF@R zemVW*Fw;+_owSx$pwxp%hL~=BO{iLMiUVf{txe7bRZHrx4xsp1L?EV9)w3m+hE5=F zK^F;r7aCptHKBTflF>|QP+lQPT{h$Drv&xH-1DnFS*auxIgXt}x;iVHH1yYo>Iqsz z1h7N>1G1ZSO#RfL(|H_|QZv=AOz3S5bd!h0gQQ9Qv`}4v&oGocM1ZskgFsea8>W_$ zadL<|w?HcLNU@jKUl*#Dnan4#QVO#ErSoP^KRr|}7xFitm&-X>oIN>xU6@*_Ea){@ zKFbCVVK`ZReW+SFHGvK@RvqN?2!fr$+1{KI+ zYf1ghVQOWPDFDV&Qwp)$P4OsyO;N-9d;!O^Z|Y}n)nnO?D`J5rP;LOnZNO^G{n z4h;Cz1Rh_bsTQTKG_o1c(4?zxDAk=E$)+Jq??A0LTk#V$r>63%lFGX&C2M4~w2qt) zo}dE$2I|e3omZkyQUr@xx<(4TB(|oJ1|_kRx;c|$3X)ICaLg=nhJdp&jR-7vMXX%~p#fGf*5 z;6F@#W2joWq^X)tR)O%T8hJzC6sne~W}%+4pg961Gt&A{s9H9(3|88-%v>Iw8u~e5 zYN;7bRjj;bs5zKOl$^dfOf7f-!0_lmIjDkmW>Wg_>|!ZbZ!wP$fu+#kK-h&;3Gv-= zq2n3}Gfvf1T=3nF1=h#Tn~pvjsum)>Gyo%O<5-~6u^6USE^R3$s9!GW=4mPPu~4)MKbZkSpc$T-d`ax8)2Bc0bvVQPV>xQ=RITe?Y-Ec9}?S||<$IQ%RO zQ!^F45~h}c6vegy%Z#O^fh|dWJXEc;o5Vt^Dcf;iIx+Mup=xQcEM(GV3Yva6UfBA1 zp=ue5na?OtU|C62R`ji5YMCnRtI$|M*ma?-*7R**YFTM;gE;zK&4MNqZ0=hxX%N~| zO>}*BjyKP4iqplT_c)p1`UZOgayvm@)Zg|t{|TebR7~_s&^|X2e!RhhH64w$36q&j2!K@et{Hb5#e47`2gNGNRSR^#=L%ExN1%wE(4^8 
zSD_z3ioPA8s=@y+tKcw&jU)?8r=?#Mt{OZkKmju#?JoSAa{9%gs<}}3q+GZUKz)}3 z*R8)xnhB?w1b+m}7xWV_uj6>v-z_nB)Y^(ePtb^UvqMW9FSgkWgol?VoI=LSwD=wZ zSlGgjs^y^!0Y{eAFOg=M&hpslsu8y~03z%42dv0h6$s+9wQ)ZZHp7#Xl+ z={z*5X&d`8Y3i3tm(m=RWIO&iwe%A0%>pb~8aymq&eG{TR2=#h;bxI1+lOM{Vu!S4 zSb0~!GF&}VQQ-0-TotZf#(`cJMteJND2fK7Q4*M$dhv7kNh zL7NVB3p@z(Cfq#XwDkWnfQDV_zn7 zIsLkj^|ByD4m5x%TS;X>k3K4Wm`z-1#~BuzsYPasu&Y^5W%Dj9U65d*FvOKHDNFyD zbR8dEJ8pOZVU;yD*|%!pPXQxqX-NgHRyfvRn9l1Tmp;bEmuiQa*E=`+L;0o?`BM+U z4EUn70iBi!#u0`?eP>9#a8#(1fmPt<;4hQcub0|)wDr`WAz|FGqf&?fL_C}6y)a`DsLJ@;$&iwOoyCfpbp4tFy$+n z{u$|I7*N$cCVL(-DNyT{1LJop39Eo>rc5oR-^^NP17Sh`0JsB*0%;%T4ve6<#miPR`t1^PF5>_Y4yxSWhy)bMHQNkeYZ$311_n-`KLp|| z`vO2nL7j76Mz?J$o;NVS)@Kc6N`%iHcY-Au$VWY-Zj$^b7X+Z2of<8xM!0Y zc!s(97x;4Vh2pMcD#wgK_crEQNXzCqw|OgE)xry>Lx_O6tOawOnM&qW=nuart)b=O zAwsmB*SA=HWMN@0bU+Z%7HqX<7Mu(`0TGIew!Sobln_)N92STm-0#yVu;5>oRuOsN z&IIh=lmhl)1j!63U!SwZ86EVHANEK%-;n1wqBmjW(7z%v*GgYyC5PO5lxc`pqAM+d zkT6HVV5-5$3MLi$4*jdrOKQEcixE*OnPj|r5&$$n5ET7u($Te2d~B)4u~mwq5BOL? zv z)P>ie=4M>DgBULU&FSCZN4p;+DtXQ$r`1;U>}dkBP)LIC*%>zjM&B~@Z*nHzUk`ES z!;XQ|m5&;|;kE-KiJ&hR1R)7|P2VM*L~{(`hO4kL6r(dl;ofk1#MOm3-C$`k;5@3| z!?to&O$4SoSW6@bD4aeK$v!aPyWxZABwB`}2(;)}YRoLHJ$aLl~^U=Tcc zo$46#Ir9968?f!*(n>@eJQM^^Ude+eocT?->O$yLTx@XtyS_b_XOpAbRh5ibhAq_* zY6c>2O?dXCNybej^@n``@zu;$YgWgJj0te7%tJ@(TF`K&_3yRlv}yYoxVjT(0sLm6 z1a(uI{(T?)@^$K=+7@pOlDM7|-h5dTp`Z%9AM{5;@Q?6T%0Sm+<#6)g-cVcLJ$qyv zhEXtgP)M4ZnkAQ>A4o?6_u2NXIo4*q{1h%U)1Wi(`GW-!K2G`%8Se7kUK}fHuE=nx zt{$c9R`QffeUNg5VxQ$GQpn1vIRqtRsnhz8r1?w+VHZpeg0V$bhevA-*7{d8HUOT^ zuM8oRPcQ#mcG|Ck{f9}UiJ&+j~k&x7zLr!kN(Em+z#yxZYeY3n~> z=u)kcU(K&Uh9iG!%&BE_jYw3Wu7 z&c~ppJ<;yfkQu}G^oTi4VT**uKpxDg)&^xG%2ss5UPQ&1p}_(Fp0x03Q_s`&!ywM zY1Dx<#bLpLiG0h*y&}K~U(zHT;8Sp^SM4OQ^B2ngyZnW>mArFTUh)_xJ=x9`>iCd47I2 zO~%w?oe^S(qPj>`LKc}dI}P_mQHrIGA`8PW{2wMnuhx^?l)Eu4UkLc zE&bQhM)aU^O_gu1=J?PsmH_(0wT4~b%ET7jZ7U(wh1;?~K*4ZT+bl{AkUytk*{I71YMiEJ%j%U~yq7_)R_4 z;gy6ejYhC(5HW7zY9&Vj^~jm3s{iLqr4lYjS%`{2AROEl;hYLLc>U?wlB2*M;B?I4 
z|8B;H3RZt+rX%K%fO;@#K@P=5*YI{RRQ5sxmUOeKYFWRm+VPQ?sPguT)_Z)zD*`M?fJ$#Ki6#_%uv#&~^|U(|`4 zUTNaUj)jSuuIA!c0J{V4W+-Cx|MqQ7!hUf$I^=Ouqv07a#UN~H1unqZl>SF1L~-n( zN_lY zSPk?)ODs^6XOiN;2U9dK!0t+i4=i1qz$_&8R+@N70K*6a(;A3%28Iw5PM?3_jECrk zpcwABfSY(mgML9%p+3g;(f>y>uu^q&8K>O900Sx!W#2uhA^XT9;=swOh=5GN3r@B5 zzqWg(!&wb18`cwMECre<{dsnZ@WsS1gVilq;Uc*?h|mkaP_}U>qI<*QcX{OB0^V;pil!_8#snP`roJ3L-3hf7aoZaQJBh11l1;y ziwJ?&|Cm+}!E>nV0e;6O)2adKErdep|CA`sPvv%Jj_s2O_2EzygeZq}4lq|hv4I3% z`o9=a%2VoKcI9T8oDP)L=xE3s8pbVZN}v${kqst!16o=5O1t@t{_hZi7AS@T@iTA2 z4H?lTdHn_H)tF%ztw@G+{e!sO1Q#y7m9be!lja~i2Nh81QdW!G>qxO%0X&1+wMQ@GA4$NWNJj zK^*H|N6( zH8gzT$|0d)LL35Pr6!?hMB|!%k^E-DbgqLHIBphkh(JYuLh(Ez>BmolpeG$It3j2V zfi8fwefq`nTc~}WE=RNvg=d)57e4scpzXpjDGAjjqHYj>fGE~?wF_u?9%gbbHUp_S zvLM3mZnq^czt|CuVk)qqIZ)JGBEPDNVu-x}q#G55D^6LvV#VorFf*Le@IQuD90~F? zSR-I=)-RQrA(o#AXab%?&29J$V7OFuU|^_-L;z&*8W`YmIfI}ld|)L5;6JXh)ukhw z55-0vA=no%2+A%3!QnkJv2$tk;L}EWidRQb1*nn4NrVn*{o|pw#?)+8HD_T=Db(#VHy@Tkjv$1 z*m#FV>GHS+8!xzaq8GV)asI1L!@~|M92{SV( zAIi98sxnyp4bunjWLOa2G@^+>4t7NhP)UQw~S%c2tc}V&*NOv2fHdIv#f(_vF ziwq+Qj*s`rhoQDVi)%&{1>cLbtBClA{}Q4@H2wYZCKRnMVXhe`86L9fP^#*Ifq?lD z^M&cIHMR{G4O?IxrQ3mxz%a9R%E}?g@@o0*=nA3mM1iK?YSWR-#xyW+d~xe*k>;Ix z)##cNGC2S!2-1S?lC*6(WB`Sd;!$@D?K-MC{; zfI$q_HAK>nZlfvW=#nX1nt^t}2|(+Ygo`Y)0U`cdzeb)Vn>Lsb3yM)(b8Kbde4y&r z$}_aWb9q@q-c4xikw^sM5HdyRA8K#rFe_ScUjwG*ptVC7+J|LwA@kTVrIA1{mCI-HIZgkFOdj<6cmx805bQ_s$?)++BpYIF^y}IKBgiVacNn8A zGp`{WK>w&5X9W3Uawk$4Q0l9X%jAkkbS4dom^lv@J7!1$o*{iW40OY!yG2YZZ6+kk zKuSTRd)0R`>eLW&>NyZMrdWn%4I&YQ2{@3t2&&MpmxV7FXVIqT{9ko(7HJ!2H~HBB z#+UsBPf6WYYQkCen4~OW*L7+X{^JzBi*%J9kxG?f z_~6@+$>9J(6yy!EFXY8DXV`e?6*6-)Td@t)P zFE)}_LPtYNdmJ5b7yp#JmG&O*m5XjZ0Ib7RQx)oGEz!?rrg=J$fJYNjZBU{!grGuA zqJLVZ>@w_l43$zDUL43NfdA9zQ--bIIL!v&ai5~(l<=8z;Y(`iH%+qvScWr5ZNQjd z8!?aiXE<%2X|id!@Vf{EK!y+eAE_twn`ft8Kn(sBCJbW8%mXhj{gzp37k=@E24y*N z2f%`z)o-09AVD~DNT`cw?<^c8?38}nG#dc@gb5K^4wx5^%rB*Xc6Me4?IkEGJWAjY zkwqM#e!Kh$?!4MgX5KYqsSCbWaGJ!d-hmY!c}UZS2RA($?m%4_=P+HDCQTWVG%^?cri2Zy15zTK7}PdPwU5`o}f 
z0Y`_s+HMKq_9@^4yfT>uUjIVdEs+y3L^C9;hgu5U&llxdz?yf?=ft%ND$mM>a|DTf zk#;DHnOKMb%&YpBWaeeX^gY!;BTAm)OXJ2QOyYL4$dZo$M>u{kZz=uDGFddUTw&)w zG3q#I=4}C?WE|H+RPPrbIDXt^%(S?n8_gq327dOgoq}U4tjKBoD>6&RUX^^HQEw8* zJ*D`}a%K7N51fj36gv$Z&U z;cZO@X*fnA)ltfRYA4j zHva>zj!=?Kv8W;L2%B1A#JN1?lpPfN0;ML>Z^0o3c6W-!(|^b&7RJun;C&AInSwO} z+!jD>Lyz_&nc1X86boO4wfNZG9c8D1MpX`GvA}}-qPa8xok`pA%7mu+iR^^2m z=f-J)n4F0~QcvP~ErbPAo0c=zMxja}vHq~wfnm~5P3 z+X$BawM@ZNT-{4fJc@EWIsH;=PD!J05FWsy160 z3y1cwKxa@&ir8qlK>-yJ;6_(t!Q6vLQzT2#e=E;!8pQ@J6k)$>!rB3z&CZz)g01x5 z$&MdP>SXz_dE$=}A{d6o=z>bRj+J#$81?S9XBm*tj{-^o&8wDA!ho1h+4}G0GyVM_ zyL6*34QBvadv|is!@!b(LoiMP7j6xXqd&`hMyf|NysjD>u6I52J6rK4k-`^Q)}Y)~ z;kIKT)$o7G-H50e!@eJ{JU1h%iG(rmLx4FS0fHc!dHoMFA{mr7AgSTjIZBS4vQ>RA zn|uQWN_iA$4PEz9wZ_vRavppD36S_mZY_7{&@KWn1 zux|qHyQp!;Ee27<1$Pwpec#u(FYF-V=Tuc-W*pP@e-aX3CM?sxuD)Hj?m6fG`+a}j zR=N3ii#OSSeA}a%%<=?0yMVxxVe0$iba!-ng07}6wbKOV9=T3+x+kjS<{2`HnUKgA zur#K7qe_j!brko4{m?OBv3g%rsbUAs9SpSukat3n?vE;AKaV;kEpf%6nM8UZs#J5|1erYA3RYkmG8+TLUQP&#SKA&Vn%J+sHq;h!gS%*DC9G1dO(c{H%o zA9_r4w6B?GPh{rVd4RPcM!Pyp)1`|+N3jrHWt4>#EYjhIO_xQLWEC*{F-Ztn0(aBp zQ6<|`+^>js4$=wMQX#Vc#1+uxFu8b<>8B){Z(LpOFn&kUcQ6*LsX=pWx zrEO0!9Hy(IN=91kQ4s#I`|vb2_NF1IVg-F1t% zPw%bv?09wi_tuf88(ZBX$8cM5B^#eH?J%fgFt9g=FGF{Pe2$`BLt;CoOV@{$Sgk#5 z6y8eEi58_BqDn#tP9p#Sb5z(cKHV5rGDvmS;3S}nKmqwLpzEha^q8bhrjHw!YSyX9S_s4lK za30@L)sgee9{L9d7^wR#qdNFWvJOqVq$y)vpKH<#z~27?wZnNazv zs8UBgl0CcSi|iy@Q+jn&sWs}EzE+jVyr0i|{54S}yV6#Pre%@d&3*0ZwNWJ>thsH9 zp{B62Y<1G>qDtmXfY54D9IxnPZ>86Vl?tu}n^5Kx-H9xpH-(k-I}n(iCZ!-V{|5z8>Tx50>~qDHEJxR7rns7b^>7 z#vZ1t-0{s}r9#h8w!+E2iNJNzTf$0M-!`7PVx-~WIZSVjDgkn6K+2>%N%f2|eOpus zG2V+&nw1TIPWsc^qe>w3YE5-++o@Z{L|v!l0)luJyv zgnxdE;+&~AX=s!U)9w#Mm2^WgnBbiZP{1^-rCXy)5O;pLWxmrTw+zQW7*#UHfV^?C z=Ya-7*-0M?D=An;)wFYUWZY)pyDhAQmnIddRn+M%Crug*9FHdvqZxBnk) zI;)W}UD560EaxNKM*2wfGUymD@X6c7!Alo(65+vYe$g72%QkSXc>Z%ecY{ zaUj8I0rl~yQopHd6XRT`Sks48neJG;b9QPM{eU`a{QS9PgiCdGgaEj{?}<3Q8>rFM zK@Tyca@^rt`b7Aew4KyzgX6w7w?fbSWLOC%7iPgQg5@+X1wR#5s&hw2;qy-ob>+a~ 
z(~Eaae^2x2&-NcAhf~oXv+_O>zp6|PXAhCXvO5)VO`loJQ0<~|wDaL&V^B=$Rv!Dd za`?75=;<_vxSw~?^ySX*E3jdCYT9s+-5Hc4?*wFoGJSUO1++tH!Nb4HR!3y>Q)HRY z4wF|N(dAvqAbl?SJ6HRbqe*hE&Vvlf)16@@MtuvvMr~-Inz)%hA6Mf2gAMJ-jr+4q zUx+K|M`qJ6a^q=bb8}ZzX#f{HkR5Qkotp=H>5EY%C)?Hqb=L`1Mh^AUm!e8GG&0Km z5JRe)FtA^aDmkZgR?J5eRWT7xd!Hwae7k$(DaSV`M4 z6C}iX(RU!wOW%tswe*vBaz~myj1g%oeLt$?-fdd;mE!_iUm;}rL0Cz68==24@*V4X z7^NSEmHg&=3Yt?I`^`s>DkIiClF-BS9;; zcYYC8%0W~v8G}={L=(zi#+8sx2R$Faxt)nS@mGtt&E|?TtIzgVm1mCkU${0t8<6NZ zo8!G(Et!ttj6&Omw}|e|p*x{%clzt-MG!h2>E?AIilWfKmVUE%>+B-Thz8rg2#0$4 zwy-}P!xd;?aq%MNMnxxsMJxR_dq9w{UX^|yRWi^Y8SK~#Fg)24q(4NJB$L^RH3>UIRfEz@e~c=1I4GQ+GIx|% zk=U_+iYjq}iT$-b6V%}reCmIWDggrM6T*}>61VjorN2a#di4;_rfeC3@xeHJ`D;|k zyjVKI1Poce@41lv7FANjf&AJ5D(xFyDY^c8SjmA<-L(4=%{IFz(?6n0Dz}y1Ik2<% zDfC*WyQ4~u;~8@7rfTn~5vs^NQ6;CN)&^kqwl6)TlXP!XNv~B#9gK03Bpk=hbYE1d zk3Q)42bSC!m9BldKdNNgnn~2I_O!XJpB{)RnaE)GSH`^J>PFD%!Ke}jp2rSmOQu4o z9ZwHMl``&>?=|D=+rVy}^l((kOa~}{8CpxbIs~T2sFI}HP#VdB9N2UP2Kq=;$>P%X zj+=^G-Dw)rbn&LcUPT_LRxxY|3htn%<_ZZyT9S&nm_cS26=jJ+vj3uUX1%T>s_kz-ll)59Y{LhfjrL z!q(5y4`L==yVWS z=7};aqXR=l6vv&ghS7eY+B4pyY{TD5304vsV7WS z?CrveJ8j{#g)^S9^T`JrE6tV0DNjA*{RQy4XHV`YnTGPtxOs=a)y#kESS5l=7lcK@@RcLS>Ct4 TxxBh>xOQMPUIyHJ{O|r77GYv= literal 0 HcmV?d00001 diff --git a/substreams/substreams-trigger-filter/substreams.yaml b/substreams/substreams-trigger-filter/substreams.yaml new file mode 100755 index 00000000000..eb7f1102287 --- /dev/null +++ b/substreams/substreams-trigger-filter/substreams.yaml @@ -0,0 +1,36 @@ +specVersion: v0.1.0 +package: + name: substreams_trigger_filter + version: v0.1.0 + +imports: + near: https://github.com/streamingfast/firehose-near/releases/download/v1.1.0/substreams-near-v1.1.0.spkg + +protobuf: + files: + - receipts.proto + - near.proto + importPaths: + - ./proto + +binaries: + default: + type: wasm/rust-v1 + file: ../../target/wasm32-unknown-unknown/release/substreams.wasm + +modules: + - name: near_filter + kind: map + inputs: + - params: string + - source: 
sf.near.type.v1.Block + output: + type: proto:receipts.v1.Receipts + - name: graph_out + kind: map + initialBlock: 9820214 + inputs: + - source: sf.near.type.v1.Block + output: + type: proto:sf.substreams.entity.v1.EntityChanges + diff --git a/substreams/trigger-filters/Cargo.toml b/substreams/trigger-filters/Cargo.toml new file mode 100644 index 00000000000..b1f2db07772 --- /dev/null +++ b/substreams/trigger-filters/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "trigger-filters" +version.workspace = true +edition.workspace = true + +[dependencies] +anyhow = "1" diff --git a/substreams/trigger-filters/src/lib.rs b/substreams/trigger-filters/src/lib.rs new file mode 100644 index 00000000000..81bb423f7f5 --- /dev/null +++ b/substreams/trigger-filters/src/lib.rs @@ -0,0 +1,80 @@ +use anyhow::anyhow; +use std::collections::HashSet; + +#[derive(Debug, Default, PartialEq)] +pub struct NearFilter<'a> { + pub accounts: HashSet<&'a str>, + pub partial_accounts: HashSet<(Option<&'a str>, Option<&'a str>)>, +} + +impl<'a> NearFilter<'a> { + pub fn matches(&self, account: &str) -> bool { + let partial_match = self.partial_accounts.iter().any(|partial| match partial { + (Some(prefix), Some(suffix)) => { + account.starts_with(prefix) && account.ends_with(suffix) + } + (Some(prefix), None) => account.starts_with(prefix), + (None, Some(suffix)) => account.ends_with(suffix), + (None, None) => unreachable!(), + }); + + if !self.accounts.contains(&account) && !partial_match { + return false; + } + + true + } +} + +impl<'a> TryFrom<&'a str> for NearFilter<'a> { + type Error = anyhow::Error; + + fn try_from(params: &'a str) -> Result { + let mut accounts: HashSet<&str> = HashSet::default(); + let mut partial_accounts: HashSet<(Option<&str>, Option<&str>)> = HashSet::default(); + let mut lines = params.lines(); + let mut header = lines + .next() + .ok_or(anyhow!("header line not present"))? 
+ .split(","); + let accs_len: usize = header + .next() + .ok_or(anyhow!("header didn't have the expected format"))? + .parse() + .map_err(|_| anyhow!("accounts len is supposed to be a usize"))?; + let partials_len: usize = header + .next() + .ok_or(anyhow!("header didn't contain patials len"))? + .parse() + .map_err(|_| anyhow!("partials len is supposed to be a usize"))?; + + let accs_line = lines.next(); + if accs_len != 0 { + accounts.extend( + accs_line + .ok_or(anyhow!("full matches line not found"))? + .split(","), + ); + } + + if partials_len != 0 { + partial_accounts.extend(lines.take(partials_len).map(|line| { + let mut parts = line.split(","); + let start = match parts.next() { + Some(x) if x.is_empty() => None, + x => x, + }; + let end = match parts.next() { + Some(x) if x.is_empty() => None, + x => x, + }; + (start, end) + })); + } + + Ok(NearFilter { + accounts, + partial_accounts, + }) + } +} diff --git a/tests/integration-tests/package.json b/tests/integration-tests/package.json index fd134e4d88d..92f775296bd 100644 --- a/tests/integration-tests/package.json +++ b/tests/integration-tests/package.json @@ -1,7 +1,6 @@ { "private": true, "workspaces": [ - "api-version-v0-0-4", "ganache-reverts", "host-exports", "non-fatal-errors", diff --git a/tests/integration-tests/yarn.lock b/tests/integration-tests/yarn.lock index f25f7096987..3e918c20720 100644 --- a/tests/integration-tests/yarn.lock +++ b/tests/integration-tests/yarn.lock @@ -967,31 +967,6 @@ which "2.0.2" yaml "1.10.2" -"@graphprotocol/graph-cli@https://github.com/graphprotocol/graph-cli#v0.21.1": - version "0.21.1" - resolved "https://github.com/graphprotocol/graph-cli#352f34d66e3fc7ebd55fa0a2848ce32e191baf5f" - dependencies: - assemblyscript "git+https://github.com/AssemblyScript/assemblyscript.git#v0.6" - chalk "^3.0.0" - chokidar "^3.0.2" - debug "^4.1.1" - docker-compose "^0.23.2" - dockerode "^2.5.8" - fs-extra "^9.0.0" - glob "^7.1.2" - gluegun "^4.3.1" - graphql "^15.5.0" - immutable 
"^3.8.2" - ipfs-http-client "^34.0.0" - jayson "^3.0.2" - js-yaml "^3.13.1" - node-fetch "^2.3.0" - pkginfo "^0.4.1" - prettier "^1.13.5" - request "^2.88.0" - tmp-promise "^3.0.2" - yaml "^1.5.1" - "@graphprotocol/graph-ts@0.30.0": version "0.30.0" resolved "https://registry.yarnpkg.com/@graphprotocol/graph-ts/-/graph-ts-0.30.0.tgz#591dee3c7d9fc236ad57ce0712779e94aef9a50a" @@ -1006,12 +981,6 @@ dependencies: assemblyscript "0.19.10" -"@graphprotocol/graph-ts@https://github.com/graphprotocol/graph-ts#v0.21.1": - version "0.20.0" - resolved "https://github.com/graphprotocol/graph-ts#56adb62d9e4233c6fc6c38bc0519a8a566afdd9e" - dependencies: - assemblyscript "https://github.com/AssemblyScript/assemblyscript#36040d5b5312f19a025782b5e36663823494c2f3" - "@graphql-tools/batch-delegate@^6.2.4", "@graphql-tools/batch-delegate@^6.2.6": version "6.2.6" resolved "https://registry.yarnpkg.com/@graphql-tools/batch-delegate/-/batch-delegate-6.2.6.tgz#fbea98dc825f87ef29ea5f3f371912c2a2aa2f2c" @@ -2772,12 +2741,7 @@ asap@~2.0.3, asap@~2.0.6: resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= -asmcrypto.js@^2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/asmcrypto.js/-/asmcrypto.js-2.3.2.tgz#b9f84bd0a1fb82f21f8c29cc284a707ad17bba2e" - integrity sha512-3FgFARf7RupsZETQ1nHnhLUUvpcttcCq1iZCaVAbJZbCZ5VNRrNyvpDyHTOb0KC3llFcsyOT/a99NZcCbeiEsA== - -asn1.js@^5.0.1, asn1.js@^5.2.0: +asn1.js@^5.2.0: version "5.4.1" resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-5.4.1.tgz#11a980b84ebb91781ce35b0fdc2ee294e3783f07" integrity sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA== @@ -2820,28 +2784,6 @@ assemblyscript@0.19.23: long "^5.2.0" source-map-support "^0.5.20" -"assemblyscript@git+https://github.com/AssemblyScript/assemblyscript.git#v0.6": - version "0.6.0" - resolved 
"git+https://github.com/AssemblyScript/assemblyscript.git#3ed76a97f05335504166fce1653da75f4face28f" - dependencies: - "@protobufjs/utf8" "^1.1.0" - binaryen "77.0.0-nightly.20190407" - glob "^7.1.3" - long "^4.0.0" - opencollective-postinstall "^2.0.0" - source-map-support "^0.5.11" - -"assemblyscript@https://github.com/AssemblyScript/assemblyscript#36040d5b5312f19a025782b5e36663823494c2f3": - version "0.6.0" - resolved "https://github.com/AssemblyScript/assemblyscript#36040d5b5312f19a025782b5e36663823494c2f3" - dependencies: - "@protobufjs/utf8" "^1.1.0" - binaryen "77.0.0-nightly.20190407" - glob "^7.1.3" - long "^4.0.0" - opencollective-postinstall "^2.0.0" - source-map-support "^0.5.11" - assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" @@ -2878,13 +2820,6 @@ async@^2.0.1, async@^2.1.2, async@^2.4.0, async@^2.5.0: dependencies: lodash "^4.17.14" -async@^2.6.1, async@^2.6.2, async@^2.6.3: - version "2.6.4" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" - integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== - dependencies: - lodash "^4.17.14" - async@^3.2.3: version "3.2.4" resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" @@ -3217,11 +3152,6 @@ binaryen@102.0.0-nightly.20211028: resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-102.0.0-nightly.20211028.tgz#8f1efb0920afd34509e342e37f84313ec936afb2" integrity sha512-GCJBVB5exbxzzvyt8MGDv/MeUjs6gkXDvf4xOIItRBptYl0Tz5sm1o/uG95YK0L0VeG5ajDu3hRtkBP2kzqC5w== -binaryen@77.0.0-nightly.20190407: - version "77.0.0-nightly.20190407" - resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-77.0.0-nightly.20190407.tgz#fbe4f8ba0d6bd0809a84eb519d2d5b5ddff3a7d1" - integrity 
sha512-1mxYNvQ0xywMe582K7V6Vo2zzhZZxMTeGHH8aE/+/AND8f64D8Q1GThVY3RVRwGY/4p+p95ccw9Xbw2ovFXRIg== - bindings@^1.5.0: version "1.5.0" resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" @@ -3229,13 +3159,6 @@ bindings@^1.5.0: dependencies: file-uri-to-path "1.0.0" -bip66@^1.1.5: - version "1.1.5" - resolved "https://registry.yarnpkg.com/bip66/-/bip66-1.1.5.tgz#01fa8748785ca70955d5011217d1b3139969ca22" - integrity sha512-nemMHz95EmS38a26XbbdxIYj5csHd3RMP3H5bwQknX0WYHF01qhpufP42mLOwVICuH2JmhIhXiWs89MfUGL7Xw== - dependencies: - safe-buffer "^5.0.1" - bl@^1.0.0: version "1.2.3" resolved "https://registry.yarnpkg.com/bl/-/bl-1.2.3.tgz#1e8dd80142eac80d7158c9dccc047fb620e035e7" @@ -3244,22 +3167,6 @@ bl@^1.0.0: readable-stream "^2.3.5" safe-buffer "^5.1.1" -bl@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/bl/-/bl-3.0.1.tgz#1cbb439299609e419b5a74d7fce2f8b37d8e5c6f" - integrity sha512-jrCW5ZhfQ/Vt07WX1Ngs+yn9BDqPL/gw28S7s9H6QK/gupnizNzJAss5akW20ISgOrbLTlXOOCTJeNUQqruAWQ== - dependencies: - readable-stream "^3.0.1" - -bl@^4.0.3: - version "4.1.0" - resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" - integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== - dependencies: - buffer "^5.5.0" - inherits "^2.0.4" - readable-stream "^3.4.0" - blakejs@^1.1.0: version "1.2.1" resolved "https://registry.yarnpkg.com/blakejs/-/blakejs-1.2.1.tgz#5057e4206eadb4a97f7c0b6e197a505042fc3814" @@ -3325,19 +3232,6 @@ boolbase@^1.0.0, boolbase@~1.0.0: resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= -borc@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/borc/-/borc-2.1.2.tgz#6ce75e7da5ce711b963755117dd1b187f6f8cf19" - integrity 
sha512-Sy9eoUi4OiKzq7VovMn246iTo17kzuyHJKomCfpWMlI6RpfN1gk95w7d7gH264nApVLg0HZfcpz62/g4VH1Y4w== - dependencies: - bignumber.js "^9.0.0" - buffer "^5.5.0" - commander "^2.15.0" - ieee754 "^1.1.13" - iso-url "~0.4.7" - json-text-sequence "~0.1.0" - readable-stream "^3.6.0" - brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -3384,7 +3278,7 @@ browser-stdout@1.3.1: resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== -browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.0.6, browserify-aes@^1.2.0: +browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== @@ -3449,7 +3343,7 @@ browserslist@^4.14.5, browserslist@^4.16.3: escalade "^3.1.1" node-releases "^1.1.70" -bs58@^4.0.0, bs58@^4.0.1: +bs58@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/bs58/-/bs58-4.0.1.tgz#be161e76c354f6f788ae4071f63f34e8c4f0a42a" integrity sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw== @@ -3520,7 +3414,7 @@ buffer-xor@^1.0.3: resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ== -buffer@^5.0.5, buffer@^5.2.1, buffer@^5.4.2, buffer@^5.4.3, buffer@^5.5.0, buffer@^5.6.0, buffer@^5.7.0: +buffer@^5.0.5, buffer@^5.2.1, buffer@^5.5.0, buffer@^5.6.0, buffer@^5.7.0: version "5.7.1" resolved 
"https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== @@ -3543,11 +3437,6 @@ bufferutil@^4.0.1: dependencies: node-gyp-build "^4.2.0" -builtin-status-codes@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" - integrity sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ== - busboy@^0.3.1: version "0.3.1" resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.3.1.tgz#170899274c5bf38aae27d5c62b71268cd585fd1b" @@ -3807,7 +3696,7 @@ chokidar@3.4.2: optionalDependencies: fsevents "~2.1.2" -chokidar@3.5.3, chokidar@^3.0.2: +chokidar@3.5.3: version "3.5.3" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== @@ -3832,7 +3721,7 @@ chownr@^2.0.0: resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== -cids@^0.7.1, cids@~0.7.0, cids@~0.7.1: +cids@^0.7.1: version "0.7.5" resolved "https://registry.yarnpkg.com/cids/-/cids-0.7.5.tgz#60a08138a99bfb69b6be4ceb63bfef7a396b28b2" integrity sha512-zT7mPeghoWAu+ppn8+BS1tQ5qGmbMfB4AregnQjA/qHY3GC1m1ptI9GkWNlgeu38r7CuRdXB47uY2XgAYt6QVA== @@ -3843,17 +3732,6 @@ cids@^0.7.1, cids@~0.7.0, cids@~0.7.1: multicodec "^1.0.0" multihashes "~0.4.15" -cids@~0.8.0: - version "0.8.3" - resolved "https://registry.yarnpkg.com/cids/-/cids-0.8.3.tgz#aaf48ac8ed857c3d37dad94d8db1d8c9407b92db" - integrity sha512-yoXTbV3llpm+EBGWKeL9xKtksPE/s6DPoDSY4fn8I8TEW1zehWXPSB0pwAXVDlLaOlrw+sNynj995uD9abmPhA== - dependencies: - buffer "^5.6.0" - 
class-is "^1.1.0" - multibase "^1.0.0" - multicodec "^1.0.1" - multihashes "^1.0.1" - cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" @@ -4050,7 +3928,7 @@ commander@3.0.2: resolved "https://registry.yarnpkg.com/commander/-/commander-3.0.2.tgz#6837c3fb677ad9933d1cfba42dd14d5117d6b39e" integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== -commander@^2.15.0, commander@^2.20.3: +commander@^2.20.3: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== @@ -4084,13 +3962,6 @@ concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: readable-stream "^2.2.2" typedarray "^0.0.6" -"concat-stream@github:hugomrdias/concat-stream#feat/smaller": - version "2.0.0" - resolved "https://codeload.github.com/hugomrdias/concat-stream/tar.gz/057bc7b5d6d8df26c8cf00a3f151b6721a0a8034" - dependencies: - inherits "^2.0.3" - readable-stream "^3.0.2" - configstore@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/configstore/-/configstore-4.0.0.tgz#5933311e95d3687efb592c528b922d9262d227e7" @@ -4529,11 +4400,6 @@ delegates@^1.0.0: resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= -delimit-stream@0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/delimit-stream/-/delimit-stream-0.1.0.tgz#9b8319477c0e5f8aeb3ce357ae305fc25ea1cd2b" - integrity sha512-a02fiQ7poS5CnjiJBAsjGLPp5EwVoGHNeu9sziBd9huppRfsAFIpv5zNLv0V1gbop53ilngAf5Kf331AwcoRBQ== - depd@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" @@ -4584,11 
+4450,6 @@ detect-newline@2.X: resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2" integrity sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I= -detect-node@^2.0.4: - version "2.1.0" - resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.1.0.tgz#c9c70775a49c3d03bc2c06d9a73be550f978f8b1" - integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g== - dicer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/dicer/-/dicer-0.3.0.tgz#eacd98b3bfbf92e8ab5c2fdb71aaac44bb06b872" @@ -4626,7 +4487,7 @@ dns-over-http-resolver@^1.2.3: native-fetch "^3.0.0" receptacle "^1.3.2" -docker-compose@0.23.19, docker-compose@^0.23.2: +docker-compose@0.23.19: version "0.23.19" resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.19.tgz#9947726e2fe67bdfa9e8efe1ff15aa0de2e10eb8" integrity sha512-v5vNLIdUqwj4my80wxFDkNH+4S85zsRuH29SO7dCWVWPCMt/ohZBsGN6g6KXWifT0pzQ7uOxqEKCYCDPJ8Vz4g== @@ -4643,7 +4504,7 @@ docker-modem@^1.0.8: readable-stream "~1.0.26-4" split-ca "^1.0.0" -dockerode@2.5.8, dockerode@^2.5.8: +dockerode@2.5.8: version "2.5.8" resolved "https://registry.yarnpkg.com/dockerode/-/dockerode-2.5.8.tgz#1b661e36e1e4f860e25f56e0deabe9f87f1d0acc" integrity sha512-+7iOUYBeDTScmOmQqpUYQaE7F4vvIt6+gIZNHWhqAQEI887tiPFB9OvXI/HzQYqfUNvukMK+9myLW63oTJPZpw== @@ -4757,15 +4618,6 @@ double-ended-queue@2.1.0-0: resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c" integrity sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw= -drbg.js@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/drbg.js/-/drbg.js-1.0.1.tgz#3e36b6c42b37043823cdbc332d58f31e2445480b" - integrity sha512-F4wZ06PvqxYLFEZKkFxTDcns9oFNk34hvmJSEwdzsxVQ8YI5YaxtACgQatkYgv2VI2CFkUd2Y+xosPQnHv809g== - dependencies: - browserify-aes "^1.0.6" - create-hash "^1.1.2" - create-hmac "^1.1.4" - duplexer3@^0.1.4: version 
"0.1.4" resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" @@ -4888,7 +4740,7 @@ encoding@^0.1.11, encoding@^0.1.13: dependencies: iconv-lite "^0.6.2" -end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1: +end-of-stream@^1.0.0, end-of-stream@^1.1.0: version "1.4.4" resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== @@ -4936,16 +4788,6 @@ entities@~2.1.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" integrity sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w== -err-code@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/err-code/-/err-code-1.1.2.tgz#06e0116d3028f6aef4806849eb0ea6a748ae6960" - integrity sha512-CJAN+O0/yA1CKfRn9SXOGctSpEM7DCon/r/5r2eXFMY2zCCJBasFhcM5I+1kh3Ap11FsQCX+vGHceNPvpWKhoA== - -err-code@^2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" - integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA== - err-code@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/err-code/-/err-code-3.0.1.tgz#a444c7b992705f2b120ee320b09972eef331c920" @@ -5543,11 +5385,6 @@ expand-range@^1.8.1: dependencies: fill-range "^2.1.0" -explain-error@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/explain-error/-/explain-error-1.0.4.tgz#a793d3ac0cad4c6ab571e9968fbbab6cb2532929" - integrity sha512-/wSgNMxFusiYRy1rd19LT2SQlIXDppHpumpWo06wxjflD1OYxDLbl6rMVw+U3bxD5Nuhex4TKqv9Aem4D0lVzQ== - express@^4.0.0, express@^4.14.0, express@^4.17.1: version "4.18.2" resolved 
"https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" @@ -5874,11 +5711,6 @@ flat@^4.1.0: dependencies: is-buffer "~2.0.3" -flatmap@0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/flatmap/-/flatmap-0.0.3.tgz#1f18a4d938152d495965f9c958d923ab2dd669b4" - integrity sha512-OuR+o7kHVe+x9RtIujPay7Uw3bvDZBZFSBXClEphZuSDLmZTqMdclasf4vFSsogC8baDz0eaC2NdO/2dlXHBKQ== - follow-redirects@^1.12.1: version "1.14.8" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.8.tgz#016996fb9a11a100566398b1c6839337d7bfa8fc" @@ -5988,7 +5820,7 @@ fs-extra@5.0.0: jsonfile "^4.0.0" universalify "^0.1.0" -fs-extra@9.1.0, fs-extra@^9.0.0, fs-extra@^9.1.0: +fs-extra@9.1.0, fs-extra@^9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== @@ -6251,7 +6083,7 @@ glob@^5.0.3: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^7.1.2, glob@^7.1.3: +glob@^7.1.3: version "7.2.3" resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== @@ -6341,42 +6173,6 @@ gluegun@5.1.2: which "2.0.2" yargs-parser "^21.0.0" -gluegun@^4.3.1: - version "4.7.1" - resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.7.1.tgz#89477f155b79c16e63e7386819b01943942a7993" - integrity sha512-5iLbLCU+jCf34zHrl+AKC39mDIpVKn/Z5B2uIS8TjHVaPBaDPnRD/VspiHy9dyF5mjr7Ogg1/gOt8yeWo7MEug== - dependencies: - apisauce "^2.1.5" - app-module-path "^2.2.0" - cli-table3 "~0.5.0" - colors "^1.3.3" - cosmiconfig "6.0.0" - cross-spawn "^7.0.0" - ejs "^2.6.1" - enquirer "2.3.4" - execa "^3.0.0" - fs-jetpack "^2.2.2" - lodash.camelcase "^4.3.0" - lodash.kebabcase "^4.1.1" - lodash.lowercase "^4.3.0" - lodash.lowerfirst "^4.3.1" - 
lodash.pad "^4.5.1" - lodash.padend "^4.6.1" - lodash.padstart "^4.6.1" - lodash.repeat "^4.1.0" - lodash.snakecase "^4.1.1" - lodash.startcase "^4.4.0" - lodash.trim "^4.5.1" - lodash.trimend "^4.5.1" - lodash.trimstart "^4.5.1" - lodash.uppercase "^4.3.0" - lodash.upperfirst "^4.3.1" - ora "^4.0.0" - pluralize "^8.0.0" - semver "^7.0.0" - which "^2.0.0" - yargs-parser "^16.1.0" - gluegun@^4.6.1: version "4.6.1" resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" @@ -6557,11 +6353,6 @@ graphql@15.5.0, graphql@^15.3.0: resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.5.0.tgz#39d19494dbe69d1ea719915b578bf920344a69d5" integrity sha512-OmaM7y0kaK31NKG31q4YbD2beNYa6jBBKtMFT6gLYJljHLJr42IqJ8KX08u3Li/0ifzTU5HjmoOOrwa5BRLeDA== -graphql@^15.5.0: - version "15.8.0" - resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.8.0.tgz#33410e96b012fa3bdb1091cc99a94769db212b38" - integrity sha512-5gghUc24tP9HRznNpV2+FIoq3xKkj5dTQqf4v0CpdPbFVwFkWoxOM+o+2OC9ZSvjEMTjfmG9QT+gcvggTwW1zw== - graphql@^16.6.0: version "16.6.0" resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.6.0.tgz#c2dcffa4649db149f6282af726c8c83f1c7c5fdb" @@ -6703,11 +6494,6 @@ header-case@^1.0.0: no-case "^2.2.0" upper-case "^1.1.3" -hi-base32@~0.5.0: - version "0.5.1" - resolved "https://registry.yarnpkg.com/hi-base32/-/hi-base32-0.5.1.tgz#1279f2ddae2673219ea5870c2121d2a33132857e" - integrity sha512-EmBBpvdYh/4XxsnUybsPag6VikPYnN30td+vQk+GI3qpahVEG9+gTkG0aXVxTjBqQ5T6ijbWIu77O+C5WFWsnA== - highlight.js@^10.4.0, highlight.js@^10.4.1: version "10.6.0" resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.6.0.tgz#0073aa71d566906965ba6e1b7be7b2682f5e18b6" @@ -6924,11 +6710,6 @@ immutable@4.2.1: resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.2.1.tgz#8a4025691018c560a40c67e43d698f816edc44d4" integrity sha512-7WYV7Q5BTs0nlQm7tl92rDYYoyELLKHoDMBKhrxEoiV4mrfVdRz8hzPiYOzH7yWjzoVEamxRuAqhxL2PLRwZYQ== 
-immutable@^3.8.2: - version "3.8.2" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" - integrity sha512-15gZoQ38eYjEjxkorfbcgBKBL6R7T459OuK+CpcWt7O3KF4uPCx2tD0uFETlUDIyo+1789crbMhTvQBSR5yBMg== - immutable@~3.7.6: version "3.7.6" resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" @@ -7017,34 +6798,16 @@ invert-kv@^1.0.0: resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" integrity sha1-EEqOSqym09jNFXqO+L+rLXo//bY= -ip-regex@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" - integrity sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw== - ip-regex@^4.0.0: version "4.3.0" resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== -ip@^1.1.5: - version "1.1.8" - resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.8.tgz#ae05948f6b075435ed3307acce04629da8cdbf48" - integrity sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg== - ipaddr.js@1.9.1: version "1.9.1" resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== -ipfs-block@~0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/ipfs-block/-/ipfs-block-0.8.1.tgz#05e1068832775e8f1c2da5b64106cc837fd2acb9" - integrity sha512-0FaCpmij+jZBoUYhjoB5ptjdl9QzvrdRIoBmUU5JiBnK2GA+4YM/ifklaB8ePRhA/rRzhd+KYBjvMFMAL4NrVQ== - dependencies: - cids "~0.7.0" - class-is "^1.1.0" - ipfs-core-types@^0.9.0: version "0.9.0" resolved 
"https://registry.yarnpkg.com/ipfs-core-types/-/ipfs-core-types-0.9.0.tgz#cb201ff7a9470651ba14c4e7fae56661a55bf37e" @@ -7105,63 +6868,6 @@ ipfs-http-client@55.0.0: stream-to-it "^0.2.2" uint8arrays "^3.0.0" -ipfs-http-client@^34.0.0: - version "34.0.0" - resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-34.0.0.tgz#8804d06a11c22306332a8ffa0949b6f672a0c9c8" - integrity sha512-4RCkk8ix4Dqn6sxqFVwuXWCZ1eLFPsVaj6Ijvu1fs9VYgxgVudsW9PWwarlr4mw1xUCmPWYyXnEbGgzBrfMy0Q== - dependencies: - abort-controller "^3.0.0" - async "^2.6.1" - bignumber.js "^9.0.0" - bl "^3.0.0" - bs58 "^4.0.1" - buffer "^5.4.2" - cids "~0.7.1" - concat-stream "github:hugomrdias/concat-stream#feat/smaller" - debug "^4.1.0" - detect-node "^2.0.4" - end-of-stream "^1.4.1" - err-code "^2.0.0" - explain-error "^1.0.4" - flatmap "0.0.3" - glob "^7.1.3" - ipfs-block "~0.8.1" - ipfs-utils "~0.0.3" - ipld-dag-cbor "~0.15.0" - ipld-dag-pb "~0.17.3" - ipld-raw "^4.0.0" - is-ipfs "~0.6.1" - is-pull-stream "0.0.0" - is-stream "^2.0.0" - iso-stream-http "~0.1.2" - iso-url "~0.4.6" - iterable-ndjson "^1.1.0" - just-kebab-case "^1.1.0" - just-map-keys "^1.1.0" - kind-of "^6.0.2" - ky "^0.11.2" - ky-universal "^0.2.2" - lru-cache "^5.1.1" - multiaddr "^6.0.6" - multibase "~0.6.0" - multicodec "~0.5.1" - multihashes "~0.4.14" - ndjson "github:hugomrdias/ndjson#feat/readable-stream3" - once "^1.4.0" - peer-id "~0.12.3" - peer-info "~0.15.1" - promise-nodeify "^3.0.1" - promisify-es6 "^1.0.3" - pull-defer "~0.2.3" - pull-stream "^3.6.9" - pull-to-stream "~0.1.1" - pump "^3.0.0" - qs "^6.5.2" - readable-stream "^3.1.1" - stream-to-pull-stream "^1.7.2" - tar-stream "^2.0.1" - through2 "^3.0.1" - ipfs-unixfs@^6.0.3: version "6.0.9" resolved "https://registry.yarnpkg.com/ipfs-unixfs/-/ipfs-unixfs-6.0.9.tgz#f6613b8e081d83faa43ed96e016a694c615a9374" @@ -7192,52 +6898,6 @@ ipfs-utils@^9.0.2: react-native-fetch-api "^3.0.0" stream-to-it "^0.2.2" -ipfs-utils@~0.0.3: - version "0.0.4" - resolved 
"https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-0.0.4.tgz#946114cfeb6afb4454b4ccb10d2327cd323b0cce" - integrity sha512-7cZf6aGj2FG3XJWhCNwn4mS93Q0GEWjtBZvEHqzgI43U2qzNDCyzfS1pei1Y5F+tw/zDJ5U4XG0G9reJxR53Ig== - dependencies: - buffer "^5.2.1" - is-buffer "^2.0.3" - is-electron "^2.2.0" - is-pull-stream "0.0.0" - is-stream "^2.0.0" - kind-of "^6.0.2" - readable-stream "^3.4.0" - -ipld-dag-cbor@~0.15.0: - version "0.15.3" - resolved "https://registry.yarnpkg.com/ipld-dag-cbor/-/ipld-dag-cbor-0.15.3.tgz#283afdb81d5b07db8e4fff7a10ef5e517e87f299" - integrity sha512-m23nG7ZyoVFnkK55/bLAErc7EfiMgaEQlqHWDTGzPI+O5r6bPfp+qbL5zTVSIT8tpbHmu174dwerVtLoVgeVyA== - dependencies: - borc "^2.1.2" - buffer "^5.5.0" - cids "~0.8.0" - is-circular "^1.0.2" - multicodec "^1.0.0" - multihashing-async "~0.8.0" - -ipld-dag-pb@~0.17.3: - version "0.17.4" - resolved "https://registry.yarnpkg.com/ipld-dag-pb/-/ipld-dag-pb-0.17.4.tgz#080841cfdd014d996f8da7f3a522ec8b1f6b6494" - integrity sha512-YwCxETEMuXVspOKOhjIOHJvKvB/OZfCDkpSFiYBQN2/JQjM9y/RFCYzIQGm0wg7dCFLrhvfjAZLTSaKs65jzWA== - dependencies: - cids "~0.7.0" - class-is "^1.1.0" - multicodec "~0.5.1" - multihashing-async "~0.7.0" - protons "^1.0.1" - stable "~0.1.8" - -ipld-raw@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/ipld-raw/-/ipld-raw-4.0.1.tgz#49a6f58cdfece5a4d581925b19ee19255be2a29d" - integrity sha512-WjIdtZ06jJEar8zh+BHB84tE6ZdbS/XNa7+XCArOYfmeJ/c01T9VQpeMwdJQYn5c3s5UvvCu7y4VIi3vk2g1bA== - dependencies: - cids "~0.7.0" - multicodec "^1.0.0" - multihashing-async "~0.8.0" - is-arguments@^1.0.4, is-arguments@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.0.tgz#62353031dfbee07ceb34656a6bde59efecae8dd9" @@ -7277,7 +6937,7 @@ is-buffer@^1.1.5: resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== 
-is-buffer@^2.0.3, is-buffer@~2.0.3: +is-buffer@~2.0.3: version "2.0.5" resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== @@ -7292,11 +6952,6 @@ is-callable@^1.2.3: resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.3.tgz#8b1e0500b73a1d76c70487636f368e519de8db8e" integrity sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ== -is-circular@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-circular/-/is-circular-1.0.2.tgz#2e0ab4e9835f4c6b0ea2b9855a84acd501b8366c" - integrity sha512-YttjnrswnUYRVJvxCvu8z+PGMUSzC2JttP0OEXezlAEdp3EXzhf7IZ3j0gRAybJBQupedIZFhY61Tga6E0qASA== - is-core-module@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.2.0.tgz#97037ef3d52224d85163f5597b2b63d9afed981a" @@ -7423,13 +7078,6 @@ is-interactive@^1.0.0: resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== -is-ip@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-2.0.0.tgz#68eea07e8a0a0a94c2d080dd674c731ab2a461ab" - integrity sha512-9MTn0dteHETtyUx8pxqMwg5hMBi3pvlyglJ+b79KOCca0po23337LbVV2Hl4xmMvfw++ljnO0/+5G6G+0Szh6g== - dependencies: - ip-regex "^2.0.0" - is-ip@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-3.1.0.tgz#2ae5ddfafaf05cb8008a62093cf29734f657c5d8" @@ -7437,18 +7085,6 @@ is-ip@^3.1.0: dependencies: ip-regex "^4.0.0" -is-ipfs@~0.6.1: - version "0.6.3" - resolved "https://registry.yarnpkg.com/is-ipfs/-/is-ipfs-0.6.3.tgz#82a5350e0a42d01441c40b369f8791e91404c497" - integrity 
sha512-HyRot1dvLcxImtDqPxAaY1miO6WsiP/z7Yxpg2qpaLWv5UdhAPtLvHJ4kMLM0w8GSl8AFsVF23PHe1LzuWrUlQ== - dependencies: - bs58 "^4.0.1" - cids "~0.7.0" - mafmt "^7.0.0" - multiaddr "^7.2.1" - multibase "~0.6.0" - multihashes "~0.4.13" - is-lower-case@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/is-lower-case/-/is-lower-case-1.1.3.tgz#7e147be4768dc466db3bfb21cc60b31e6ad69393" @@ -7525,16 +7161,6 @@ is-promise@4.0.0: resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-4.0.0.tgz#42ff9f84206c1991d26debf520dd5c01042dd2f3" integrity sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ== -is-promise@~1, is-promise@~1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-1.0.1.tgz#31573761c057e33c2e91aab9e96da08cefbe76e5" - integrity sha512-mjWH5XxnhMA8cFnDchr6qRP9S/kLntKuEfIYku+PaN1CnS8v+OG9O/BKpRCVRJvpIkgAZm0Pf5Is3iSSOILlcg== - -is-pull-stream@0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/is-pull-stream/-/is-pull-stream-0.0.0.tgz#a3bc3d1c6d3055151c46bde6f399efed21440ca9" - integrity sha512-NWLwqCc95I6m8FZDYLAmVJc9Xgk8O+8pPOoDKFTC293FH4S7FBcbLCw3WWPCdiT8uUSdzPy47VM08WPDMJJrag== - is-regex@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.2.tgz#81c8ebde4db142f2cf1c53fc86d6a45788266251" @@ -7657,33 +7283,11 @@ isexe@^2.0.0: resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== -iso-random-stream@^1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/iso-random-stream/-/iso-random-stream-1.1.2.tgz#c703da2c518db573277c5678cc43c5298283d64c" - integrity sha512-7y0tsBBgQs544iTYjyrMp5xvgrbYR8b+plQq1Bryp+03p0LssrxC9C1M0oHv4QESDt7d95c74XvMk/yawKqX+A== - dependencies: - buffer "^6.0.3" - readable-stream "^3.4.0" - -iso-stream-http@~0.1.2: - version "0.1.2" - resolved 
"https://registry.yarnpkg.com/iso-stream-http/-/iso-stream-http-0.1.2.tgz#b3dfea4c9f23ff26d078d40c539cfc0dfebacd37" - integrity sha512-oHEDNOysIMTNypbg2f1SlydqRBvjl4ZbSE9+0awVxnkx3K2stGTFwB/kpVqnB6UEfF8QD36kAjDwZvqyXBLMnQ== - dependencies: - builtin-status-codes "^3.0.0" - inherits "^2.0.1" - readable-stream "^3.1.1" - iso-url@^1.1.5: version "1.2.1" resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-1.2.1.tgz#db96a49d8d9a64a1c889fc07cc525d093afb1811" integrity sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng== -iso-url@~0.4.6, iso-url@~0.4.7: - version "0.4.7" - resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-0.4.7.tgz#de7e48120dae46921079fe78f325ac9e9217a385" - integrity sha512-27fFRDnPAMnHGLq36bWTpKET+eiXct3ENlCcdcMdk+mjXrb2kw3mhBUg1B7ewAC0kVzlOPhADzQgz1SE6Tglog== - isobject@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" @@ -7759,13 +7363,6 @@ it-to-stream@^1.0.0: p-fifo "^1.0.0" readable-stream "^3.6.0" -iterable-ndjson@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/iterable-ndjson/-/iterable-ndjson-1.1.0.tgz#36f7e8a5bb04fd087d384f29e44fc4280fc014fc" - integrity sha512-OOp1Lb0o3k5MkXHx1YaIY5Z0ELosZfTnBaas9f8opJVcZGBIONA2zY/6CYE+LKkqrSDooIneZbrBGgOZnHPkrg== - dependencies: - string_decoder "^1.2.0" - iterall@^1.1.3, iterall@^1.2.1, iterall@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/iterall/-/iterall-1.3.0.tgz#afcb08492e2915cbd8a0884eb93a8c94d0d72fea" @@ -7812,31 +7409,12 @@ jayson@4.0.0: uuid "^8.3.2" ws "^7.4.5" -jayson@^3.0.2: - version "3.7.0" - resolved "https://registry.yarnpkg.com/jayson/-/jayson-3.7.0.tgz#b735b12d06d348639ae8230d7a1e2916cb078f25" - integrity sha512-tfy39KJMrrXJ+mFcMpxwBvFDetS8LAID93+rycFglIQM4kl3uNR3W4lBLE/FFhsoUCEox5Dt2adVpDm/XtebbQ== - dependencies: - "@types/connect" "^3.4.33" - "@types/node" "^12.12.54" - "@types/ws" "^7.4.4" - JSONStream "^1.3.5" - 
commander "^2.20.3" - delay "^5.0.0" - es6-promisify "^5.0.0" - eyes "^0.1.8" - isomorphic-ws "^4.0.1" - json-stringify-safe "^5.0.1" - lodash "^4.17.20" - uuid "^8.3.2" - ws "^7.4.5" - js-sha3@0.5.7, js-sha3@^0.5.7: version "0.5.7" resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" integrity sha1-DU/9gALVMzqrr0oj7tL2N0yfKOc= -js-sha3@0.8.0, js-sha3@^0.8.0, js-sha3@~0.8.0: +js-sha3@0.8.0, js-sha3@^0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840" integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q== @@ -7859,7 +7437,7 @@ js-yaml@3.14.0: argparse "^1.0.7" esprima "^4.0.0" -js-yaml@3.14.1, js-yaml@^3.13.1, js-yaml@^3.14.1: +js-yaml@3.14.1, js-yaml@^3.14.1: version "3.14.1" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== @@ -7972,13 +7550,6 @@ json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1: resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== -json-text-sequence@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/json-text-sequence/-/json-text-sequence-0.1.1.tgz#a72f217dc4afc4629fff5feb304dc1bd51a2f3d2" - integrity sha512-L3mEegEWHRekSHjc7+sc8eJhba9Clq1PZ8kMkzf8OxElhXc8O4TS5MwcVlj9aEbm5dr81N90WHC5nAz3UO971w== - dependencies: - delimit-stream "0.1.0" - json5@^0.5.1: version "0.5.1" resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" @@ -8042,16 +7613,6 @@ jsprim@^1.2.2: json-schema "0.4.0" verror "1.10.0" -just-kebab-case@^1.1.0: - version "1.1.0" - resolved 
"https://registry.yarnpkg.com/just-kebab-case/-/just-kebab-case-1.1.0.tgz#ebe854fde84b0afa4e597fcd870b12eb3c026755" - integrity sha512-QkuwuBMQ9BQHMUEkAtIA4INLrkmnnveqlFB1oFi09gbU0wBdZo6tTnyxNWMR84zHxBuwK7GLAwqN8nrvVxOLTA== - -just-map-keys@^1.1.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/just-map-keys/-/just-map-keys-1.2.1.tgz#ef6e16133b7d34329962dfae9101d581abb1b143" - integrity sha512-Dmyz1Cy2SWM+PpqDPB1kdDglyexdzMthnAsvOIE9w4OPj8NDRuY1mh20x/JfG5w6fCGw9F0WmcofJhYZ4MiuyA== - keccak@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/keccak/-/keccak-3.0.2.tgz#4c2c6e8c54e04f2670ee49fa734eb9da152206e0" @@ -8061,11 +7622,6 @@ keccak@^3.0.0: node-gyp-build "^4.2.0" readable-stream "^3.6.0" -keypair@^1.0.1: - version "1.0.4" - resolved "https://registry.yarnpkg.com/keypair/-/keypair-1.0.4.tgz#a749a45f388593f3950f18b3757d32a93bd8ce83" - integrity sha512-zwhgOhhniaL7oxMgUMKKw5219PWWABMO+dgMnzJOQ2/5L3XJtTJGhW2PEXlxXj9zaccdReZJZ83+4NPhVfNVDg== - keyv@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" @@ -8080,7 +7636,7 @@ kind-of@^3.0.2: dependencies: is-buffer "^1.1.5" -kind-of@^6.0.0, kind-of@^6.0.2: +kind-of@^6.0.0: version "6.0.3" resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== @@ -8092,19 +7648,6 @@ klaw@^1.0.0: optionalDependencies: graceful-fs "^4.1.9" -ky-universal@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/ky-universal/-/ky-universal-0.2.2.tgz#7a36e1a75641a98f878157463513965f799f5bfe" - integrity sha512-fb32o/fKy/ux2ALWa9HU2hvGtfOq7/vn2nH0FpVE+jwNzyTeORlAbj3Fiw+WLMbUlmVqZIWupnLZ2USHvqwZHw== - dependencies: - abort-controller "^3.0.0" - node-fetch "^2.3.0" - -ky@^0.11.2: - version "0.11.2" - resolved 
"https://registry.yarnpkg.com/ky/-/ky-0.11.2.tgz#4ffe6621d9d9ab61bf0f5500542e3a96d1ba0815" - integrity sha512-5Aou5BWue5/mkPqIRqzSWW+0Hkl403pr/2AIrCKYw7cVl/Xoe8Xe4KLBO0PRjbz7GnRe1/8wW1KhqQNFFE7/GQ== - lazy-debug-legacy@0.0.X: version "0.0.1" resolved "https://registry.yarnpkg.com/lazy-debug-legacy/-/lazy-debug-legacy-0.0.1.tgz#537716c0776e4cf79e3ed1b621f7658c2911b1b1" @@ -8298,40 +7841,6 @@ levn@~0.3.0: prelude-ls "~1.1.2" type-check "~0.3.2" -libp2p-crypto-secp256k1@~0.3.0: - version "0.3.1" - resolved "https://registry.yarnpkg.com/libp2p-crypto-secp256k1/-/libp2p-crypto-secp256k1-0.3.1.tgz#4cbeb857f5cfe5fefb1253e6b2994420c0ca166e" - integrity sha512-evrfK/CeUSd/lcELUdDruyPBvxDmLairth75S32OLl3H+++2m2fV24JEtxzdFS9JH3xEFw0h6JFO8DBa1bP9dA== - dependencies: - async "^2.6.2" - bs58 "^4.0.1" - multihashing-async "~0.6.0" - nodeify "^1.0.1" - safe-buffer "^5.1.2" - secp256k1 "^3.6.2" - -libp2p-crypto@~0.16.1: - version "0.16.4" - resolved "https://registry.yarnpkg.com/libp2p-crypto/-/libp2p-crypto-0.16.4.tgz#fb1a4ba39d56789303947784b5b0d6cefce12fdc" - integrity sha512-II8HxKc9jbmQp34pprlluNxsBCWJDjHRPYJzuRy7ragztNip9Zb7uJ4lCje6gGzz4DNAcHkAUn+GqCIK1592iA== - dependencies: - asmcrypto.js "^2.3.2" - asn1.js "^5.0.1" - async "^2.6.1" - bn.js "^4.11.8" - browserify-aes "^1.2.0" - bs58 "^4.0.1" - iso-random-stream "^1.1.0" - keypair "^1.0.1" - libp2p-crypto-secp256k1 "~0.3.0" - multihashing-async "~0.5.1" - node-forge "^0.10.0" - pem-jwk "^2.0.0" - protons "^1.0.1" - rsa-pem-to-jwk "^1.1.3" - tweetnacl "^1.0.0" - ursa-optional "~0.10.0" - lines-and-columns@^1.1.6: version "1.2.4" resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" @@ -8572,7 +8081,7 @@ lodash.zipwith@^4.2.0: resolved "https://registry.yarnpkg.com/lodash.zipwith/-/lodash.zipwith-4.2.0.tgz#afacf03fd2f384af29e263c3c6bda3b80e3f51fd" integrity sha1-r6zwP9LzhK8p4mPDxr2juA4/Uf0= -lodash@4.17.21, lodash@^4.1.0, lodash@^4.15.0, 
lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.4, lodash@^4.2.1: +lodash@4.17.21, lodash@^4.1.0, lodash@^4.15.0, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.4, lodash@^4.2.1: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -8613,11 +8122,6 @@ long@^5.2.0: resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== -looper@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/looper/-/looper-3.0.0.tgz#2efa54c3b1cbaba9b94aee2e5914b0be57fbb749" - integrity sha512-LJ9wplN/uSn72oJRsXTx+snxPet5c8XiZmOKCm906NVYu+ag6SB6vUcnJcWxgnl2NfbIyeobAn7Bwv6xRj2XJg== - loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -8654,13 +8158,6 @@ lowercase-keys@^2.0.0: resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== -lru-cache@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" - integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== - dependencies: - yallist "^3.0.2" - lru-cache@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" @@ -8678,20 +8175,6 @@ ltgt@2.2.1, ltgt@^2.1.2, ltgt@~2.2.0: resolved 
"https://registry.yarnpkg.com/ltgt/-/ltgt-2.2.1.tgz#f35ca91c493f7b73da0e07495304f17b31f87ee5" integrity sha1-81ypHEk/e3PaDgdJUwTxezH4fuU= -mafmt@^6.0.2: - version "6.0.10" - resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-6.0.10.tgz#3ad251c78f14f8164e66f70fd3265662da41113a" - integrity sha512-FjHDnew6dW9lUu3eYwP0FvvJl9uvNbqfoJM+c1WJcSyutNEIlyu6v3f/rlPnD1cnmue38IjuHlhBdIh3btAiyw== - dependencies: - multiaddr "^6.1.0" - -mafmt@^7.0.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-7.1.0.tgz#4126f6d0eded070ace7dbbb6fb04977412d380b5" - integrity sha512-vpeo9S+hepT3k2h5iFxzEHvvR0GPBx9uKaErmnRzYNcaKb03DgOArjEMlgG4a9LcuZZ89a3I8xbeto487n26eA== - dependencies: - multiaddr "^7.3.0" - make-dir@^1.0.0: version "1.3.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" @@ -9106,30 +8589,6 @@ multiaddr@^10.0.0: uint8arrays "^3.0.0" varint "^6.0.0" -multiaddr@^6.0.3, multiaddr@^6.0.6, multiaddr@^6.1.0: - version "6.1.1" - resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-6.1.1.tgz#9aae57b3e399089b9896d9455afa8f6b117dff06" - integrity sha512-Q1Ika0F9MNhMtCs62Ue+GWIJtRFEhZ3Xz8wH7/MZDVZTWhil1/H2bEGN02kUees3hkI3q1oHSjmXYDM0gxaFjQ== - dependencies: - bs58 "^4.0.1" - class-is "^1.1.0" - hi-base32 "~0.5.0" - ip "^1.1.5" - is-ip "^2.0.0" - varint "^5.0.0" - -multiaddr@^7.2.1, multiaddr@^7.3.0: - version "7.5.0" - resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-7.5.0.tgz#976c88e256e512263445ab03b3b68c003d5f485e" - integrity sha512-GvhHsIGDULh06jyb6ev+VfREH9evJCFIRnh3jUt9iEZ6XDbyoisZRFEI9bMvK/AiR6y66y6P+eoBw9mBYMhMvw== - dependencies: - buffer "^5.5.0" - cids "~0.8.0" - class-is "^1.1.0" - is-ip "^3.1.0" - multibase "^0.7.0" - varint "^5.0.0" - multibase@^0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/multibase/-/multibase-0.7.0.tgz#1adfc1c50abe05eefeb5091ac0c2728d6b84581b" @@ -9138,14 +8597,6 @@ multibase@^0.7.0: base-x "^3.0.8" buffer "^5.5.0" 
-multibase@^1.0.0, multibase@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/multibase/-/multibase-1.0.1.tgz#4adbe1de0be8a1ab0274328b653c3f1903476724" - integrity sha512-KcCxpBVY8fdVKu4dJMAahq4F/2Z/9xqEjIiR7PiMe7LRGeorFn2NLmicN6nLBCqQvft6MG2Lc9X5P0IdyvnxEw== - dependencies: - base-x "^3.0.8" - buffer "^5.5.0" - multibase@~0.6.0: version "0.6.1" resolved "https://registry.yarnpkg.com/multibase/-/multibase-0.6.1.tgz#b76df6298536cc17b9f6a6db53ec88f85f8cc12b" @@ -9154,14 +8605,14 @@ multibase@~0.6.0: base-x "^3.0.8" buffer "^5.5.0" -multicodec@^0.5.5, multicodec@~0.5.1: +multicodec@^0.5.5: version "0.5.7" resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-0.5.7.tgz#1fb3f9dd866a10a55d226e194abba2dcc1ee9ffd" integrity sha512-PscoRxm3f+88fAtELwUnZxGDkduE2HD9Q6GHUOywQLjOGT/HAdhjLDYNZ1e7VR0s0TP0EwZ16LNUTFpoBGivOA== dependencies: varint "^5.0.0" -multicodec@^1.0.0, multicodec@^1.0.1: +multicodec@^1.0.0: version "1.0.4" resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-1.0.4.tgz#46ac064657c40380c28367c90304d8ed175a714f" integrity sha512-NDd7FeS3QamVtbgfvu5h7fd1IlbaC4EQ0/pgU4zqE2vdHCmBGsUa0TiM8/TdSeG6BMPC92OOCf8F1ocE/Wkrrg== @@ -9174,7 +8625,7 @@ multiformats@^9.4.13, multiformats@^9.4.2, multiformats@^9.4.5, multiformats@^9. resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-9.9.0.tgz#c68354e7d21037a8f1f8833c8ccd68618e8f1d37" integrity sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg== -multihashes@^0.4.15, multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4.15: +multihashes@^0.4.15, multihashes@~0.4.15: version "0.4.21" resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-0.4.21.tgz#dc02d525579f334a7909ade8a122dabb58ccfcb5" integrity sha512-uVSvmeCWf36pU2nB4/1kzYZjsXD9vofZKpgudqkceYY5g2aZZXJ5r9lxuzoRLl1OAp28XljXsEJ/X/85ZsKmKw== @@ -9183,71 +8634,6 @@ multihashes@^0.4.15, multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4. 
multibase "^0.7.0" varint "^5.0.0" -multihashes@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-1.0.1.tgz#a89415d68283cf6287c6e219e304e75ce7fb73fe" - integrity sha512-S27Tepg4i8atNiFaU5ZOm3+gl3KQlUanLs/jWcBxQHFttgq+5x1OgbQmf2d8axJ/48zYGBd/wT9d723USMFduw== - dependencies: - buffer "^5.6.0" - multibase "^1.0.1" - varint "^5.0.0" - -multihashing-async@~0.5.1: - version "0.5.2" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.5.2.tgz#4af40e0dde2f1dbb12a7c6b265181437ac26b9de" - integrity sha512-mmyG6M/FKxrpBh9xQDUvuJ7BbqT93ZeEeH5X6LeMYKoYshYLr9BDdCsvDtZvn+Egf+/Xi+aOznrWL4vp3s+p0Q== - dependencies: - blakejs "^1.1.0" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js "^3.0.1" - nodeify "^1.0.1" - -multihashing-async@~0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.6.0.tgz#c1fc6696a624b9bf39b160b0c4c4e7ba3f394453" - integrity sha512-Qv8pgg99Lewc191A5nlXy0bSd2amfqlafNJZmarU6Sj7MZVjpR94SCxQjf4DwPtgWZkiLqsjUQBXA2RSq+hYyA== - dependencies: - blakejs "^1.1.0" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js "^3.0.1" - nodeify "^1.0.1" - -multihashing-async@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.7.0.tgz#3234fb98295be84386b85bfd20377d3e5be20d6b" - integrity sha512-SCbfl3f+DzJh+/5piukga9ofIOxwfT05t8R4jfzZIJ88YE9zU9+l3K2X+XB19MYyxqvyK9UJRNWbmQpZqQlbRA== - dependencies: - blakejs "^1.1.0" - buffer "^5.2.1" - err-code "^1.1.2" - js-sha3 "~0.8.0" - multihashes "~0.4.13" - murmurhash3js-revisited "^3.0.0" - -multihashing-async@~0.8.0: - version "0.8.2" - resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.8.2.tgz#3d5da05df27d83be923f6d04143a0954ff87f27f" - integrity sha512-2lKa1autuCy8x7KIEj9aVNbAb3aIMRFYIwN7mq/zD4pxgNIVgGlm+f6GKY4880EOF2Y3GktHYssRy7TAJQ2DyQ== - dependencies: - blakejs "^1.1.0" - buffer "^5.4.3" - err-code "^2.0.0" - 
js-sha3 "^0.8.0" - multihashes "^1.0.1" - murmurhash3js-revisited "^3.0.0" - -murmurhash3js-revisited@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/murmurhash3js-revisited/-/murmurhash3js-revisited-3.0.0.tgz#6bd36e25de8f73394222adc6e41fa3fac08a5869" - integrity sha512-/sF3ee6zvScXMb1XFJ8gDsSnY+X8PbOyjIuBhtgis10W2Jx4ZjIhikUCIF9c4gpJxVnQIsPAFrSwTCuAjicP6g== - -murmurhash3js@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/murmurhash3js/-/murmurhash3js-3.0.1.tgz#3e983e5b47c2a06f43a713174e7e435ca044b998" - integrity sha512-KL8QYUaxq7kUbcl0Yto51rMcYt7E/4N4BG3/c96Iqw1PQrTRspu8Cpx4TZ4Nunib1d4bEkIH3gjCYlP2RLBdow== - mute-stream@0.0.8: version "0.0.8" resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" @@ -9258,11 +8644,6 @@ nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== -nan@^2.14.0, nan@^2.14.2: - version "2.17.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.17.0.tgz#c0150a2368a182f033e9aa5195ec76ea41a199cb" - integrity sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ== - nano-json-stream-parser@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/nano-json-stream-parser/-/nano-json-stream-parser-0.1.2.tgz#0cc8f6d0e2b622b479c40d499c46d64b755c6f5f" @@ -9303,15 +8684,6 @@ natural-orderby@^2.0.3: resolved "https://registry.yarnpkg.com/natural-orderby/-/natural-orderby-2.0.3.tgz#8623bc518ba162f8ff1cdb8941d74deb0fdcc016" integrity sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q== -"ndjson@github:hugomrdias/ndjson#feat/readable-stream3": - version "1.5.0" - resolved "https://codeload.github.com/hugomrdias/ndjson/tar.gz/4db16da6b42e5b39bf300c3a7cde62abb3fa3a11" - dependencies: - json-stringify-safe 
"^5.0.1" - minimist "^1.2.0" - split2 "^3.1.0" - through2 "^3.0.0" - needle@^2.2.1: version "2.6.0" resolved "https://registry.yarnpkg.com/needle/-/needle-2.6.0.tgz#24dbb55f2509e2324b4a99d61f413982013ccdbe" @@ -9384,13 +8756,6 @@ node-fetch@2.6.1: resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== -node-fetch@^2.3.0, node-fetch@^2.6.8: - version "2.6.11" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25" - integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w== - dependencies: - whatwg-url "^5.0.0" - node-fetch@^2.6.1: version "2.6.6" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.6.tgz#1751a7c01834e8e1697758732e9efb6eeadfaf89" @@ -9398,10 +8763,12 @@ node-fetch@^2.6.1: dependencies: whatwg-url "^5.0.0" -node-forge@^0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" - integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA== +node-fetch@^2.6.8: + version "2.6.11" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25" + integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w== + dependencies: + whatwg-url "^5.0.0" node-gyp-build@^4.2.0: version "4.5.0" @@ -9451,14 +8818,6 @@ node-releases@^1.1.70: resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" integrity sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== -nodeify@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/nodeify/-/nodeify-1.0.1.tgz#64ab69a7bdbaf03ce107b4f0335c87c0b9e91b1d" - integrity sha512-n7C2NyEze8GCo/z73KdbjRsBiLbv6eBn1FxwYKQ23IqGo7pQY3mhQan61Sv7eEDJCiyUjTVrVkXTzJCo1dW7Aw== - dependencies: - is-promise "~1.0.0" - promise "~1.3.0" - nofilter@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/nofilter/-/nofilter-1.0.4.tgz#78d6f4b6a613e7ced8b015cec534625f7667006e" @@ -9589,11 +8948,6 @@ object-assign@4.1.0: resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0" integrity sha1-ejs9DpgGPUP0wD8uiubNUahog6A= -object-assign@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa" - integrity sha512-CdsOUYIh5wIiozhJ3rLQgmUTgcyzFwZZrqhkKhODMoGtPKM+wt0h0CNIoauJWMsS9822EdzPsF/6mb4nLvPN5g== - object-assign@^4, object-assign@^4.0.0, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" @@ -9721,13 +9075,6 @@ optimism@^0.14.0: "@wry/context" "^0.5.2" "@wry/trie" "^0.2.1" -optimist@~0.3.5: - version "0.3.7" - resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9" - integrity sha512-TCx0dXQzVtSCg2OgY/bO9hjM9cV4XYx09TVK+s3+FhkjT6LovsLe+pPMzpWf+6yXK/hUizs2gUoTw3jHM0VaTQ== - dependencies: - wordwrap "~0.0.2" - optionator@^0.8.1: version "0.8.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" @@ -10146,33 +9493,6 @@ pbkdf2@^3.0.3: safe-buffer "^5.0.1" sha.js "^2.4.8" -peer-id@~0.12.2, peer-id@~0.12.3: - version "0.12.5" - resolved "https://registry.yarnpkg.com/peer-id/-/peer-id-0.12.5.tgz#b22a1edc5b4aaaa2bb830b265ba69429823e5179" - integrity 
sha512-3xVWrtIvNm9/OPzaQBgXDrfWNx63AftgFQkvqO6YSZy7sP3Fuadwwbn54F/VO9AnpyW/26i0WRQz9FScivXrmw== - dependencies: - async "^2.6.3" - class-is "^1.1.0" - libp2p-crypto "~0.16.1" - multihashes "~0.4.15" - -peer-info@~0.15.1: - version "0.15.1" - resolved "https://registry.yarnpkg.com/peer-info/-/peer-info-0.15.1.tgz#21254a7c516d0dd046b150120b9aaf1b9ad02146" - integrity sha512-Y91Q2tZRC0CpSTPd1UebhGqniOrOAk/aj60uYUcWJXCoLTAnGu+4LJGoiay8ayudS6ice7l3SKhgL/cS62QacA== - dependencies: - mafmt "^6.0.2" - multiaddr "^6.0.3" - peer-id "~0.12.2" - unique-by "^1.0.0" - -pem-jwk@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/pem-jwk/-/pem-jwk-2.0.0.tgz#1c5bb264612fc391340907f5c1de60c06d22f085" - integrity sha512-rFxu7rVoHgQ5H9YsP50dDWf0rHjreVA2z0yPiWr5WdH/UHb29hKtF7h6l8vNd1cbYR1t0QL+JKhW55a2ZV4KtA== - dependencies: - asn1.js "^5.0.1" - performance-now@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" @@ -10220,11 +9540,6 @@ pkg-conf@^1.1.2: object-assign "^4.0.1" symbol "^0.2.1" -pkginfo@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/pkginfo/-/pkginfo-0.4.1.tgz#b5418ef0439de5425fc4995042dced14fb2a84ff" - integrity sha512-8xCNE/aT/EXKenuMDZ+xTVwkT8gsoHN2z/Q29l80u0ppGEXVvsKRzNMbtKhg8LS8k1tJLAHHylf6p4VFmP6XUQ== - pluralize@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-8.0.0.tgz#1a6fa16a38d12a1901e0320fa017051c539ce3b1" @@ -10527,7 +9842,7 @@ preserve@^0.2.0: resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" integrity sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks= -prettier@1.19.1, prettier@^1.13.5: +prettier@1.19.1: version "1.19.1" resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.19.1.tgz#f7d7f5ff8a9cd872a7be4ca142095956a60797cb" integrity sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew== @@ -10552,11 
+9867,6 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= -promise-nodeify@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/promise-nodeify/-/promise-nodeify-3.0.1.tgz#f0f5d9720ee9ec71dd2bfa92667be504c10229c2" - integrity sha512-ghsSuzZXJX8iO7WVec2z7GI+Xk/EyiD+JZK7AZKhUqYfpLa/Zs4ylUD+CwwnKlG6G3HnkUPMAi6PO7zeqGKssg== - promise-to-callback@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/promise-to-callback/-/promise-to-callback-1.0.0.tgz#5d2a749010bfb67d963598fcd3960746a68feef7" @@ -10590,18 +9900,6 @@ promise@^8.0.0: dependencies: asap "~2.0.6" -promise@~1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/promise/-/promise-1.3.0.tgz#e5cc9a4c8278e4664ffedc01c7da84842b040175" - integrity sha512-R9WrbTF3EPkVtWjp7B7umQGVndpsi+rsDAfrR4xAALQpFLa/+2OriecLhawxzvii2gd9+DZFwROWDuUUaqS5yA== - dependencies: - is-promise "~1" - -promisify-es6@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/promisify-es6/-/promisify-es6-1.0.3.tgz#b012668c4df3c965ce13daac2b3a4d1726a96346" - integrity sha512-N9iVG+CGJsI4b4ZGazjwLnxErD2d9Pe4DPvvXSxYA9tFNu8ymXME4Qs5HIQ0LMJpNM7zj+m0NlNnNeqFpKzqnA== - prop-types@^15.7.2: version "15.7.2" resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" @@ -10630,21 +9928,6 @@ protobufjs@^6.10.2: "@types/node" ">=13.7.0" long "^4.0.0" -protocol-buffers-schema@^3.3.1: - version "3.6.0" - resolved "https://registry.yarnpkg.com/protocol-buffers-schema/-/protocol-buffers-schema-3.6.0.tgz#77bc75a48b2ff142c1ad5b5b90c94cd0fa2efd03" - integrity sha512-TdDRD+/QNdrCGCE7v8340QyuXd4kIWIgapsE2+n/SaGiSSbomYl4TjHlvIoCWRpE7wFt02EpB35VVA2ImcBVqw== - -protons@^1.0.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/protons/-/protons-1.2.1.tgz#5f1e0db8b2139469cd1c3b4e332a4c2d95d0a218" - integrity 
sha512-2oqDyc/SN+tNcJf8XxrXhYL7sQn2/OMl8mSdD7NVGsWjMEmAbks4eDVnCyf0vAoRbBWyWTEXWk4D8XfuKVl3zg== - dependencies: - buffer "^5.5.0" - protocol-buffers-schema "^3.3.1" - signed-varint "^2.0.1" - varint "^5.0.0" - proxy-addr@~2.0.7: version "2.0.7" resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" @@ -10680,23 +9963,6 @@ public-encrypt@^4.0.0: randombytes "^2.0.1" safe-buffer "^5.1.2" -pull-defer@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/pull-defer/-/pull-defer-0.2.3.tgz#4ee09c6d9e227bede9938db80391c3dac489d113" - integrity sha512-/An3KE7mVjZCqNhZsr22k1Tx8MACnUnHZZNPSJ0S62td8JtYr/AiRG42Vz7Syu31SoTLUzVIe61jtT/pNdjVYA== - -pull-stream@^3.2.3, pull-stream@^3.6.9: - version "3.7.0" - resolved "https://registry.yarnpkg.com/pull-stream/-/pull-stream-3.7.0.tgz#85de0e44ff38a4d2ad08cc43fc458e1922f9bf0b" - integrity sha512-Eco+/R004UaCK2qEDE8vGklcTG2OeZSVm1kTUQNrykEjDwcFXDZhygFDsW49DbXyJMEhHeRL3z5cRVqPAhXlIw== - -pull-to-stream@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/pull-to-stream/-/pull-to-stream-0.1.1.tgz#fa2058528528e3542b81d6f17cbc42288508ff37" - integrity sha512-thZkMv6F9PILt9zdvpI2gxs19mkDrlixYKX6cOBxAW16i1NZH+yLAmF4r8QfJ69zuQh27e01JZP9y27tsH021w== - dependencies: - readable-stream "^3.1.1" - pump@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.3.tgz#5dfe8311c33bbf6fc18261f9f34702c47c08a954" @@ -10752,13 +10018,6 @@ qs@6.11.0, qs@^6.4.0: dependencies: side-channel "^1.0.4" -qs@^6.5.2: - version "6.11.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.2.tgz#64bea51f12c1f5da1bc01496f48ffcff7c69d7d9" - integrity sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA== - dependencies: - side-channel "^1.0.4" - qs@~6.5.2: version "6.5.3" resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" @@ -10950,15 +10209,6 @@ readable-stream@^2.0.0, 
readable-stream@^2.0.1, readable-stream@^2.0.4, readable string_decoder "~1.1.1" util-deprecate "~1.0.1" -readable-stream@^3.0.0, readable-stream@^3.0.1, readable-stream@^3.0.2: - version "3.6.2" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" - integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== - dependencies: - inherits "^2.0.3" - string_decoder "^1.1.1" - util-deprecate "^1.0.1" - readable-stream@~0.0.2: version "0.0.4" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-0.0.4.tgz#f32d76e3fb863344a548d79923007173665b3b8d" @@ -11153,7 +10403,7 @@ replace-ext@0.0.1: resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-0.0.1.tgz#29bbd92078a739f0bcce2b4ee41e837953522924" integrity sha1-KbvZIHinOfC8zitO5B6DeVNSKSQ= -request@2.88.2, request@^2.55.0, request@^2.79.0, request@^2.85.0, request@^2.88.0: +request@2.88.2, request@^2.55.0, request@^2.79.0, request@^2.85.0: version "2.88.2" resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== @@ -11322,21 +10572,6 @@ rn-host-detect@^1.1.5: resolved "https://registry.yarnpkg.com/rn-host-detect/-/rn-host-detect-1.2.0.tgz#8b0396fc05631ec60c1cb8789e5070cdb04d0da0" integrity sha512-btNg5kzHcjZZ7t7mvvV/4wNJ9e3MPgrWivkRgWURzXL0JJ0pwWlU4zrbmdlz3HHzHOxhBhHB4D+/dbMFfu4/4A== -rsa-pem-to-jwk@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/rsa-pem-to-jwk/-/rsa-pem-to-jwk-1.1.3.tgz#245e76bdb7e7234cfee7ca032d31b54c38fab98e" - integrity sha512-ZlVavEvTnD8Rzh/pdB8NH4VF5GNEtF6biGQcTtC4GKFMsbZR08oHtOYefbhCN+JnJIuMItiCDCMycdcMrw6blA== - dependencies: - object-assign "^2.0.0" - rsa-unpack "0.0.6" - -rsa-unpack@0.0.6: - version "0.0.6" - resolved 
"https://registry.yarnpkg.com/rsa-unpack/-/rsa-unpack-0.0.6.tgz#f50ebd56a628378e631f297161026ce9ab4eddba" - integrity sha512-HRrl8GHjjPziPFRDJPq/v5OxZ3IPdksV5h3cime/oHgcgM1k1toO5OdtzClgBqRf5dF6IgptOB0g/zFb0w5zQw== - dependencies: - optimist "~0.3.5" - run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" @@ -11403,20 +10638,6 @@ scrypt-js@^3.0.0, scrypt-js@^3.0.1: resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== -secp256k1@^3.6.2: - version "3.8.0" - resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-3.8.0.tgz#28f59f4b01dbee9575f56a47034b7d2e3b3b352d" - integrity sha512-k5ke5avRZbtl9Tqx/SA7CbY3NF6Ro+Sj9cZxezFzuBlLDmyqPiL8hJJ+EmzD8Ig4LUDByHJ3/iPOVoRixs/hmw== - dependencies: - bindings "^1.5.0" - bip66 "^1.1.5" - bn.js "^4.11.8" - create-hash "^1.2.0" - drbg.js "^1.0.1" - elliptic "^6.5.2" - nan "^2.14.0" - safe-buffer "^5.1.2" - secp256k1@^4.0.1: version "4.0.3" resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-4.0.3.tgz#c4559ecd1b8d3c1827ed2d1b94190d69ce267303" @@ -11622,13 +10843,6 @@ signal-exit@^3.0.2, signal-exit@^3.0.3: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== -signed-varint@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/signed-varint/-/signed-varint-2.0.1.tgz#50a9989da7c98c2c61dad119bc97470ef8528129" - integrity sha512-abgDPg1106vuZZOvw7cFwdCABddfJRz5akcCcchzTbhyhYnsG31y4AlZEgp315T7W3nQq5P4xeOm186ZiPVFzw== - dependencies: - varint "~5.0.0" - signedsource@^1.0.0: version "1.0.0" resolved 
"https://registry.yarnpkg.com/signedsource/-/signedsource-1.0.0.tgz#1ddace4981798f93bd833973803d80d52e93ad6a" @@ -11714,7 +10928,7 @@ source-map-support@^0.4.15: dependencies: source-map "^0.5.6" -source-map-support@^0.5.11, source-map-support@^0.5.19, source-map-support@^0.5.3: +source-map-support@^0.5.19, source-map-support@^0.5.3: version "0.5.19" resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== @@ -11786,13 +11000,6 @@ split-ca@^1.0.0: resolved "https://registry.yarnpkg.com/split-ca/-/split-ca-1.0.1.tgz#6c83aff3692fa61256e0cd197e05e9de157691a6" integrity sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ== -split2@^3.1.0: - version "3.2.2" - resolved "https://registry.yarnpkg.com/split2/-/split2-3.2.2.tgz#bf2cf2a37d838312c249c89206fd7a17dd12365f" - integrity sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg== - dependencies: - readable-stream "^3.0.0" - sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -11826,11 +11033,6 @@ sshpk@^1.7.0: safer-buffer "^2.0.2" tweetnacl "~0.14.0" -stable@~0.1.8: - version "0.1.8" - resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" - integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== - statuses@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" @@ -11858,14 +11060,6 @@ stream-to-it@^0.2.2: dependencies: get-iterator "^1.0.2" -stream-to-pull-stream@^1.7.2: - version "1.7.3" - resolved 
"https://registry.yarnpkg.com/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz#4161aa2d2eb9964de60bfa1af7feaf917e874ece" - integrity sha512-6sNyqJpr5dIOQdgNy/xcDWwDuzAsAwVzhzrWlAPAQ7Lkjx/rv0wgvxEyKwTq6FmNd5rjTrELt/CLmaSw7crMGg== - dependencies: - looper "^3.0.0" - pull-stream "^3.2.3" - streamsearch@0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" @@ -11941,7 +11135,7 @@ string.prototype.trimstart@^1.0.4: call-bind "^1.0.2" define-properties "^1.1.3" -string_decoder@^1.1.1, string_decoder@^1.2.0: +string_decoder@^1.1.1: version "1.3.0" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== @@ -12194,17 +11388,6 @@ tar-stream@^1.1.2: to-buffer "^1.1.1" xtend "^4.0.0" -tar-stream@^2.0.1: - version "2.2.0" - resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-2.2.0.tgz#acad84c284136b060dc3faa64474aa9aebd77287" - integrity sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ== - dependencies: - bl "^4.0.3" - end-of-stream "^1.4.1" - fs-constants "^1.0.0" - inherits "^2.0.3" - readable-stream "^3.1.1" - tar@^4, tar@^4.0.2: version "4.4.19" resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" @@ -12278,7 +11461,7 @@ through2@3.0.1: dependencies: readable-stream "2 || 3" -through2@3.0.2, through2@^3.0.0, through2@^3.0.1: +through2@3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/through2/-/through2-3.0.2.tgz#99f88931cfc761ec7678b41d5d7336b5b6a07bf4" integrity sha512-enaDQ4MUyP2W6ZyT6EsMzqBPZaM/avg8iuo+l2d3QCs0J+6RaqkHV/2/lOwDTueBHeJ/2LG9lrLW3d5rWPucuQ== @@ -12333,7 +11516,7 @@ title-case@^2.1.0: no-case "^2.2.0" upper-case "^1.0.3" -tmp-promise@3.0.3, tmp-promise@^3.0.2: +tmp-promise@3.0.3: 
version "3.0.3" resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.3.tgz#60a1a1cc98c988674fcbfd23b6e3367bdeac4ce7" integrity sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ== @@ -12509,11 +11692,6 @@ tweetnacl@^0.14.3, tweetnacl@~0.14.0: resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== -tweetnacl@^1.0.0: - version "1.0.3" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596" - integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== - type-check@~0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" @@ -12622,11 +11800,6 @@ underscore@^1.8.3: resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.12.0.tgz#4814940551fc80587cef7840d1ebb0f16453be97" integrity sha512-21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ== -unique-by@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unique-by/-/unique-by-1.0.0.tgz#5220c86ba7bc572fb713ad74651470cb644212bd" - integrity sha512-rJRXK5V0zL6TiSzhoGNpJp5dr+TZBLoPJFC06rLn17Ug++7Aa0Qnve5v+skXeQxx6/sI7rBsSesa6MAcmFi8Ew== - unique-stream@^2.0.2: version "2.3.1" resolved "https://registry.yarnpkg.com/unique-stream/-/unique-stream-2.3.1.tgz#c65d110e9a4adf9a6c5948b28053d9a8d04cbeac" @@ -12725,14 +11898,6 @@ urlpattern-polyfill@^8.0.0: resolved "https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-8.0.2.tgz#99f096e35eff8bf4b5a2aa7d58a1523d6ebc7ce5" integrity sha512-Qp95D4TPJl1kC9SKigDcqgyM2VDVO4RiJc2d4qe5GrYm+zbIQCWWKAFaJNQ4BhdFeDGwBmAxqJBwWSJDb9T3BQ== -ursa-optional@~0.10.0: - version "0.10.2" - resolved 
"https://registry.yarnpkg.com/ursa-optional/-/ursa-optional-0.10.2.tgz#bd74e7d60289c22ac2a69a3c8dea5eb2817f9681" - integrity sha512-TKdwuLboBn7M34RcvVTuQyhvrA8gYKapuVdm0nBP0mnBc7oECOfUQZrY91cefL3/nm64ZyrejSRrhTVdX7NG/A== - dependencies: - bindings "^1.5.0" - nan "^2.14.2" - utf-8-validate@^5.0.2: version "5.0.4" resolved "https://registry.yarnpkg.com/utf-8-validate/-/utf-8-validate-5.0.4.tgz#72a1735983ddf7a05a43a9c6b67c5ce1c910f9b8" @@ -12831,7 +11996,7 @@ validate-npm-package-license@^3.0.1: spdx-correct "^3.0.0" spdx-expression-parse "^3.0.0" -varint@^5.0.0, varint@~5.0.0: +varint@^5.0.0: version "5.0.2" resolved "https://registry.yarnpkg.com/varint/-/varint-5.0.2.tgz#5b47f8a947eb668b848e034dcfa87d0ff8a7f7a4" integrity sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow== @@ -13545,11 +12710,6 @@ wordwrap@^1.0.0: resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== -wordwrap@~0.0.2: - version "0.0.3" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" - integrity sha512-1tMA907+V4QmxV7dbRvb4/8MaRALK6q9Abid3ndMYnbyo8piisCmeONVqVSXqQA3KaP4SLt5b7ud6E2sqP8TFw== - workerpool@6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.0.0.tgz#85aad67fa1a2c8ef9386a1b43539900f61d03d58" @@ -13736,7 +12896,7 @@ yaeti@^0.0.6: resolved "https://registry.yarnpkg.com/yaeti/-/yaeti-0.0.6.tgz#f26f484d72684cf42bedfb76970aa1608fbf9577" integrity sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc= -yallist@^3.0.0, yallist@^3.0.2, yallist@^3.1.1: +yallist@^3.0.0, yallist@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== @@ 
-13746,7 +12906,7 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.5.1, yaml@^1.7.2: +yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: version "1.10.2" resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs index 74fecf942fa..6fb244d0af8 100644 --- a/tests/tests/integration_tests.rs +++ b/tests/tests/integration_tests.rs @@ -31,7 +31,7 @@ use tokio::process::{Child, Command}; /// require us to filter out `node_modules`, support files, etc.. Hardly worth /// it. pub const INTEGRATION_TEST_DIRS: &[&str] = &[ - "api-version-v0-0-4", + // "api-version-v0-0-4", "ganache-reverts", "host-exports", "non-fatal-errors", From d2a3219f088d026283b144a615c302ed380ec5af Mon Sep 17 00:00:00 2001 From: Jonathan LEI Date: Fri, 29 Sep 2023 20:45:16 +0800 Subject: [PATCH 0457/2104] feat: initial Starknet support (#4895) * feat: initial Starknet support * feat: resolve PR comments 1. Removes unnecessary TODO comments 2. Replaces `starknet-core` with `starknet-ff` to minimize deps 3. Fixes expensive `transaction` clone 4. 
Other misc changes --- Cargo.lock | 196 ++++++-- chain/starknet/Cargo.toml | 19 + chain/starknet/build.rs | 7 + chain/starknet/proto/starknet.proto | 37 ++ chain/starknet/src/adapter.rs | 27 ++ chain/starknet/src/chain.rs | 427 ++++++++++++++++++ chain/starknet/src/codec.rs | 31 ++ chain/starknet/src/data_source.rs | 347 ++++++++++++++ chain/starknet/src/lib.rs | 9 + .../src/protobuf/zklend.starknet.r#type.v1.rs | 69 +++ chain/starknet/src/runtime/abi.rs | 106 +++++ chain/starknet/src/runtime/generated.rs | 100 ++++ chain/starknet/src/runtime/mod.rs | 3 + chain/starknet/src/trigger.rs | 118 +++++ core/Cargo.toml | 1 + core/src/subgraph/instance_manager.rs | 14 + core/src/subgraph/registrar.rs | 18 + graph/src/blockchain/mod.rs | 4 + graph/src/runtime/mod.rs | 15 +- node/Cargo.toml | 1 + node/src/main.rs | 25 +- server/index-node/Cargo.toml | 1 + server/index-node/src/resolver.rs | 4 +- 23 files changed, 1545 insertions(+), 34 deletions(-) create mode 100644 chain/starknet/Cargo.toml create mode 100644 chain/starknet/build.rs create mode 100644 chain/starknet/proto/starknet.proto create mode 100644 chain/starknet/src/adapter.rs create mode 100644 chain/starknet/src/chain.rs create mode 100644 chain/starknet/src/codec.rs create mode 100644 chain/starknet/src/data_source.rs create mode 100644 chain/starknet/src/lib.rs create mode 100644 chain/starknet/src/protobuf/zklend.starknet.r#type.v1.rs create mode 100644 chain/starknet/src/runtime/abi.rs create mode 100644 chain/starknet/src/runtime/generated.rs create mode 100644 chain/starknet/src/runtime/mod.rs create mode 100644 chain/starknet/src/trigger.rs diff --git a/Cargo.lock b/Cargo.lock index 48250d72590..62019be9ed4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -72,6 +72,70 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e906254e445520903e7fc9da4f709886c84ae4bc4ddaf0e093188d66df4dc820" +[[package]] +name = "ark-ff" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.4", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.107", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.4", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.107", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std", + "digest 0.10.7", + "num-bigint 0.4.4", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "arrayref" version = "0.3.6" @@ -187,7 +251,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.1", + "itoa", "matchit", "memchr", "mime", @@ -297,6 +361,7 @@ dependencies = [ "num-bigint 0.4.4", "num-integer", "num-traits", + "serde", ] [[package]] @@ -859,6 +924,15 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +dependencies = [ + "subtle", +] + [[package]] name = "crypto-common" version = "0.1.3" @@ -921,6 +995,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "647605a6345d5e89c3950a36a638c56478af9b414c55c6f2477c73b115f9acde" +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.107", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -1443,13 +1528,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi", + "wasm-bindgen", ] [[package]] @@ -1658,6 +1745,22 @@ dependencies = [ "trigger-filters", ] +[[package]] +name = "graph-chain-starknet" +version = "0.32.0" +dependencies = [ + "graph", + "graph-runtime-derive", + "graph-runtime-wasm", + "hex", + "prost", + "prost-types", + "serde", + "sha3", + "starknet-ff", + "tonic-build", +] + [[package]] name = "graph-chain-substreams" version = "0.32.0" @@ -1701,6 +1804,7 @@ dependencies = [ "graph-chain-cosmos", "graph-chain-ethereum", "graph-chain-near", + "graph-chain-starknet", "graph-chain-substreams", "graph-runtime-wasm", "ipfs-api", @@ -1749,6 +1853,7 @@ dependencies = [ "graph-chain-cosmos", "graph-chain-ethereum", "graph-chain-near", + "graph-chain-starknet", "graph-chain-substreams", "graph-core", "graph-graphql", @@ -1848,6 +1953,7 @@ dependencies = [ "graph-chain-cosmos", 
"graph-chain-ethereum", "graph-chain-near", + "graph-chain-starknet", "graph-graphql", "graphql-parser", "http", @@ -2068,6 +2174,9 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-literal" @@ -2099,7 +2208,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -2161,7 +2270,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.1", + "itoa", "pin-project-lite", "socket2 0.4.9", "tokio", @@ -2464,12 +2573,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" - [[package]] name = "itoa" version = "1.0.1" @@ -2594,9 +2697,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures 0.2.2", +] [[package]] name = "lazy_static" @@ -2769,7 +2875,7 @@ checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.45.0", ] @@ -2912,9 +3018,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -3915,11 +4021,11 @@ 
dependencies = [ [[package]] name = "serde_json" -version = "1.0.66" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "itoa 0.4.7", + "itoa", "ryu", "serde", ] @@ -3970,7 +4076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -3998,7 +4104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" dependencies = [ "indexmap 1.9.3", - "itoa 1.0.1", + "itoa", "ryu", "serde", "unsafe-libyaml", @@ -4054,9 +4160,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", @@ -4241,6 +4347,20 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "starknet-ff" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2cb1d9c0a50380cddab99cb202c6bfb3332728a2769bd0ca2ee80b0b390dd4" +dependencies = [ + "ark-ff", + "bigdecimal 0.3.1", + "crypto-bigint", + "getrandom", + "hex", + "serde", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -4525,7 +4645,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies 
= [ - "itoa 1.0.1", + "itoa", "serde", "time-core", "time-macros", @@ -5186,12 +5306,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5839,6 +5953,26 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "zstd" version = "0.6.1+zstd.1.4.9" diff --git a/chain/starknet/Cargo.toml b/chain/starknet/Cargo.toml new file mode 100644 index 00000000000..89be8117148 --- /dev/null +++ b/chain/starknet/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "graph-chain-starknet" +version.workspace = true +edition.workspace = true + +[build-dependencies] +tonic-build = { workspace = true } + +[dependencies] +graph = { path = "../../graph" } +hex = { version = "0.4.3", features = ["serde"] } +prost = { workspace = true } +prost-types = { workspace = true } +serde = "1.0" +sha3 = "0.10.8" +starknet-ff = "0.3.4" + +graph-runtime-wasm = { path = "../../runtime/wasm" } +graph-runtime-derive = { path = "../../runtime/derive" } diff --git a/chain/starknet/build.rs b/chain/starknet/build.rs new file mode 100644 index 00000000000..8a67809dfca --- /dev/null +++ b/chain/starknet/build.rs @@ -0,0 +1,7 
@@ +fn main() { + println!("cargo:rerun-if-changed=proto"); + tonic_build::configure() + .out_dir("src/protobuf") + .compile(&["proto/starknet.proto"], &["proto"]) + .expect("Failed to compile Firehose StarkNet proto(s)"); +} diff --git a/chain/starknet/proto/starknet.proto b/chain/starknet/proto/starknet.proto new file mode 100644 index 00000000000..073b8c2c569 --- /dev/null +++ b/chain/starknet/proto/starknet.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package zklend.starknet.type.v1; + +option go_package = "github.com/starknet-graph/firehose-starknet/types/pb/zklend/starknet/type/v1;pbacme"; + +// This file only contains the bare minimum types for the POC. It's far from a complete +// representation of a StarkNet network's history as required by the Firehose protocol. As a result, +// any future changes to this schema would require a full re-sync of the StarkNet node. + +message Block { + uint64 height = 1; + bytes hash = 2; + bytes prevHash = 3; + uint64 timestamp = 4; + repeated Transaction transactions = 5; +} + +message Transaction { + TransactionType type = 1; + bytes hash = 2; + repeated Event events = 3; +} + +enum TransactionType { + DEPLOY = 0; + INVOKE_FUNCTION = 1; + DECLARE = 2; + L1_HANDLER = 3; + DEPLOY_ACCOUNT = 4; +} + +message Event { + bytes fromAddr = 1; + repeated bytes keys = 2; + repeated bytes data = 3; +} diff --git a/chain/starknet/src/adapter.rs b/chain/starknet/src/adapter.rs new file mode 100644 index 00000000000..e04df8e979c --- /dev/null +++ b/chain/starknet/src/adapter.rs @@ -0,0 +1,27 @@ +use graph::blockchain::{EmptyNodeCapabilities, TriggerFilter as TriggerFilterTrait}; + +use crate::{ + data_source::{DataSource, DataSourceTemplate}, + Chain, +}; + +#[derive(Default, Clone)] +pub struct TriggerFilter; + +impl TriggerFilterTrait for TriggerFilter { + #[allow(unused)] + fn extend_with_template(&mut self, data_source: impl Iterator) { + todo!() + } + + #[allow(unused)] + fn extend<'a>(&mut self, data_sources: impl Iterator + 
Clone) {} + + fn node_capabilities(&self) -> EmptyNodeCapabilities { + todo!() + } + + fn to_firehose_filter(self) -> Vec { + todo!() + } +} diff --git a/chain/starknet/src/chain.rs b/chain/starknet/src/chain.rs new file mode 100644 index 00000000000..e3d77c91a00 --- /dev/null +++ b/chain/starknet/src/chain.rs @@ -0,0 +1,427 @@ +use graph::{ + anyhow::Result, + blockchain::{ + block_stream::{ + BlockStream, BlockStreamBuilder, BlockStreamEvent, BlockWithTriggers, FirehoseCursor, + FirehoseError, FirehoseMapper as FirehoseMapperTrait, + TriggersAdapter as TriggersAdapterTrait, + }, + client::ChainClient, + firehose_block_ingestor::FirehoseBlockIngestor, + firehose_block_stream::FirehoseBlockStream, + BasicBlockchainBuilder, Block, BlockIngestor, BlockPtr, Blockchain, BlockchainBuilder, + BlockchainKind, EmptyNodeCapabilities, IngestorError, NoopRuntimeAdapter, + RuntimeAdapter as RuntimeAdapterTrait, + }, + cheap_clone::CheapClone, + components::store::{DeploymentCursorTracker, DeploymentLocator}, + data::subgraph::UnifiedMappingApiVersion, + env::EnvVars, + firehose::{self, FirehoseEndpoint, ForkStep}, + prelude::{ + async_trait, BlockHash, BlockNumber, ChainStore, Error, Logger, LoggerFactory, + MetricsRegistry, TryFutureExt, + }, + schema::InputSchema, + slog::o, +}; +use prost::Message; +use std::sync::Arc; + +use crate::{ + adapter::TriggerFilter, + codec, + data_source::{ + DataSource, DataSourceTemplate, UnresolvedDataSource, UnresolvedDataSourceTemplate, + }, + trigger::{StarknetBlockTrigger, StarknetEventTrigger, StarknetTrigger}, +}; + +pub struct Chain { + logger_factory: LoggerFactory, + name: String, + client: Arc>, + chain_store: Arc, + metrics_registry: Arc, + block_stream_builder: Arc>, +} + +pub struct StarknetStreamBuilder; + +pub struct FirehoseMapper { + adapter: Arc>, + filter: Arc, +} + +pub struct TriggersAdapter; + +impl BlockchainBuilder for BasicBlockchainBuilder { + fn build(self, _config: &Arc) -> Chain { + Chain { + logger_factory: 
self.logger_factory, + name: self.name, + chain_store: self.chain_store, + client: Arc::new(ChainClient::new_firehose(self.firehose_endpoints)), + metrics_registry: self.metrics_registry, + block_stream_builder: Arc::new(StarknetStreamBuilder {}), + } + } +} + +impl std::fmt::Debug for Chain { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "chain: starknet") + } +} + +#[async_trait] +impl Blockchain for Chain { + const KIND: BlockchainKind = BlockchainKind::Starknet; + + type Client = (); + type Block = codec::Block; + type DataSource = DataSource; + type UnresolvedDataSource = UnresolvedDataSource; + + type DataSourceTemplate = DataSourceTemplate; + type UnresolvedDataSourceTemplate = UnresolvedDataSourceTemplate; + + type TriggerData = crate::trigger::StarknetTrigger; + + type MappingTrigger = crate::trigger::StarknetTrigger; + + type TriggerFilter = crate::adapter::TriggerFilter; + + type NodeCapabilities = EmptyNodeCapabilities; + + fn triggers_adapter( + &self, + _log: &DeploymentLocator, + _capabilities: &Self::NodeCapabilities, + _unified_api_version: UnifiedMappingApiVersion, + ) -> Result>, Error> { + Ok(Arc::new(TriggersAdapter)) + } + + async fn new_block_stream( + &self, + deployment: DeploymentLocator, + store: impl DeploymentCursorTracker, + start_blocks: Vec, + filter: Arc, + unified_api_version: UnifiedMappingApiVersion, + ) -> Result>, Error> { + self.block_stream_builder + .build_firehose( + self, + deployment, + store.firehose_cursor(), + start_blocks, + store.block_ptr(), + filter, + unified_api_version, + ) + .await + } + + fn is_refetch_block_required(&self) -> bool { + false + } + + async fn refetch_firehose_block( + &self, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("This chain does not support Dynamic Data Sources. 
is_refetch_block_required always returns false, this shouldn't be called.") + } + + fn chain_store(&self) -> Arc { + self.chain_store.clone() + } + + async fn block_pointer_from_number( + &self, + logger: &Logger, + number: BlockNumber, + ) -> Result { + let firehose_endpoint = self.client.firehose_endpoint()?; + + firehose_endpoint + .block_ptr_for_number::(logger, number) + .map_err(Into::into) + .await + } + + fn runtime_adapter(&self) -> Arc> { + Arc::new(NoopRuntimeAdapter::default()) + } + + fn chain_client(&self) -> Arc> { + self.client.clone() + } + + fn block_ingestor(&self) -> Result> { + let ingestor = FirehoseBlockIngestor::::new( + self.chain_store.cheap_clone(), + self.chain_client(), + self.logger_factory + .component_logger("StarknetFirehoseBlockIngestor", None), + self.name.clone(), + ); + Ok(Box::new(ingestor)) + } +} + +#[async_trait] +impl BlockStreamBuilder for StarknetStreamBuilder { + async fn build_substreams( + &self, + _chain: &Chain, + _schema: InputSchema, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _subgraph_current_block: Option, + _filter: Arc<::TriggerFilter>, + ) -> Result>> { + unimplemented!() + } + + async fn build_firehose( + &self, + chain: &Chain, + deployment: DeploymentLocator, + block_cursor: FirehoseCursor, + start_blocks: Vec, + subgraph_current_block: Option, + filter: Arc, + unified_api_version: UnifiedMappingApiVersion, + ) -> Result>> { + let adapter = chain + .triggers_adapter( + &deployment, + &EmptyNodeCapabilities::default(), + unified_api_version, + ) + .unwrap_or_else(|_| panic!("no adapter for network {}", chain.name)); + + let logger = chain + .logger_factory + .subgraph_logger(&deployment) + .new(o!("component" => "FirehoseBlockStream")); + + let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); + + Ok(Box::new(FirehoseBlockStream::new( + deployment.hash, + chain.chain_client(), + subgraph_current_block, + block_cursor, + firehose_mapper, + start_blocks, + logger, + 
chain.metrics_registry.clone(), + ))) + } + + async fn build_polling( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _start_blocks: Vec, + _subgraph_current_block: Option, + _filter: Arc, + _unified_api_version: UnifiedMappingApiVersion, + ) -> Result>> { + panic!("StarkNet does not support polling block stream") + } +} + +#[async_trait] +impl FirehoseMapperTrait for FirehoseMapper { + fn trigger_filter(&self) -> &TriggerFilter { + self.filter.as_ref() + } + + async fn to_block_stream_event( + &self, + logger: &Logger, + response: &firehose::Response, + ) -> Result, FirehoseError> { + let step = ForkStep::from_i32(response.step).unwrap_or_else(|| { + panic!( + "unknown step i32 value {}, maybe you forgot update & re-regenerate the protobuf definitions?", + response.step + ) + }); + + let any_block = response + .block + .as_ref() + .expect("block payload information should always be present"); + + // Right now, this is done in all cases but in reality, with how the BlockStreamEvent::Revert + // is defined right now, only block hash and block number is necessary. However, this information + // is not part of the actual bstream::BlockResponseV2 payload. As such, we need to decode the full + // block which is useless. + // + // Check about adding basic information about the block in the bstream::BlockResponseV2 or maybe + // define a slimmed down stuct that would decode only a few fields and ignore all the rest. 
+ let block = codec::Block::decode(any_block.value.as_ref())?; + + use ForkStep::*; + match step { + StepNew => Ok(BlockStreamEvent::ProcessBlock( + self.adapter + .triggers_in_block(logger, block, &self.filter) + .await?, + FirehoseCursor::from(response.cursor.clone()), + )), + + StepUndo => { + let parent_ptr = block + .parent_ptr() + .expect("Genesis block should never be reverted"); + + Ok(BlockStreamEvent::Revert( + parent_ptr, + FirehoseCursor::from(response.cursor.clone()), + )) + } + + StepFinal => { + panic!("irreversible step is not handled and should not be requested in the Firehose request") + } + + StepUnset => { + panic!("unknown step should not happen in the Firehose response") + } + } + } + + /// Returns the [BlockPtr] value for this given block number. This is the block pointer + /// of the longuest according to Firehose view of the blockchain state. + /// + /// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make + /// it chain agnostic and callable from chain agnostic [FirehoseBlockStream]. + async fn block_ptr_for_number( + &self, + logger: &Logger, + endpoint: &Arc, + number: BlockNumber, + ) -> Result { + endpoint + .block_ptr_for_number::(logger, number) + .await + } + + /// Returns the closest final block ptr to the block ptr received. + /// On probablitics chain like Ethereum, final is determined by + /// the confirmations threshold configured for the Firehose stack (currently + /// hard-coded to 200). + /// + /// On some other chain like NEAR, the actual final block number is determined + /// from the block itself since it contains information about which block number + /// is final against the current block. + /// + /// To take an example, assuming we are on Ethereum, the final block pointer + /// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012). 
+ async fn final_block_ptr_for( + &self, + logger: &Logger, + endpoint: &Arc, + block: &codec::Block, + ) -> Result { + // Firehose for Starknet has an hard-coded confirmations for finality sets to 100 block + // behind the current block. The magic value 100 here comes from this hard-coded Firehose + // value. + let final_block_number = match block.number() { + x if x >= 100 => x - 100, + _ => 0, + }; + + self.block_ptr_for_number(logger, endpoint, final_block_number) + .await + } +} + +#[async_trait] +impl TriggersAdapterTrait for TriggersAdapter { + // Return the block that is `offset` blocks before the block pointed to + // by `ptr` from the local cache. An offset of 0 means the block itself, + // an offset of 1 means the block's parent etc. If the block is not in + // the local cache, return `None` + async fn ancestor_block( + &self, + _ptr: BlockPtr, + _offset: BlockNumber, + ) -> Result, Error> { + panic!("Should never be called since FirehoseBlockStream cannot resolve it") + } + + // Returns a sequence of blocks in increasing order of block number. + // Each block will include all of its triggers that match the given `filter`. + // The sequence may omit blocks that contain no triggers, + // but all returned blocks must part of a same chain starting at `chain_base`. + // At least one block will be returned, even if it contains no triggers. + // `step_size` is the suggested number blocks to be scanned. 
+ async fn scan_triggers( + &self, + _from: BlockNumber, + _to: BlockNumber, + _filter: &crate::adapter::TriggerFilter, + ) -> Result>, Error> { + panic!("Should never be called since not used by FirehoseBlockStream") + } + + #[allow(unused)] + async fn triggers_in_block( + &self, + logger: &Logger, + block: codec::Block, + filter: &crate::adapter::TriggerFilter, + ) -> Result, Error> { + let shared_block = Arc::new(block.clone()); + + let mut triggers: Vec<_> = shared_block + .transactions + .iter() + .flat_map(|transaction| -> Vec { + let transaction = Arc::new(transaction.clone()); + transaction + .events + .iter() + .map(|event| { + StarknetTrigger::Event(StarknetEventTrigger { + event: Arc::new(event.clone()), + block: shared_block.clone(), + transaction: transaction.clone(), + }) + }) + .collect() + }) + .collect(); + + triggers.push(StarknetTrigger::Block(StarknetBlockTrigger { + block: shared_block, + })); + + Ok(BlockWithTriggers::new(block, triggers, logger)) + } + + /// Return `true` if the block with the given hash and number is on the + /// main chain, i.e., the chain going back from the current chain head. + async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { + panic!("Should never be called since not used by FirehoseBlockStream") + } + + /// Get pointer to parent of `block`. This is called when reverting `block`. + async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { + // Panics if `block` is genesis. + // But that's ok since this is only called when reverting `block`. 
+ Ok(Some(BlockPtr { + hash: BlockHash::from(vec![0xff; 32]), + number: block.number.saturating_sub(1), + })) + } +} diff --git a/chain/starknet/src/codec.rs b/chain/starknet/src/codec.rs new file mode 100644 index 00000000000..e213b8fbaef --- /dev/null +++ b/chain/starknet/src/codec.rs @@ -0,0 +1,31 @@ +#[rustfmt::skip] +#[path = "protobuf/zklend.starknet.r#type.v1.rs"] +mod pbcodec; + +use graph::blockchain::{Block as BlockchainBlock, BlockHash, BlockPtr}; + +pub use pbcodec::*; + +impl BlockchainBlock for Block { + fn number(&self) -> i32 { + self.height as i32 + } + + fn ptr(&self) -> BlockPtr { + BlockPtr { + hash: BlockHash(self.hash.clone().into_boxed_slice()), + number: self.height as i32, + } + } + + fn parent_ptr(&self) -> Option { + if self.height == 0 { + None + } else { + Some(BlockPtr { + hash: BlockHash(self.prev_hash.clone().into_boxed_slice()), + number: (self.height - 1) as i32, + }) + } + } +} diff --git a/chain/starknet/src/data_source.rs b/chain/starknet/src/data_source.rs new file mode 100644 index 00000000000..144dcf8560c --- /dev/null +++ b/chain/starknet/src/data_source.rs @@ -0,0 +1,347 @@ +use graph::{ + anyhow::{anyhow, Error}, + blockchain::{self, Block as BlockchainBlock, TriggerWithHandler}, + components::{link_resolver::LinkResolver, store::StoredDynamicDataSource}, + data::subgraph::DataSourceContext, + prelude::{async_trait, BlockNumber, DataSourceTemplateInfo, Deserialize, Link, Logger}, + semver, +}; +use sha3::{Digest, Keccak256}; +use starknet_ff::FieldElement; +use std::{collections::HashSet, sync::Arc}; + +use crate::{ + chain::Chain, + codec, + trigger::{StarknetEventTrigger, StarknetTrigger}, +}; + +const BLOCK_HANDLER_KIND: &str = "block"; +const EVENT_HANDLER_KIND: &str = "event"; + +#[derive(Clone)] +pub struct DataSource { + pub kind: String, + pub network: String, + pub name: String, + pub source: Source, + pub mapping: Mapping, +} + +#[derive(Clone)] +pub struct Mapping { + pub block_handlers: Vec, + pub 
event_handlers: Vec, + pub runtime: Arc>, +} + +#[derive(Deserialize)] +pub struct UnresolvedDataSource { + pub kind: String, + pub network: String, + pub name: String, + pub source: Source, + pub mapping: UnresolvedMapping, +} + +#[derive(Clone, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Source { + pub start_block: BlockNumber, + #[serde(default, deserialize_with = "deserialize_address")] + pub address: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UnresolvedMapping { + #[serde(default)] + pub block_handlers: Vec, + #[serde(default)] + pub event_handlers: Vec, + pub file: Link, +} + +#[derive(Clone, PartialEq, Eq, Deserialize)] +pub struct MappingBlockHandler { + pub handler: String, +} + +#[derive(Clone, PartialEq, Eq, Deserialize)] +pub struct MappingEventHandler { + pub handler: String, + pub event_selector: FieldElement, +} + +#[derive(Clone, Deserialize)] +pub struct UnresolvedMappingEventHandler { + pub handler: String, + pub event: String, +} + +#[derive(Debug, Clone)] +pub struct DataSourceTemplate; + +#[derive(Clone, Default, Deserialize)] +pub struct UnresolvedDataSourceTemplate; + +impl blockchain::DataSource for DataSource { + fn from_template_info(_template_info: DataSourceTemplateInfo) -> Result { + Err(anyhow!("StarkNet subgraphs do not support templates")) + } + + fn address(&self) -> Option<&[u8]> { + None + } + + fn start_block(&self) -> BlockNumber { + self.source.start_block + } + + fn handler_kinds(&self) -> HashSet<&str> { + let mut kinds = HashSet::new(); + + let Mapping { + block_handlers, + event_handlers, + .. 
+ } = &self.mapping; + + if !block_handlers.is_empty() { + kinds.insert(BLOCK_HANDLER_KIND); + } + if !event_handlers.is_empty() { + kinds.insert(EVENT_HANDLER_KIND); + } + + kinds + } + + fn match_and_decode( + &self, + trigger: &StarknetTrigger, + block: &Arc, + _logger: &Logger, + ) -> Result>, Error> { + if self.start_block() > block.number() { + return Ok(None); + } + + let handler = match trigger { + StarknetTrigger::Block(_) => match self.mapping.block_handlers.first() { + Some(handler) => handler.handler.clone(), + None => return Ok(None), + }, + StarknetTrigger::Event(event) => match self.handler_for_event(event) { + Some(handler) => handler.handler, + None => return Ok(None), + }, + }; + + Ok(Some(TriggerWithHandler::::new( + trigger.clone(), + handler, + block.ptr(), + ))) + } + + fn name(&self) -> &str { + &self.name + } + + fn kind(&self) -> &str { + &self.kind + } + + fn network(&self) -> Option<&str> { + Some(&self.network) + } + + fn context(&self) -> Arc> { + Arc::new(None) + } + + fn creation_block(&self) -> Option { + None + } + + fn is_duplicate_of(&self, other: &Self) -> bool { + let DataSource { + kind, + network, + name, + source, + mapping, + } = self; + + kind == &other.kind + && network == &other.network + && name == &other.name + && source == &other.source + && mapping.event_handlers == other.mapping.event_handlers + && mapping.block_handlers == other.mapping.block_handlers + } + + fn as_stored_dynamic_data_source(&self) -> StoredDynamicDataSource { + // FIXME (Starknet): Implement me! 
+ todo!() + } + + fn from_stored_dynamic_data_source( + _template: &DataSourceTemplate, + _stored: StoredDynamicDataSource, + ) -> Result { + // FIXME (Starknet): Implement me correctly + todo!() + } + + fn validate(&self) -> Vec { + Default::default() + } + + fn api_version(&self) -> semver::Version { + semver::Version::new(0, 0, 5) + } + + fn runtime(&self) -> Option>> { + Some(self.mapping.runtime.clone()) + } +} + +impl DataSource { + /// Returns event trigger if an event.key matches the handler.key and optionally + /// if event.fromAddr matches the source address. Note this only supports the default + /// Starknet behavior of one key per event. + fn handler_for_event(&self, event: &StarknetEventTrigger) -> Option { + let event_key = FieldElement::from_byte_slice_be(event.event.keys.first()?).ok()?; + + // Always deocding first here seems fine as we expect most sources to define an address + // filter anyways. Alternatively we can use lazy init here, which seems unnecessary. + let event_from_addr = FieldElement::from_byte_slice_be(&event.event.from_addr).ok()?; + + return self + .mapping + .event_handlers + .iter() + .find(|handler| { + // No need to compare address if selector doesn't match + if handler.event_selector != event_key { + return false; + } + + match &self.source.address { + Some(addr_filter) => addr_filter == &event_from_addr, + None => true, + } + }) + .cloned(); + } +} + +#[async_trait] +impl blockchain::UnresolvedDataSource for UnresolvedDataSource { + async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + _manifest_idx: u32, + ) -> Result { + let module_bytes = resolver.cat(logger, &self.mapping.file).await?; + + Ok(DataSource { + kind: self.kind, + network: self.network, + name: self.name, + source: self.source, + mapping: Mapping { + block_handlers: self.mapping.block_handlers, + event_handlers: self + .mapping + .event_handlers + .into_iter() + .map(|handler| { + Ok(MappingEventHandler { + handler: handler.handler, + 
event_selector: get_selector_from_name(&handler.event)?, + }) + }) + .collect::, Error>>()?, + runtime: Arc::new(module_bytes), + }, + }) + } +} + +impl blockchain::DataSourceTemplate for DataSourceTemplate { + fn api_version(&self) -> semver::Version { + todo!() + } + + fn runtime(&self) -> Option>> { + todo!() + } + + fn name(&self) -> &str { + todo!() + } + + fn manifest_idx(&self) -> u32 { + todo!() + } + + fn kind(&self) -> &str { + todo!() + } +} + +#[async_trait] +impl blockchain::UnresolvedDataSourceTemplate for UnresolvedDataSourceTemplate { + #[allow(unused)] + async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + manifest_idx: u32, + ) -> Result { + todo!() + } +} + +// Adapted from: +// https://github.com/xJonathanLEI/starknet-rs/blob/f16271877c9dbf08bc7bf61e4fc72decc13ff73d/starknet-core/src/utils.rs#L110-L121 +fn get_selector_from_name(func_name: &str) -> graph::anyhow::Result { + const DEFAULT_ENTRY_POINT_NAME: &str = "__default__"; + const DEFAULT_L1_ENTRY_POINT_NAME: &str = "__l1_default__"; + + if func_name == DEFAULT_ENTRY_POINT_NAME || func_name == DEFAULT_L1_ENTRY_POINT_NAME { + Ok(FieldElement::ZERO) + } else { + let name_bytes = func_name.as_bytes(); + if name_bytes.is_ascii() { + Ok(starknet_keccak(name_bytes)) + } else { + Err(anyhow!("the provided name contains non-ASCII characters")) + } + } +} + +// Adapted from: +// https://github.com/xJonathanLEI/starknet-rs/blob/f16271877c9dbf08bc7bf61e4fc72decc13ff73d/starknet-core/src/utils.rs#L98-L108 +fn starknet_keccak(data: &[u8]) -> FieldElement { + let mut hasher = Keccak256::new(); + hasher.update(data); + let mut hash = hasher.finalize(); + + // Remove the first 6 bits + hash[0] &= 0b00000011; + + // Because we know hash is always 32 bytes + FieldElement::from_bytes_be(unsafe { &*(hash[..].as_ptr() as *const [u8; 32]) }).unwrap() +} + +fn deserialize_address<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::de::Deserializer<'de>, +{ + 
Ok(Some(serde::Deserialize::deserialize(deserializer)?)) +} diff --git a/chain/starknet/src/lib.rs b/chain/starknet/src/lib.rs new file mode 100644 index 00000000000..2595cd6f1dc --- /dev/null +++ b/chain/starknet/src/lib.rs @@ -0,0 +1,9 @@ +mod adapter; +mod chain; +pub mod codec; +mod data_source; +mod runtime; +mod trigger; + +pub use crate::chain::{Chain, StarknetStreamBuilder}; +pub use codec::Block; diff --git a/chain/starknet/src/protobuf/zklend.starknet.r#type.v1.rs b/chain/starknet/src/protobuf/zklend.starknet.r#type.v1.rs new file mode 100644 index 00000000000..e580ad30143 --- /dev/null +++ b/chain/starknet/src/protobuf/zklend.starknet.r#type.v1.rs @@ -0,0 +1,69 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Block { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(bytes = "vec", tag = "2")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub prev_hash: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "4")] + pub timestamp: u64, + #[prost(message, repeated, tag = "5")] + pub transactions: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Transaction { + #[prost(enumeration = "TransactionType", tag = "1")] + pub r#type: i32, + #[prost(bytes = "vec", tag = "2")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub events: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + #[prost(bytes = "vec", tag = "1")] + pub from_addr: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", repeated, tag = "2")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(bytes = "vec", repeated, tag = "3")] + pub data: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, 
::prost::Enumeration)] +#[repr(i32)] +pub enum TransactionType { + Deploy = 0, + InvokeFunction = 1, + Declare = 2, + L1Handler = 3, + DeployAccount = 4, +} +impl TransactionType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + TransactionType::Deploy => "DEPLOY", + TransactionType::InvokeFunction => "INVOKE_FUNCTION", + TransactionType::Declare => "DECLARE", + TransactionType::L1Handler => "L1_HANDLER", + TransactionType::DeployAccount => "DEPLOY_ACCOUNT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DEPLOY" => Some(Self::Deploy), + "INVOKE_FUNCTION" => Some(Self::InvokeFunction), + "DECLARE" => Some(Self::Declare), + "L1_HANDLER" => Some(Self::L1Handler), + "DEPLOY_ACCOUNT" => Some(Self::DeployAccount), + _ => None, + } + } +} diff --git a/chain/starknet/src/runtime/abi.rs b/chain/starknet/src/runtime/abi.rs new file mode 100644 index 00000000000..b306d9eb5f8 --- /dev/null +++ b/chain/starknet/src/runtime/abi.rs @@ -0,0 +1,106 @@ +use graph::{ + prelude::BigInt, + runtime::{asc_new, gas::GasCounter, AscHeap, HostExportError, ToAscObj}, +}; +use graph_runtime_wasm::asc_abi::class::{Array, AscEnum, EnumPayload}; + +use crate::{ + codec, + trigger::{StarknetBlockTrigger, StarknetEventTrigger}, +}; + +pub(crate) use super::generated::*; + +impl ToAscObj for codec::Block { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + Ok(AscBlock { + number: asc_new(heap, &BigInt::from(self.height), gas)?, + hash: asc_new(heap, self.hash.as_slice(), gas)?, + prev_hash: asc_new(heap, self.prev_hash.as_slice(), gas)?, + timestamp: asc_new(heap, &BigInt::from(self.timestamp), gas)?, + 
}) + } +} + +impl ToAscObj for codec::Transaction { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + Ok(AscTransaction { + r#type: asc_new( + heap, + &codec::TransactionType::from_i32(self.r#type) + .expect("invalid TransactionType value"), + gas, + )?, + hash: asc_new(heap, self.hash.as_slice(), gas)?, + }) + } +} + +impl ToAscObj for codec::TransactionType { + fn to_asc_obj( + &self, + _heap: &mut H, + _gas: &GasCounter, + ) -> Result { + Ok(AscTransactionTypeEnum(AscEnum { + kind: match self { + codec::TransactionType::Deploy => AscTransactionType::Deploy, + codec::TransactionType::InvokeFunction => AscTransactionType::InvokeFunction, + codec::TransactionType::Declare => AscTransactionType::Declare, + codec::TransactionType::L1Handler => AscTransactionType::L1Handler, + codec::TransactionType::DeployAccount => AscTransactionType::DeployAccount, + }, + _padding: 0, + payload: EnumPayload(0), + })) + } +} + +impl ToAscObj for Vec> { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + let content: Result, _> = self + .iter() + .map(|x| asc_new(heap, x.as_slice(), gas)) + .collect(); + + Ok(AscBytesArray(Array::new(&content?, heap, gas)?)) + } +} + +impl ToAscObj for StarknetBlockTrigger { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + self.block.to_asc_obj(heap, gas) + } +} + +impl ToAscObj for StarknetEventTrigger { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + Ok(AscEvent { + from_addr: asc_new(heap, self.event.from_addr.as_slice(), gas)?, + keys: asc_new(heap, &self.event.keys, gas)?, + data: asc_new(heap, &self.event.data, gas)?, + block: asc_new(heap, self.block.as_ref(), gas)?, + transaction: asc_new(heap, self.transaction.as_ref(), gas)?, + }) + } +} diff --git a/chain/starknet/src/runtime/generated.rs b/chain/starknet/src/runtime/generated.rs new file mode 100644 index 00000000000..59932ae576e --- /dev/null +++ 
b/chain/starknet/src/runtime/generated.rs @@ -0,0 +1,100 @@ +use graph::runtime::{ + AscIndexId, AscPtr, AscType, AscValue, DeterministicHostError, IndexForAscTypeId, +}; +use graph::semver::Version; +use graph_runtime_derive::AscType; +use graph_runtime_wasm::asc_abi::class::{Array, AscBigInt, AscEnum, Uint8Array}; + +pub struct AscBytesArray(pub(crate) Array>); + +impl AscType for AscBytesArray { + fn to_asc_bytes(&self) -> Result, DeterministicHostError> { + self.0.to_asc_bytes() + } + + fn from_asc_bytes( + asc_obj: &[u8], + api_version: &Version, + ) -> Result { + Ok(Self(Array::from_asc_bytes(asc_obj, api_version)?)) + } +} + +impl AscIndexId for AscBytesArray { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::StarknetArrayBytes; +} + +pub struct AscTransactionTypeEnum(pub(crate) AscEnum); + +impl AscType for AscTransactionTypeEnum { + fn to_asc_bytes(&self) -> Result, DeterministicHostError> { + self.0.to_asc_bytes() + } + + fn from_asc_bytes( + asc_obj: &[u8], + api_version: &Version, + ) -> Result { + Ok(Self(AscEnum::from_asc_bytes(asc_obj, api_version)?)) + } +} + +impl AscIndexId for AscTransactionTypeEnum { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::StarknetTransactionTypeEnum; +} + +#[repr(C)] +#[derive(AscType)] +pub(crate) struct AscBlock { + pub number: AscPtr, + pub hash: AscPtr, + pub prev_hash: AscPtr, + pub timestamp: AscPtr, +} + +impl AscIndexId for AscBlock { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::StarknetBlock; +} + +#[repr(C)] +#[derive(AscType)] +pub(crate) struct AscTransaction { + pub r#type: AscPtr, + pub hash: AscPtr, +} + +impl AscIndexId for AscTransaction { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::StarknetTransaction; +} + +#[repr(u32)] +#[derive(AscType, Copy, Clone)] +pub(crate) enum AscTransactionType { + Deploy, + InvokeFunction, + Declare, + L1Handler, + DeployAccount, +} + +impl AscValue for AscTransactionType {} + +impl Default for 
AscTransactionType { + fn default() -> Self { + Self::Deploy + } +} + +#[repr(C)] +#[derive(AscType)] +pub(crate) struct AscEvent { + pub from_addr: AscPtr, + pub keys: AscPtr, + pub data: AscPtr, + pub block: AscPtr, + pub transaction: AscPtr, +} + +impl AscIndexId for AscEvent { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::StarknetEvent; +} diff --git a/chain/starknet/src/runtime/mod.rs b/chain/starknet/src/runtime/mod.rs new file mode 100644 index 00000000000..31e18de7dd8 --- /dev/null +++ b/chain/starknet/src/runtime/mod.rs @@ -0,0 +1,3 @@ +pub mod abi; + +mod generated; diff --git a/chain/starknet/src/trigger.rs b/chain/starknet/src/trigger.rs new file mode 100644 index 00000000000..f449d8293be --- /dev/null +++ b/chain/starknet/src/trigger.rs @@ -0,0 +1,118 @@ +use graph::{ + blockchain::{MappingTriggerTrait, TriggerData}, + runtime::{asc_new, gas::GasCounter, AscPtr, HostExportError}, +}; +use graph_runtime_wasm::module::ToAscPtr; +use starknet_ff::FieldElement; +use std::{cmp::Ordering, sync::Arc}; + +use crate::codec; + +#[derive(Debug, Clone)] +pub enum StarknetTrigger { + Block(StarknetBlockTrigger), + Event(StarknetEventTrigger), +} + +#[derive(Debug, Clone)] +pub struct StarknetBlockTrigger { + pub(crate) block: Arc, +} + +#[derive(Debug, Clone)] +pub struct StarknetEventTrigger { + pub(crate) event: Arc, + pub(crate) block: Arc, + pub(crate) transaction: Arc, +} + +impl PartialEq for StarknetTrigger { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Block(l), Self::Block(r)) => l.block == r.block, + (Self::Event(l), Self::Event(r)) => { + // Without event index we can't really tell if they're the same + // TODO: implement add event index to trigger data + l.block.hash == r.block.hash + && l.transaction.hash == r.transaction.hash + && l.event == r.event + } + _ => false, + } + } +} + +impl Eq for StarknetTrigger {} + +impl PartialOrd for StarknetTrigger { + fn partial_cmp(&self, other: &Self) -> Option { + 
Some(self.cmp(other)) + } +} + +impl Ord for StarknetTrigger { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Block(l), Self::Block(r)) => l.block.height.cmp(&r.block.height), + + // Block triggers always come last + (Self::Block(..), _) => Ordering::Greater, + (_, Self::Block(..)) => Ordering::Less, + + // Keep the order when comparing two event triggers + // TODO: compare block hash, tx index, and event index + (Self::Event(..), Self::Event(..)) => Ordering::Equal, + } + } +} + +impl TriggerData for StarknetTrigger { + fn error_context(&self) -> String { + match self { + Self::Block(block) => format!("block #{}", block.block.height), + Self::Event(event) => { + format!( + "event from {}", + match FieldElement::from_byte_slice_be(&event.event.from_addr) { + Ok(from_addr) => format!("{from_addr:#x}"), + Err(_) => "[unable to parse source address]".into(), + } + ) + } + } + } + + fn address_match(&self) -> Option<&[u8]> { + None + } +} + +impl ToAscPtr for StarknetTrigger { + fn to_asc_ptr( + self, + heap: &mut H, + gas: &GasCounter, + ) -> Result, HostExportError> { + Ok(match self { + StarknetTrigger::Block(block) => asc_new(heap, &block, gas)?.erase(), + StarknetTrigger::Event(event) => asc_new(heap, &event, gas)?.erase(), + }) + } +} + +impl MappingTriggerTrait for StarknetTrigger { + fn error_context(&self) -> String { + match self { + Self::Block(block) => format!("block #{}", block.block.height), + Self::Event(event) => { + format!( + "event from {}", + match FieldElement::from_byte_slice_be(&event.event.from_addr) { + Ok(from_addr) => format!("{from_addr:#x}"), + Err(_) => "[unable to parse source address]".into(), + } + ) + } + } + } +} diff --git a/core/Cargo.toml b/core/Cargo.toml index bfcf4212b5a..a5dc1b03150 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -18,6 +18,7 @@ graph-chain-ethereum = { path = "../chain/ethereum" } graph-chain-near = { path = "../chain/near" } graph-chain-cosmos = { path = "../chain/cosmos" } 
graph-chain-substreams = { path = "../chain/substreams" } +graph-chain-starknet = { path = "../chain/starknet" } lazy_static = "1.2.0" lru_time_cache = "0.11" semver = "1.0.18" diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index c91a6889a09..80751e7105c 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -119,6 +119,20 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< self.start_subgraph_inner(logger, loc, runner).await } + BlockchainKind::Starknet => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(SubgraphTriggerProcessor {}), + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } } }; diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index a0196cd2c83..1bd9948b1de 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -406,6 +406,24 @@ where ) .await? } + BlockchainKind::Starknet => { + create_subgraph_version::( + &logger, + self.store.clone(), + self.chains.cheap_clone(), + name.clone(), + hash.cheap_clone(), + start_block_override, + graft_block_override, + raw, + node_id, + debug_fork, + self.version_switching_mode, + &self.resolver, + history_blocks, + ) + .await? 
+ } }; debug!( diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index a0352de8aaf..837037db019 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -395,6 +395,8 @@ pub enum BlockchainKind { Cosmos, Substreams, + + Starknet, } impl fmt::Display for BlockchainKind { @@ -405,6 +407,7 @@ impl fmt::Display for BlockchainKind { BlockchainKind::Near => "near", BlockchainKind::Cosmos => "cosmos", BlockchainKind::Substreams => "substreams", + BlockchainKind::Starknet => "starknet", }; write!(f, "{}", value) } @@ -420,6 +423,7 @@ impl FromStr for BlockchainKind { "near" => Ok(BlockchainKind::Near), "cosmos" => Ok(BlockchainKind::Cosmos), "substreams" => Ok(BlockchainKind::Substreams), + "starknet" => Ok(BlockchainKind::Starknet), _ => Err(anyhow!("unknown blockchain kind {}", s)), } } diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index 917f4d85d40..d20d1eccde3 100644 --- a/graph/src/runtime/mod.rs +++ b/graph/src/runtime/mod.rs @@ -355,7 +355,20 @@ pub enum IndexForAscTypeId { // ... // LastArweaveType = 3499, - // Reserved discriminant space for a future blockchain type IDs: [3,500, 4,499] + // StarkNet types + StarknetBlock = 3500, + StarknetTransaction = 3501, + StarknetTransactionTypeEnum = 3502, + StarknetEvent = 3503, + StarknetArrayBytes = 3504, + // Continue to add more StarkNet type IDs here. + // e.g.: + // NextStarknetType = 3505, + // AnotherStarknetType = 3506, + // ... 
+ // LastStarknetType = 4499, + + // Reserved discriminant space for a future blockchain type IDs: [4,500, 5,499] // // Generated with the following shell script: // diff --git a/node/Cargo.toml b/node/Cargo.toml index 3d67b08682e..b28fe137b2f 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -27,6 +27,7 @@ graph-chain-ethereum = { path = "../chain/ethereum" } graph-chain-near = { path = "../chain/near" } graph-chain-cosmos = { path = "../chain/cosmos" } graph-chain-substreams = { path = "../chain/substreams" } +graph-chain-starknet = { path = "../chain/starknet" } graph-graphql = { path = "../graphql" } graph-runtime-wasm = { path = "../runtime/wasm" } graph-server-http = { path = "../server/http" } diff --git a/node/src/main.rs b/node/src/main.rs index 0251e91cdc3..4429283661c 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -24,6 +24,7 @@ use graph_chain_arweave::{self as arweave, Block as ArweaveBlock}; use graph_chain_cosmos::{self as cosmos, Block as CosmosFirehoseBlock}; use graph_chain_ethereum as ethereum; use graph_chain_near::{self as near, HeaderOnlyBlock as NearFirehoseHeaderOnlyBlock}; +use graph_chain_starknet::{self as starknet, Block as StarknetBlock}; use graph_chain_substreams as substreams; use graph_core::polling_monitor::{arweave_service, ipfs_service}; use graph_core::{ @@ -373,6 +374,15 @@ async fn main() { .remove(&BlockchainKind::Substreams) .unwrap_or_else(FirehoseNetworks::new); + let (starknet_networks, starknet_idents) = connect_firehose_networks::( + &logger, + firehose_networks_by_kind + .remove(&BlockchainKind::Starknet) + .unwrap_or_else(FirehoseNetworks::new), + ) + .await + .unwrap(); + let substream_idents = substreams_networks .networks .keys() @@ -397,6 +407,7 @@ async fn main() { network_identifiers.extend(near_idents); network_identifiers.extend(cosmos_idents); network_identifiers.extend(substream_idents); + network_identifiers.extend(starknet_idents); let network_store = 
store_builder.network_store(network_identifiers); @@ -476,6 +487,17 @@ async fn main() { metrics_registry.clone(), ); + let starknet_chains = networks_as_chains::( + &env_vars, + &mut blockchain_map, + &logger, + &starknet_networks, + substreams_networks_by_kind.get(&BlockchainKind::Starknet), + network_store.as_ref(), + &logger_factory, + metrics_registry.clone(), + ); + let blockchain_map = Arc::new(blockchain_map); let shards: Vec<_> = config.stores.keys().cloned().collect(); @@ -515,7 +537,8 @@ async fn main() { arweave_chains, near_chains, cosmos_chains, - substreams_chains + substreams_chains, + starknet_chains ); ingestors.into_iter().for_each(|ingestor| { diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index eb6ae794852..b862bf04cb0 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -13,6 +13,7 @@ graph-chain-arweave = { path = "../../chain/arweave" } graph-chain-ethereum = { path = "../../chain/ethereum" } graph-chain-near = { path = "../../chain/near" } graph-chain-cosmos = { path = "../../chain/cosmos" } +graph-chain-starknet = { path = "../../chain/starknet" } graphql-parser = "0.4.0" http = "0.2" hyper = "0.14" diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 6f2b2d4e365..ea995ed5f0e 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -552,6 +552,7 @@ impl IndexNodeResolver { try_resolve_for_chain!(graph_chain_arweave::Chain); try_resolve_for_chain!(graph_chain_cosmos::Chain); try_resolve_for_chain!(graph_chain_near::Chain); + try_resolve_for_chain!(graph_chain_starknet::Chain); // If you're adding support for a new chain and this `match` clause just // gave you a compiler error, then this message is for you! 
You need to @@ -563,7 +564,8 @@ impl IndexNodeResolver { | BlockchainKind::Arweave | BlockchainKind::Ethereum | BlockchainKind::Cosmos - | BlockchainKind::Near => (), + | BlockchainKind::Near + | BlockchainKind::Starknet => (), } // The given network does not exist. From 2bfd348fef41e2f607545f6d75a0c075953e3a3e Mon Sep 17 00:00:00 2001 From: Leonardo Yvens Date: Tue, 10 Oct 2023 14:40:32 +0100 Subject: [PATCH 0458/2104] fix(runtime): improve error message for timeouts (#4686) --- runtime/test/src/test/abi.rs | 13 ++++++------- runtime/wasm/src/lib.rs | 3 --- runtime/wasm/src/module/mod.rs | 16 ++++++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs index 43cda3ccdac..3c7c2ea13f4 100644 --- a/runtime/test/src/test/abi.rs +++ b/runtime/test/src/test/abi.rs @@ -1,10 +1,6 @@ use graph::prelude::{ethabi::Token, web3::types::U256}; -use graph_runtime_wasm::{ - asc_abi::class::{ - ArrayBuffer, AscAddress, AscEnum, AscEnumArray, EthereumValueKind, StoreValueKind, - TypedArray, - }, - TRAP_TIMEOUT, +use graph_runtime_wasm::asc_abi::class::{ + ArrayBuffer, AscAddress, AscEnum, AscEnumArray, EthereumValueKind, StoreValueKind, TypedArray, }; use super::*; @@ -23,7 +19,10 @@ async fn test_unbounded_loop(api_version: Version) { .await .0; let res: Result<(), _> = module.get_func("loop").typed().unwrap().call(()); - assert!(res.unwrap_err().to_string().contains(TRAP_TIMEOUT)); + assert_eq!( + res.unwrap_err().to_string().lines().next().unwrap(), + "wasm trap: interrupt" + ); } #[tokio::test(flavor = "multi_thread")] diff --git a/runtime/wasm/src/lib.rs b/runtime/wasm/src/lib.rs index 2a365b9468f..a9b28f872f1 100644 --- a/runtime/wasm/src/lib.rs +++ b/runtime/wasm/src/lib.rs @@ -21,6 +21,3 @@ pub use host::RuntimeHostBuilder; pub use host_exports::HostExports; pub use mapping::{MappingContext, ValidModule}; pub use module::{ExperimentalFeatures, WasmInstance}; - -#[cfg(debug_assertions)] -pub use 
module::TRAP_TIMEOUT; diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 85aa8c151aa..ee17e5b5533 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -14,7 +14,7 @@ use graph::data::value::Word; use graph::slog::SendSyncRefUnwindSafeKV; use never::Never; use semver::Version; -use wasmtime::{Memory, Trap}; +use wasmtime::{Memory, Trap, TrapCode}; use graph::blockchain::{Blockchain, HostFnCtx}; use graph::data::store; @@ -43,8 +43,6 @@ use crate::mapping::ValidModule; mod into_wasm_ret; pub mod stopwatch; -pub const TRAP_TIMEOUT: &str = "trap: interrupt"; - // Convenience for a 'top-level' asc_get, with depth 0. fn asc_get( heap: &H, @@ -266,8 +264,14 @@ impl WasmInstance { return Err(MappingError::PossibleReorg(trap.into())); } - // Treat as a special case to have a better error message. - Err(trap) if trap.to_string().contains(TRAP_TIMEOUT) => { + // Treat timeouts anywhere in the error chain as a special case to have a better error + // message. Any `TrapCode::Interrupt` is assumed to be a timeout. 
+ Err(trap) + if Error::from(trap.clone()).chain().any(|e| { + e.downcast_ref::().and_then(|t| t.trap_code()) + == Some(TrapCode::Interrupt) + }) => + { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(Error::from(trap).context(format!( "Handler '{}' hit the timeout of '{}' seconds", @@ -280,7 +284,7 @@ impl WasmInstance { is_trap_deterministic(&trap) || self.instance_ctx().deterministic_host_trap; let e = Error::from(trap); match trap_is_deterministic { - true => Some(Error::from(e)), + true => Some(e), false => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(e)); From 17f94a3c74287173c0e61b91ee51320e3918b266 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 11:10:40 +0100 Subject: [PATCH 0459/2104] build(deps): bump indexmap from 2.0.0 to 2.0.2 (#4907) Bumps [indexmap](https://github.com/bluss/indexmap) from 2.0.0 to 2.0.2. - [Changelog](https://github.com/bluss/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/bluss/indexmap/compare/2.0.0...2.0.2) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62019be9ed4..261cbe93b6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1832,7 +1832,7 @@ dependencies = [ "graph", "graphql-parser", "graphql-tools", - "indexmap 2.0.0", + "indexmap 2.0.2", "lazy_static", "parking_lot 0.12.1", "stable-hash 0.3.4", @@ -2109,9 +2109,9 @@ checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" [[package]] name = "hdrhistogram" @@ -2443,12 +2443,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "serde", ] @@ -3234,7 +3234,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.0", + "indexmap 2.0.2", ] [[package]] @@ -4091,7 +4091,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_json", "time", @@ -4908,7 +4908,7 @@ version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", 
"serde_spanned", "toml_datetime", From 0679b32b2aacfbde257686864b7c81bdd0978238 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 11:10:58 +0100 Subject: [PATCH 0460/2104] build(deps): bump atomic_refcell from 0.1.11 to 0.1.12 (#4906) Bumps [atomic_refcell](https://github.com/bholley/atomic_refcell) from 0.1.11 to 0.1.12. - [Commits](https://github.com/bholley/atomic_refcell/commits) --- updated-dependencies: - dependency-name: atomic_refcell dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- core/Cargo.toml | 2 +- graph/Cargo.toml | 2 +- runtime/wasm/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 261cbe93b6c..b64ad08daee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "atomic_refcell" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112ef6b3f6cb3cb6fc5b6b494ef7a848492cff1ab0ef4de10b0f7d572861c905" +checksum = "76f2bfe491d41d45507b8431da8274f7feeca64a49e86d980eed2937ec2ff020" [[package]] name = "atty" diff --git a/core/Cargo.toml b/core/Cargo.toml index a5dc1b03150..a2be5a6e33e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = "0.1.11" +atomic_refcell = "0.1.12" async-stream = "0.3" bytes = "1.0" futures01 = { package = "futures", version = "0.1.31" } diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 60d7a31f251..20c2fb5275e 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true anyhow = "1.0" async-trait = "0.1.50" async-stream = "0.3" -atomic_refcell = "0.1.11" +atomic_refcell = "0.1.12" bigdecimal = { 
version = "0.1.0", features = ["serde"] } bytes = "1.0.1" cid = "0.10.1" diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index 928832c198b..7fc0fb3dc02 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = "0.1.11" +atomic_refcell = "0.1.12" ethabi = "17.2" futures = "0.1.21" hex = "0.4.3" From 19fd41bb48511f889dc94f5d82e16cd492f29da1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 11:11:15 +0100 Subject: [PATCH 0461/2104] build(deps): bump webpki from 0.22.1 to 0.22.2 (#4905) Bumps [webpki](https://github.com/briansmith/webpki) from 0.22.1 to 0.22.2. - [Commits](https://github.com/briansmith/webpki/commits) --- updated-dependencies: - dependency-name: webpki dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b64ad08daee..e8c618a798c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5689,9 +5689,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", From 2db455e655c454433cbaf69f423c3db0a44fbd16 Mon Sep 17 00:00:00 2001 From: Krishnanand V P <44740264+incrypto32@users.noreply.github.com> Date: Tue, 24 Oct 2023 17:54:30 +0530 Subject: [PATCH 0462/2104] Allow querying subgraphs with more than one '/' in their name (#4926) * server: Remove the limit of subgraph name structure * server: Remove `network` endpoint from server --- server/http/src/service.rs | 13 +++---------- 1 file 
changed, 3 insertions(+), 10 deletions(-) diff --git a/server/http/src/service.rs b/server/http/src/service.rs index f32d67cf0e4..d589ddbcd47 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -378,21 +378,14 @@ where (Method::POST, &["subgraphs", "name", subgraph_name]) => self .handle_graphql_query_by_name(subgraph_name.to_owned(), req) .boxed(), - (Method::POST, ["subgraphs", "name", subgraph_name_part1, subgraph_name_part2]) => { - let subgraph_name = format!("{}/{}", subgraph_name_part1, subgraph_name_part2); - self.handle_graphql_query_by_name(subgraph_name, req) - .boxed() - } - (Method::POST, ["subgraphs", "network", subgraph_name_part1, subgraph_name_part2]) => { - let subgraph_name = - format!("network/{}/{}", subgraph_name_part1, subgraph_name_part2); + (Method::POST, ["subgraphs", "name", ..]) => { + let subgraph_name = path_segments[2..].join("/"); self.handle_graphql_query_by_name(subgraph_name, req) .boxed() } (Method::OPTIONS, ["subgraphs", "name", _]) - | (Method::OPTIONS, ["subgraphs", "name", _, _]) - | (Method::OPTIONS, ["subgraphs", "network", _, _]) => self.handle_graphql_options(req), + | (Method::OPTIONS, ["subgraphs", "name", _, _]) => self.handle_graphql_options(req), _ => self.handle_not_found(), } From 886af97d9f5235e4067e7c7e02834116dcd8a28b Mon Sep 17 00:00:00 2001 From: Krishnanand V P <44740264+incrypto32@users.noreply.github.com> Date: Tue, 24 Oct 2023 21:22:13 +0530 Subject: [PATCH 0463/2104] Graphman deploy command (#4930) * graphman: Add deploy command * node/manager: add create flag to graphman deploy --- node/src/bin/manager.rs | 26 +++++++ node/src/manager/commands/deploy.rs | 105 ++++++++++++++++++++++++++++ node/src/manager/commands/mod.rs | 1 + 3 files changed, 132 insertions(+) create mode 100644 node/src/manager/commands/deploy.rs diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 77e78d45c28..28741b72d97 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ 
-344,6 +344,20 @@ pub enum Command { #[clap(long, short)] force: bool, }, + + // Deploy a subgraph + Deploy { + name: DeploymentSearch, + deployment: DeploymentSearch, + + /// The url of the graph-node + #[clap(long, short, default_value = "http://localhost:8020")] + url: String, + + /// Create the subgraph name if it does not exist + #[clap(long, short)] + create: bool, + }, } impl Command { @@ -1513,6 +1527,18 @@ async fn main() -> anyhow::Result<()> { ) .await } + + Deploy { + deployment, + name, + url, + create, + } => { + let store = ctx.store(); + let subgraph_store = store.subgraph_store(); + + commands::deploy::run(subgraph_store, deployment, name, url, create).await + } } } diff --git a/node/src/manager/commands/deploy.rs b/node/src/manager/commands/deploy.rs new file mode 100644 index 00000000000..5fa187615a5 --- /dev/null +++ b/node/src/manager/commands/deploy.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use graph::prelude::{ + anyhow::{anyhow, bail, Result}, + reqwest, + serde_json::{json, Value}, + SubgraphName, SubgraphStore, +}; + +use crate::manager::deployment::DeploymentSearch; + +// Function to send an RPC request and handle errors +async fn send_rpc_request(url: &str, payload: Value) -> Result<()> { + let client = reqwest::Client::new(); + let response = client.post(url).json(&payload).send().await?; + + if response.status().is_success() { + Ok(()) + } else { + Err(response + .error_for_status() + .expect_err("Failed to parse error response") + .into()) + } +} + +// Function to send subgraph_create request +async fn send_create_request(name: &str, url: &str) -> Result<()> { + // Construct the JSON payload for subgraph_create + let create_payload = json!({ + "jsonrpc": "2.0", + "method": "subgraph_create", + "params": { + "name": name, + }, + "id": "1" + }); + + // Send the subgraph_create request + send_rpc_request(url, create_payload) + .await + .map_err(|e| e.context(format!("Failed to create subgraph with name `{}`", name))) +} + +// Function 
to send subgraph_deploy request +async fn send_deploy_request(name: &str, deployment: &str, url: &str) -> Result<()> { + // Construct the JSON payload for subgraph_deploy + let deploy_payload = json!({ + "jsonrpc": "2.0", + "method": "subgraph_deploy", + "params": { + "name": name, + "ipfs_hash": deployment, + }, + "id": "1" + }); + + // Send the subgraph_deploy request + send_rpc_request(url, deploy_payload).await.map_err(|e| { + e.context(format!( + "Failed to deploy subgraph `{}` to `{}`", + deployment, name + )) + }) +} +pub async fn run( + subgraph_store: Arc, + deployment: DeploymentSearch, + search: DeploymentSearch, + url: String, + create: bool, +) -> Result<()> { + let hash = match deployment { + DeploymentSearch::Hash { hash, shard: _ } => hash, + _ => bail!("The `deployment` argument must be a valid IPFS hash"), + }; + + let name = match search { + DeploymentSearch::Name { name } => name, + _ => bail!("The `name` must be a valid subgraph name"), + }; + + if create { + println!("Creating subgraph `{}`", name); + let subgraph_name = + SubgraphName::new(name.clone()).map_err(|_| anyhow!("Invalid subgraph name"))?; + + let exists = subgraph_store.subgraph_exists(&subgraph_name)?; + + if exists { + bail!("Subgraph with name `{}` already exists", name); + } + + // Send the subgraph_create request + send_create_request(&name, &url).await?; + println!("Subgraph `{}` created", name); + } + + // Send the subgraph_deploy request + println!("Deploying subgraph `{}` to `{}`", hash, name); + send_deploy_request(&name, &hash, &url).await?; + println!("Subgraph `{}` deployed to `{}`", name, url); + + Ok(()) +} diff --git a/node/src/manager/commands/mod.rs b/node/src/manager/commands/mod.rs index de7267da828..14fd7632d59 100644 --- a/node/src/manager/commands/mod.rs +++ b/node/src/manager/commands/mod.rs @@ -5,6 +5,7 @@ pub mod config; pub mod copy; pub mod create; pub mod database; +pub mod deploy; pub mod drop; pub mod index; pub mod info; From 
e9ce0df364695e8c7f64846c16a94ee276c94401 Mon Sep 17 00:00:00 2001 From: Saihajpreet Singh Date: Tue, 24 Oct 2023 12:43:14 -0500 Subject: [PATCH 0464/2104] feat(graphql): update`graphiql` to `v2` (#4677) * graphql: update graphiql to v2 * update to latest version * fix logo --- server/http/assets/index.html | 87053 +------------------------------- 1 file changed, 21 insertions(+), 87032 deletions(-) diff --git a/server/http/assets/index.html b/server/http/assets/index.html index 7b049eaa2b7..0039849933c 100644 --- a/server/http/assets/index.html +++ b/server/http/assets/index.html @@ -3,87049 +3,38 @@ The GraphiQL - + +
- - - - + + From b0d7ca8bf08c5274d0a2ec45f6286801c82b68e9 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Tue, 24 Oct 2023 13:51:44 -0700 Subject: [PATCH 0465/2104] graph, store: Do not create GIN indexes on array attributes --- graph/src/env/store.rs | 6 +++++ store/postgres/src/relational/ddl.rs | 30 ++++++++++++---------- store/postgres/src/relational/ddl_tests.rs | 8 ------ 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 48150df9f4c..1548a1f1ba6 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -109,6 +109,9 @@ pub struct EnvVarsStore { /// is 10_000 which corresponds to 10MB. Setting this to 0 disables /// write batching. pub write_batch_size: usize, + /// Whether to create GIN indexes for array attributes. Set by + /// `GRAPH_STORE_CREATE_GIN_INDEXES`. The default is `false` + pub create_gin_indexes: bool, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -150,6 +153,7 @@ impl From for EnvVarsStore { history_slack_factor: x.history_slack_factor.0, write_batch_duration: Duration::from_secs(x.write_batch_duration_in_secs), write_batch_size: x.write_batch_size * 1_000, + create_gin_indexes: x.create_gin_indexes, } } } @@ -203,6 +207,8 @@ pub struct InnerStore { write_batch_duration_in_secs: u64, #[envconfig(from = "GRAPH_STORE_WRITE_BATCH_SIZE", default = "10000")] write_batch_size: usize, + #[envconfig(from = "GRAPH_STORE_CREATE_GIN_INDEXES", default = "false")] + create_gin_indexes: bool, } #[derive(Clone, Copy, Debug)] diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index 8c6a2ceab90..a63dd525a31 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -3,7 +3,7 @@ use std::{ iter, }; -use graph::prelude::BLOCK_NUMBER_MAX; +use graph::prelude::{BLOCK_NUMBER_MAX, ENV_VARS}; use crate::block_range::CAUSALITY_REGION_COLUMN; use crate::relational::{ @@ 
-225,7 +225,7 @@ impl Table { .filter(not_immutable_pk) .filter(not_numeric_list); - for (i, column) in columns.enumerate() { + for (column_index, column) in columns.enumerate() { let (method, index_expr) = if column.is_reference() && !column.is_list() { // For foreign keys, index the key together with the block range // since we almost always also have a block_range clause in @@ -268,17 +268,21 @@ impl Table { (method, index_expr) }; - write!( - out, - "create index attr_{table_index}_{column_index}_{table_name}_{column_name}\n on {qname} using {method}({index_expr});\n", - table_index = self.position, - table_name = self.name, - column_index = i, - column_name = column.name, - qname = self.qualified_name, - method = method, - index_expr = index_expr, - )?; + // If `create_gin_indexes` is set to false, we don't create + // indexes on array attributes. Experience has shown that these + // indexes are very expensive to update and can have a very bad + // impact on the write performance of the database, but are + // hardly ever used or needed by queries. 
+ if !column.is_list() || ENV_VARS.store.create_gin_indexes { + write!( + out, + "create index attr_{table_index}_{column_index}_{table_name}_{column_name}\n on {qname} using {method}({index_expr});\n", + table_index = self.position, + table_name = self.name, + column_name = column.name, + qname = self.qualified_name, + )?; + } } writeln!(out) } diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index b3e08c42468..03efa93fae2 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -346,8 +346,6 @@ create index attr_0_1_musician_name on "sgd0815"."musician" using btree(left("name", 256)); create index attr_0_2_musician_main_band on "sgd0815"."musician" using gist("main_band", block_range); -create index attr_0_3_musician_bands - on "sgd0815"."musician" using gin("bands"); create table "sgd0815"."band" ( vid bigserial primary key, @@ -368,8 +366,6 @@ create index attr_1_0_band_id on "sgd0815"."band" using btree("id"); create index attr_1_1_band_name on "sgd0815"."band" using btree(left("name", 256)); -create index attr_1_2_band_original_songs - on "sgd0815"."band" using gin("original_songs"); create table "sgd0815"."song" ( vid bigserial primary key, @@ -484,8 +480,6 @@ create index attr_2_0_habitat_id on "sgd0815"."habitat" using btree("id"); create index attr_2_1_habitat_most_common on "sgd0815"."habitat" using gist("most_common", block_range); -create index attr_2_2_habitat_dwellers - on "sgd0815"."habitat" using gin("dwellers"); "#; const FULLTEXT_GQL: &str = r#" @@ -583,8 +577,6 @@ create index attr_2_0_habitat_id on "sgd0815"."habitat" using btree("id"); create index attr_2_1_habitat_most_common on "sgd0815"."habitat" using gist("most_common", block_range); -create index attr_2_2_habitat_dwellers - on "sgd0815"."habitat" using gin("dwellers"); "#; From 246cde635e9d319966be297d63bc324f568fdb37 Mon Sep 17 00:00:00 2001 From: Krishnanand V P 
<44740264+incrypto32@users.noreply.github.com> Date: Wed, 25 Oct 2023 18:16:23 +0530 Subject: [PATCH 0466/2104] v0.33.0 (#4886) * cargo: update workspace crates' version to v0.33.0 * update NEWS.md for v0.33.0 * Add new API Version to validate when setting fields not defined in the schema (#4894) * build(deps): bump chrono from 0.4.26 to 0.4.31 (#4876) Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.26 to 0.4.31. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.26...v0.4.31) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump webpki from 0.22.0 to 0.22.1 (#4857) Bumps [webpki](https://github.com/briansmith/webpki) from 0.22.0 to 0.22.1. - [Commits](https://github.com/briansmith/webpki/commits) --- updated-dependencies: - dependency-name: webpki dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * runtime: only include valid fields in entity for store_set * graph, runtime: add new apiVersion to validate fields not defined in the schema * graph: update tests for setting invalid field * tests: add runner tests for undefined field setting validation in apiVersion 0.0.8 * graph: add check_invalid_fields method to HostExports --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * update NEWS.md * tests: add .gitignore for api-version test --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 48 ++++----- Cargo.toml | 2 +- NEWS.md | 43 ++++++++ graph/src/data/store/mod.rs | 11 -- graph/src/data/subgraph/api_version.rs | 3 + graph/src/env/mappings.rs | 4 +- graph/src/schema/entity_type.rs | 2 +- graph/src/schema/input_schema.rs | 8 ++ runtime/test/src/test.rs | 50 +++++++-- runtime/wasm/src/host_exports.rs | 61 ++++++++++- tests/runner-tests/api-version/.gitignore | 1 + .../api-version/abis/Contract.abi | 15 +++ .../runner-tests/api-version/data.0.0.7.json | 3 + .../runner-tests/api-version/data.0.0.8.json | 3 + tests/runner-tests/api-version/package.json | 29 +++++ tests/runner-tests/api-version/schema.graphql | 4 + tests/runner-tests/api-version/src/mapping.ts | 15 +++ .../api-version/subgraph.template.yaml | 23 ++++ tests/runner-tests/api-version/subgraph.yaml | 23 ++++ tests/tests/runner_tests.rs | 100 +++++++++++++++++- 20 files changed, 396 insertions(+), 52 deletions(-) create mode 100644 tests/runner-tests/api-version/.gitignore create mode 100644 tests/runner-tests/api-version/abis/Contract.abi create mode 100644 tests/runner-tests/api-version/data.0.0.7.json create mode 100644 tests/runner-tests/api-version/data.0.0.8.json create mode 100644 
tests/runner-tests/api-version/package.json create mode 100644 tests/runner-tests/api-version/schema.graphql create mode 100644 tests/runner-tests/api-version/src/mapping.ts create mode 100644 tests/runner-tests/api-version/subgraph.template.yaml create mode 100644 tests/runner-tests/api-version/subgraph.yaml diff --git a/Cargo.lock b/Cargo.lock index e8c618a798c..fe26cb85071 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1594,7 +1594,7 @@ dependencies = [ [[package]] name = "graph" -version = "0.32.0" +version = "0.33.0" dependencies = [ "Inflector", "anyhow", @@ -1662,7 +1662,7 @@ dependencies = [ [[package]] name = "graph-chain-arweave" -version = "0.32.0" +version = "0.33.0" dependencies = [ "base64-url", "diesel", @@ -1678,7 +1678,7 @@ dependencies = [ [[package]] name = "graph-chain-common" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "heck", @@ -1688,7 +1688,7 @@ dependencies = [ [[package]] name = "graph-chain-cosmos" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "graph", @@ -1704,7 +1704,7 @@ dependencies = [ [[package]] name = "graph-chain-ethereum" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "base64 0.20.0", @@ -1730,7 +1730,7 @@ dependencies = [ [[package]] name = "graph-chain-near" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "base64 0.20.0", @@ -1747,7 +1747,7 @@ dependencies = [ [[package]] name = "graph-chain-starknet" -version = "0.32.0" +version = "0.33.0" dependencies = [ "graph", "graph-runtime-derive", @@ -1763,7 +1763,7 @@ dependencies = [ [[package]] name = "graph-chain-substreams" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "async-stream", @@ -1789,7 +1789,7 @@ dependencies = [ [[package]] name = "graph-core" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "async-stream", @@ -1822,7 +1822,7 @@ dependencies = [ [[package]] name = "graph-graphql" -version = "0.32.0" +version = "0.33.0" dependencies = [ "Inflector", 
"anyhow", @@ -1841,7 +1841,7 @@ dependencies = [ [[package]] name = "graph-node" -version = "0.32.0" +version = "0.33.0" dependencies = [ "clap", "diesel", @@ -1877,7 +1877,7 @@ dependencies = [ [[package]] name = "graph-runtime-derive" -version = "0.32.0" +version = "0.33.0" dependencies = [ "heck", "proc-macro2", @@ -1887,7 +1887,7 @@ dependencies = [ [[package]] name = "graph-runtime-test" -version = "0.32.0" +version = "0.33.0" dependencies = [ "graph", "graph-chain-ethereum", @@ -1902,7 +1902,7 @@ dependencies = [ [[package]] name = "graph-runtime-wasm" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "async-trait", @@ -1928,7 +1928,7 @@ dependencies = [ [[package]] name = "graph-server-http" -version = "0.32.0" +version = "0.33.0" dependencies = [ "futures 0.1.31", "graph", @@ -1942,7 +1942,7 @@ dependencies = [ [[package]] name = "graph-server-index-node" -version = "0.32.0" +version = "0.33.0" dependencies = [ "blake3 1.5.0", "either", @@ -1964,7 +1964,7 @@ dependencies = [ [[package]] name = "graph-server-json-rpc" -version = "0.32.0" +version = "0.33.0" dependencies = [ "graph", "jsonrpsee", @@ -1973,7 +1973,7 @@ dependencies = [ [[package]] name = "graph-server-metrics" -version = "0.32.0" +version = "0.33.0" dependencies = [ "graph", "hyper", @@ -1981,7 +1981,7 @@ dependencies = [ [[package]] name = "graph-server-websocket" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "futures 0.1.31", @@ -1997,7 +1997,7 @@ dependencies = [ [[package]] name = "graph-store-postgres" -version = "0.32.0" +version = "0.33.0" dependencies = [ "Inflector", "anyhow", @@ -2032,7 +2032,7 @@ dependencies = [ [[package]] name = "graph-tests" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "assert-json-diff", @@ -4464,7 +4464,7 @@ dependencies = [ [[package]] name = "substreams-trigger-filter" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", "hex", @@ -4586,7 +4586,7 @@ dependencies = [ [[package]] name = 
"test-store" -version = "0.32.0" +version = "0.33.0" dependencies = [ "diesel", "graph", @@ -5102,7 +5102,7 @@ dependencies = [ [[package]] name = "trigger-filters" -version = "0.32.0" +version = "0.33.0" dependencies = [ "anyhow", ] diff --git a/Cargo.toml b/Cargo.toml index 6d6142b5e83..1b49e8b2194 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ members = [ ] [workspace.package] -version = "0.32.0" +version = "0.33.0" edition = "2021" authors = ["The Graph core developers & contributors"] readme = "README.md" diff --git a/NEWS.md b/NEWS.md index 31f7c0bbf87..98fcc2a4b6d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,6 +2,49 @@ ## Unreleased +## v0.33.0 + +### What's New + +- **Arweave file data sources** - Arweave file data sources allow subgraph developers to access offchain data from Arweave from within the subgraph mappings.[(#4789)](https://github.com/graphprotocol/graph-node/pull/4789) +- **Major performance boost for substreams-based subgraphs** - Significant performance improvements have been achieved for substreams-based subgraphs by moving substreams processing to the block stream.[(#4851)](https://github.com/graphprotocol/graph-node/pull/4851) +- **Polling block handler** - A new block handler filter `polling` for `ethereum` data sources which enables subgraph developers to run a block handler at defined block intervals. This is useful for use cases such as taking periodic snapshots of the contract state.[(#4725)](https://github.com/graphprotocol/graph-node/pull/4725) +- **Initialization handler** - A new block handler filter `once` for `ethereum` data sources which enables subgraph developers to create a handler which will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. 
[(#4725)](https://github.com/graphprotocol/graph-node/pull/4725) +- **DataSourceContext in manifest** - `DataSourceContext` in Manifest - DataSourceContext can now be defined in the subgraph manifest. It's a free-form map accessible from the mapping. This feature is useful for templating chain-specific data in subgraphs that use the same codebase across multiple chains.[(#4848)](https://github.com/graphprotocol/graph-node/pull/4848) +- `graph-node` version in index node API - The Index Node API now features a new query, Version, which can be used to query the current graph-node version and commit. [(#4852)](https://github.com/graphprotocol/graph-node/pull/4852) +- Added a '`paused`' field to Index Node API, a boolean indicating the subgraph’s pause status. [(#4779)](https://github.com/graphprotocol/graph-node/pull/4779) +- Proof of Indexing logs now include block number [(#4798)](https://github.com/graphprotocol/graph-node/pull/4798) +- `subgraph_features` table now tracks details about handlers used in a subgraph [(#4820)](https://github.com/graphprotocol/graph-node/pull/4820) +- Configurable SSL for Postgres in Dockerfile - ssl-mode for Postgres can now be configured via the connection string when deploying through Docker, offering enhanced flexibility in database security settings.[(#4840)](https://github.com/graphprotocol/graph-node/pull/4840) +- Introspection Schema Update - The introspection schema has been updated to align with the October 2021 GraphQL specification update.[(#4676)](https://github.com/graphprotocol/graph-node/pull/4676) +- `trace_id` Added to Substreams Logger [(#4868)](https://github.com/graphprotocol/graph-node/pull/4868) +- New apiVersion for Mapping Validation - The latest apiVersion 0.0.8 validates that fields set in entities from the mappings are actually defined in the schema. This fixes a source of non-deterministic PoI. Subgraphs using this new API version will fail if they try to set undefined schema fields in the mappings. 
It's strongly recommended to update to 0.0.8 to avoid these issues. [(#4894)](https://github.com/graphprotocol/graph-node/pull/4894) +- Substreams Block Ingestor Support - Added the ability to run a pure substreams chain by introducing a block ingestor for substreams-only chains. This feature allows users to run a chain with just a single substreams endpoint, enhancing support beyond RPC and firehose. Prior to this, a pure substreams chain couldn’t be synced.[(#4839)](https://github.com/graphprotocol/graph-node/pull/4839) + +### Bug fixes + +- Fix for rewinding dynamic data source - Resolved an issue where a rewind would fail to properly remove dynamic data sources when using `graphman rewind`. This has been fixed to ensure correct behavior.[(#4810)](https://github.com/graphprotocol/graph-node/pull/4810) +- Improved Deployment Reliability with Retry Mechanism - A retry feature has been added to the block_pointer_from_number function to enhance the robustness of subgraph deployments. This resolves occasional failures encountered during deployment processes.[(#4812)](https://github.com/graphprotocol/graph-node/pull/4812) +- Fixed Cross-Shard Grafting Issue - Addressed a bug that prevented cross-shard grafting from starting, causing the copy operation to stall at 0% progress. This issue occurred when a new shard was added after the primary shard had already been configured. The fix ensures that foreign tables and schemas are correctly set up in new shards.
For existing installations experiencing this issue, it can be resolved by running `graphman database remap`.[(#4845)](https://github.com/graphprotocol/graph-node/pull/4845) +- Fixed a Full-text search regression - Reverted a previous commit (ad1c6ea) that inadvertently limited the number of populated search indexes per entity.[(#4808)](https://github.com/graphprotocol/graph-node/pull/4808) +- Attestable Error for Nested Child Filters - Nested child filter queries now return an attestable `ChildFilterNestingNotSupportedError`, improving error reporting for users.[(#4828)](https://github.com/graphprotocol/graph-node/pull/4828) + +### Graphman +- **Index on prefixed fields** - The graphman index create command now correctly indexes prefixed fields of type String and Bytes for more query-efficient combined indexes. Note: For fields that are references to entities, the behavior may differ. The command may create an index using left(..) when it should index the column directly. +- **Partial Indexing for Recent Blocks** - The graphman index create command now includes a `--after $recent_block` flag for creating partial indexes focused on recent blocks. This enhances query performance similar to the effects of pruning. 
Queries using these partial indexes must include a specific clause for optimal performance.[(#4830)](https://github.com/graphprotocol/graph-node/pull/4830) + + + +**Full Changelog**: https://github.com/graphprotocol/graph-node/compare/v0.33.0...e253ee14cda2d8456a86ae8f4e3f74a1a7979953 + ## v0.32.0 ### What's New diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 395898b37dd..5ffffd7edcd 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -657,9 +657,6 @@ impl>> TryIntoEntityIterator< #[derive(Debug, Error, PartialEq, Eq, Clone)] pub enum EntityValidationError { - #[error("The provided entity has fields not defined in the schema for entity `{entity}`")] - FieldsNotDefined { entity: String }, - #[error("Entity {entity}[{id}]: unknown entity type `{entity}`")] UnknownEntityType { entity: String, id: String }, @@ -918,14 +915,6 @@ impl Entity { } })?; - for field in self.0.atoms() { - if !key.entity_type.has_field(field) { - return Err(EntityValidationError::FieldsNotDefined { - entity: key.entity_type.to_string(), - }); - } - } - for field in &object_type.fields { let is_derived = field.is_derived(); match (self.get(&field.name), is_derived) { diff --git a/graph/src/data/subgraph/api_version.rs b/graph/src/data/subgraph/api_version.rs index f4a62b512dd..5e642719d95 100644 --- a/graph/src/data/subgraph/api_version.rs +++ b/graph/src/data/subgraph/api_version.rs @@ -15,6 +15,9 @@ pub const API_VERSION_0_0_6: Version = Version::new(0, 0, 6); /// Enables event handlers to require transaction receipts in the runtime. pub const API_VERSION_0_0_7: Version = Version::new(0, 0, 7); +/// Enables validation for fields that don't exist in the schema for an entity. +pub const API_VERSION_0_0_8: Version = Version::new(0, 0, 8); + /// Before this check was introduced, there were already subgraphs in the wild with spec version /// 0.0.3, due to confusion with the api version.
To avoid breaking those, we accept 0.0.3 though it /// doesn't exist. diff --git a/graph/src/env/mappings.rs b/graph/src/env/mappings.rs index 25c224bb229..6f7e5022ab3 100644 --- a/graph/src/env/mappings.rs +++ b/graph/src/env/mappings.rs @@ -16,7 +16,7 @@ pub struct EnvVarsMapping { /// kilobytes). The default value is 10 megabytes. pub entity_cache_size: usize, /// Set by the environment variable `GRAPH_MAX_API_VERSION`. The default - /// value is `0.0.7`. + /// value is `0.0.8`. pub max_api_version: Version, /// Set by the environment variable `GRAPH_MAPPING_HANDLER_TIMEOUT` /// (expressed in seconds). No default is provided. @@ -93,7 +93,7 @@ pub struct InnerMappingHandlers { entity_cache_dead_weight: EnvVarBoolean, #[envconfig(from = "GRAPH_ENTITY_CACHE_SIZE", default = "10000")] entity_cache_size_in_kb: usize, - #[envconfig(from = "GRAPH_MAX_API_VERSION", default = "0.0.7")] + #[envconfig(from = "GRAPH_MAX_API_VERSION", default = "0.0.8")] max_api_version: Version, #[envconfig(from = "GRAPH_MAPPING_HANDLER_TIMEOUT")] mapping_handler_timeout_in_secs: Option, diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index b57f0cf757e..4fa597fe48d 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -23,7 +23,7 @@ use super::{input_schema::POI_OBJECT, EntityKey, InputSchema}; #[derive(Clone)] pub struct EntityType { schema: InputSchema, - atom: Atom, + pub atom: Atom, } impl EntityType { diff --git a/graph/src/schema/input_schema.rs b/graph/src/schema/input_schema.rs index c4ffe1b3fda..2e275dfb0c4 100644 --- a/graph/src/schema/input_schema.rs +++ b/graph/src/schema/input_schema.rs @@ -467,6 +467,14 @@ impl InputSchema { ) }) } + pub fn has_field_with_name(&self, entity_type: &EntityType, field: &str) -> bool { + let field = self.inner.pool.lookup(field); + + match field { + Some(field) => self.has_field(entity_type.atom, field), + None => false, + } + } } /// Create a new pool that contains the names of all 
the types defined diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 984a298fefc..b2574aebe1f 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1226,8 +1226,13 @@ struct Host { } impl Host { - async fn new(schema: &str, deployment_hash: &str, wasm_file: &str) -> Host { - let version = ENV_VARS.mappings.max_api_version.clone(); + async fn new( + schema: &str, + deployment_hash: &str, + wasm_file: &str, + api_version: Option, + ) -> Host { + let version = api_version.unwrap_or(ENV_VARS.mappings.max_api_version.clone()); let wasm_file = wasm_file_path(wasm_file, API_VERSION_0_0_5); let ds = mock_data_source(&wasm_file, version.clone()); @@ -1325,7 +1330,7 @@ async fn test_store_set_id() { name: String, }"; - let mut host = Host::new(schema, "hostStoreSetId", "boolean.wasm").await; + let mut host = Host::new(schema, "hostStoreSetId", "boolean.wasm", None).await; host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) .expect("setting with same id works"); @@ -1428,7 +1433,13 @@ async fn test_store_set_invalid_fields() { test2: String }"; - let mut host = Host::new(schema, "hostStoreSetInvalidFields", "boolean.wasm").await; + let mut host = Host::new( + schema, + "hostStoreSetInvalidFields", + "boolean.wasm", + Some(API_VERSION_0_0_8), + ) + .await; host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) .unwrap(); @@ -1451,8 +1462,7 @@ async fn test_store_set_invalid_fields() { // So we just check the string contains them let err_string = err.to_string(); dbg!(err_string.as_str()); - assert!(err_string - .contains("The provided entity has fields not defined in the schema for entity `User`")); + assert!(err_string.contains("Attempted to set undefined fields [test, test2] for the entity type `User`. Make sure those fields are defined in the schema.")); let err = host .store_set( @@ -1463,8 +1473,30 @@ async fn test_store_set_invalid_fields() { .err() .unwrap(); - err_says( - err, - "Unknown key `test3`. 
It probably is not part of the schema", + err_says(err, "Attempted to set undefined fields [test3] for the entity type `User`. Make sure those fields are defined in the schema."); + + // For apiVersion below 0.0.8, we should not error out + let mut host2 = Host::new( + schema, + "hostStoreSetInvalidFields", + "boolean.wasm", + Some(API_VERSION_0_0_7), ) + .await; + + let err_is_none = host2 + .store_set( + USER, + UID, + vec![ + ("id", "u1"), + ("name", "user1"), + ("test", "invalid_field"), + ("test2", "invalid_field"), + ], + ) + .err() + .is_none(); + + assert!(err_is_none); } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 7edef87be78..4fa8181b482 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -4,6 +4,7 @@ use std::ops::Deref; use std::str::FromStr; use std::time::{Duration, Instant}; +use graph::data::subgraph::API_VERSION_0_0_8; use graph::data::value::Word; use graph::schema::EntityType; @@ -151,6 +152,54 @@ impl HostExports { ))) } + fn check_invalid_fields( + &self, + api_version: Version, + data: &HashMap, + state: &BlockState, + entity_type: &EntityType, + ) -> Result<(), HostExportError> { + if api_version >= API_VERSION_0_0_8 { + let has_invalid_fields = data.iter().any(|(field_name, _)| { + !state + .entity_cache + .schema + .has_field_with_name(entity_type, &field_name) + }); + + if has_invalid_fields { + let mut invalid_fields: Vec = data + .iter() + .filter_map(|(field_name, _)| { + if !state + .entity_cache + .schema + .has_field_with_name(entity_type, &field_name) + { + Some(field_name.clone()) + } else { + None + } + }) + .collect(); + + invalid_fields.sort(); + + return Err(HostExportError::Deterministic(anyhow!( + "Attempted to set undefined fields [{}] for the entity type `{}`. 
Make sure those fields are defined in the schema.", + invalid_fields + .iter() + .map(|f| f.as_str()) + .collect::>() + .join(", "), + entity_type + ))); + } + } + + Ok(()) + } + pub(crate) fn store_set( &self, logger: &Logger, @@ -198,9 +247,19 @@ impl HostExports { } } + self.check_invalid_fields(self.api_version.clone(), &data, state, &key.entity_type)?; + + // Filter out fields that are not in the schema + let filtered_entity_data = data.into_iter().filter(|(field_name, _)| { + state + .entity_cache + .schema + .has_field_with_name(&key.entity_type, field_name) + }); + let entity = state .entity_cache - .make_entity(data.into_iter().map(|(key, value)| (key, value))) + .make_entity(filtered_entity_data) .map_err(|e| HostExportError::Deterministic(anyhow!(e)))?; let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); diff --git a/tests/runner-tests/api-version/.gitignore b/tests/runner-tests/api-version/.gitignore new file mode 100644 index 00000000000..f09b629321f --- /dev/null +++ b/tests/runner-tests/api-version/.gitignore @@ -0,0 +1 @@ +subgraph.yaml \ No newline at end of file diff --git a/tests/runner-tests/api-version/abis/Contract.abi b/tests/runner-tests/api-version/abis/Contract.abi new file mode 100644 index 00000000000..9d9f56b9263 --- /dev/null +++ b/tests/runner-tests/api-version/abis/Contract.abi @@ -0,0 +1,15 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "testCommand", + "type": "string" + } + ], + "name": "TestEvent", + "type": "event" + } +] diff --git a/tests/runner-tests/api-version/data.0.0.7.json b/tests/runner-tests/api-version/data.0.0.7.json new file mode 100644 index 00000000000..d5496551483 --- /dev/null +++ b/tests/runner-tests/api-version/data.0.0.7.json @@ -0,0 +1,3 @@ +{ + "apiVersion": "0.0.7" +} diff --git a/tests/runner-tests/api-version/data.0.0.8.json b/tests/runner-tests/api-version/data.0.0.8.json new file mode 100644 index 
00000000000..f01f6e94057 --- /dev/null +++ b/tests/runner-tests/api-version/data.0.0.8.json @@ -0,0 +1,3 @@ +{ + "apiVersion": "0.0.8" +} diff --git a/tests/runner-tests/api-version/package.json b/tests/runner-tests/api-version/package.json new file mode 100644 index 00000000000..503c7595204 --- /dev/null +++ b/tests/runner-tests/api-version/package.json @@ -0,0 +1,29 @@ +{ + "name": "api-version", + "version": "0.1.0", + "scripts": { + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/api-version --node $GRAPH_NODE_ADMIN_URI", + "prepare:0-0-7": "mustache data.0.0.7.json subgraph.template.yaml > subgraph.yaml", + "prepare:0-0-8": "mustache data.0.0.8.json subgraph.template.yaml > subgraph.yaml", + "deploy:test-0-0-7": "yarn prepare:0-0-7 && graph deploy test/api-version-0-0-7 --version-label 0.0.7 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI", + "deploy:test-0-0-8": "yarn prepare:0-0-8 && graph deploy test/api-version-0-0-8 --version-label 0.0.8 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.53.0", + "@graphprotocol/graph-ts": "0.31.0", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": "^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "mustache": "^4.2.0", + "truffle": "^5.2" + } +} diff --git a/tests/runner-tests/api-version/schema.graphql b/tests/runner-tests/api-version/schema.graphql new file mode 100644 index 00000000000..32db8d43674 --- /dev/null +++ b/tests/runner-tests/api-version/schema.graphql @@ -0,0 +1,4 @@ +type TestResult @entity { + id: ID! + message: String! 
+} diff --git a/tests/runner-tests/api-version/src/mapping.ts b/tests/runner-tests/api-version/src/mapping.ts new file mode 100644 index 00000000000..7a50ee868e6 --- /dev/null +++ b/tests/runner-tests/api-version/src/mapping.ts @@ -0,0 +1,15 @@ +import { Entity, Value, store } from "@graphprotocol/graph-ts"; +import { TestEvent } from "../generated/Contract/Contract"; +import { TestResult } from "../generated/schema"; + +export function handleTestEvent(event: TestEvent): void { + let testResult = new TestResult(event.params.testCommand); + testResult.message = event.params.testCommand; + let testResultEntity = testResult as Entity; + testResultEntity.set( + "invalid_field", + Value.fromString("This is an invalid field"), + ); + store.set("TestResult", testResult.id, testResult); + testResult.save(); +} diff --git a/tests/runner-tests/api-version/subgraph.template.yaml b/tests/runner-tests/api-version/subgraph.template.yaml new file mode 100644 index 00000000000..c1429c63b90 --- /dev/null +++ b/tests/runner-tests/api-version/subgraph.template.yaml @@ -0,0 +1,23 @@ +specVersion: 0.0.4 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: Contract + network: test + source: + address: "0x0000000000000000000000000000000000000000" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: {{apiVersion}} + language: wasm/assemblyscript + abis: + - name: Contract + file: ./abis/Contract.abi + entities: + - Call + eventHandlers: + - event: TestEvent(string) + handler: handleTestEvent + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/runner-tests/api-version/subgraph.yaml b/tests/runner-tests/api-version/subgraph.yaml new file mode 100644 index 00000000000..464a10d3f0c --- /dev/null +++ b/tests/runner-tests/api-version/subgraph.yaml @@ -0,0 +1,23 @@ +specVersion: 0.0.4 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: Contract + network: test + source: + address: 
"0x0000000000000000000000000000000000000000" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.8 + language: wasm/assemblyscript + abis: + - name: Contract + file: ./abis/Contract.abi + entities: + - Call + eventHandlers: + - event: TestEvent(string) + handler: handleTestEvent + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index be12c956929..7411e5f3176 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -41,7 +41,24 @@ impl RunnerTestRecipe { let (stores, hash) = tokio::join!( stores("./runner-tests/config.simple.toml"), - build_subgraph(&test_dir) + build_subgraph(&test_dir, None) + ); + + Self { + stores, + subgraph_name, + hash, + } + } + + /// Builds a new test subgraph with a custom deploy command. + async fn new_with_custom_cmd(subgraph_name: &str, deploy_cmd: &str) -> Self { + let subgraph_name = SubgraphName::new(subgraph_name).unwrap(); + let test_dir = format!("./runner-tests/{}", subgraph_name); + + let (stores, hash) = tokio::join!( + stores("./runner-tests/config.simple.toml"), + build_subgraph(&test_dir, Some(deploy_cmd)) ); Self { @@ -150,6 +167,80 @@ async fn typename() -> anyhow::Result<()> { Ok(()) } +#[tokio::test] +async fn api_version_0_0_7() { + let RunnerTestRecipe { + stores, + subgraph_name, + hash, + } = RunnerTestRecipe::new_with_custom_cmd("api-version", "deploy:test-0-0-7").await; + + // Before apiVersion 0.0.8 we allowed setting fields not defined in the schema. + // This test tests that it is still possible for lower apiVersion subgraphs + // to set fields not defined in the schema. 
+ + let blocks = { + let block_0 = genesis(); + let mut block_1 = empty_block(block_0.ptr(), test_ptr(1)); + push_test_log(&mut block_1, "0.0.7"); + vec![block_0, block_1] + }; + + let stop_block = blocks.last().unwrap().block.ptr(); + + let chain = chain(blocks, &stores, None).await; + let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await; + + ctx.start_and_sync_to(stop_block).await; + + let query_res = ctx + .query(&format!(r#"{{ testResults{{ id, message }} }}"#,)) + .await + .unwrap(); + + assert_json_eq!( + query_res, + Some(object! { + testResults: vec![ + object! { id: "0.0.7", message: "0.0.7" }, + ] + }) + ); +} + +#[tokio::test] +async fn api_version_0_0_8() { + let RunnerTestRecipe { + stores, + subgraph_name, + hash, + } = RunnerTestRecipe::new_with_custom_cmd("api-version", "deploy:test-0-0-8").await; + + // From apiVersion 0.0.8 we disallow setting fields not defined in the schema. + // This test tests that it is not possible to set fields not defined in the schema. + + let blocks = { + let block_0 = genesis(); + let mut block_1 = empty_block(block_0.ptr(), test_ptr(1)); + push_test_log(&mut block_1, "0.0.8"); + vec![block_0, block_1] + }; + + let chain = chain(blocks.clone(), &stores, None).await; + let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await; + let stop_block = blocks.last().unwrap().block.ptr(); + let err = ctx.start_and_sync_to_error(stop_block.clone()).await; + let message = "transaction 0000000000000000000000000000000000000000000000000000000000000000: Attempted to set undefined fields [invalid_field] for the entity type `TestResult`. 
Make sure those fields are defined in the schema.\twasm backtrace:\t 0: 0x2ebc - !src/mapping/handleTestEvent\t in handler `handleTestEvent` at block #1 (0000000000000000000000000000000000000000000000000000000000000001)".to_string(); + let expected_err = SubgraphError { + subgraph_id: ctx.deployment.hash.clone(), + message, + block_ptr: Some(stop_block), + handler: None, + deterministic: true, + }; + assert_eq!(err, expected_err); +} + #[tokio::test] async fn derived_loaders() { let RunnerTestRecipe { @@ -954,8 +1045,11 @@ async fn poi_for_deterministically_failed_sg() -> anyhow::Result<()> { Ok(()) } -async fn build_subgraph(dir: &str) -> DeploymentHash { - build_subgraph_with_yarn_cmd(dir, "deploy:test").await + +/// deploy_cmd is the command to run to deploy the subgraph. If it is None, the +/// default `yarn deploy:test` is used. +async fn build_subgraph(dir: &str, deploy_cmd: Option<&str>) -> DeploymentHash { + build_subgraph_with_yarn_cmd(dir, deploy_cmd.unwrap_or("deploy:test")).await } async fn build_subgraph_with_yarn_cmd(dir: &str, yarn_cmd: &str) -> DeploymentHash { From 99c6770255443b59a6a143839457638213429b90 Mon Sep 17 00:00:00 2001 From: incrypto32 Date: Thu, 26 Oct 2023 09:57:22 +0530 Subject: [PATCH 0467/2104] docker: upgrade cloudbuild machineType --- docker/cloudbuild.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/cloudbuild.yaml b/docker/cloudbuild.yaml index 39cf2856e62..0bf800cddad 100644 --- a/docker/cloudbuild.yaml +++ b/docker/cloudbuild.yaml @@ -1,5 +1,5 @@ options: - machineType: "N1_HIGHCPU_32" + machineType: "E2_HIGHCPU_32" timeout: 1800s steps: - name: 'gcr.io/cloud-builders/docker' From 84db1c75c74515a650f51a68972f01adbad176d4 Mon Sep 17 00:00:00 2001 From: Saihajpreet Singh Date: Thu, 26 Oct 2023 12:21:39 -0500 Subject: [PATCH 0468/2104] fix(graphql): change CDN to JS Deliver for GraphiQL (#4941) * fix(graphql): change CDN to JS Deliver for GraphiQL * fix(graphql): add crossorigin prop --- 
server/http/assets/index.html | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/server/http/assets/index.html b/server/http/assets/index.html index 0039849933c..fe93f9ff0d2 100644 --- a/server/http/assets/index.html +++ b/server/http/assets/index.html @@ -5,21 +5,21 @@ The GraphiQL
- - -