// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

#[allow(deprecated)]
use {
    container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi},
    cumulus_client_cli::CollatorOptions,
    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
    cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig},
    cumulus_client_service::{prepare_node_config, ParachainHostFunctions},
    cumulus_primitives_core::{
        relay_chain::well_known_keys as RelayWellKnownKeys, CollectCollationInfo, ParaId,
    },
    fc_consensus::FrontierBlockImport,
    fc_db::DatabaseSource,
    fc_rpc_core::types::{FeeHistoryCache, FilterPool},
    fc_storage::StorageOverrideHandler,
    nimbus_primitives::NimbusId,
    node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing},
    parity_scale_codec::Encode,
    polkadot_parachain_primitives::primitives::HeadData,
    polkadot_primitives::UpgradeGoAhead,
    sc_consensus::BasicQueue,
    sc_executor::WasmExecutor,
    sc_service::{Configuration, TFullBackend, TFullClient, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_blockchain::HeaderBackend,
    sp_consensus_slots::{Slot, SlotDuration},
    sp_core::{Pair, H256},
    std::{
        collections::BTreeMap,
        sync::{Arc, Mutex},
        time::Duration,
    },
};

type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;
type ParachainClient = TFullClient<Block, RuntimeApi, ParachainExecutor>;
type ParachainBackend = TFullBackend<Block>;
type ParachainBlockImport = TParachainBlockImport<
    Block,
    FrontierBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
    ParachainBackend,
>;

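/// Wires the common `NodeBuilder` machinery to this runtime's block, runtime API
/// and executor types.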
pub struct NodeConfig;
impl NodeBuilderConfig for NodeConfig {
    type Block = Block;
    type RuntimeApi = RuntimeApi;
    type ParachainExecutor = ParachainExecutor;
}

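/// Returns the directory holding the Frontier (Ethereum mapping) database,
/// namespaced under the chain's config directory.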
pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf {
    config
        .base_path
        .config_dir(config.chain_spec.id())
        .join("frontier")
        .join(path)
}

78
// TODO This is copied from frontier. It should be imported instead after
79
// https://github.com/paritytech/frontier/issues/333 is solved
80
146
pub fn open_frontier_backend<C>(
81
146
    client: Arc<C>,
82
146
    config: &Configuration,
83
146
) -> Result<fc_db::kv::Backend<Block, C>, String>
84
146
where
85
146
    C: sp_blockchain::HeaderBackend<Block>,
86
146
{
87
146
    fc_db::kv::Backend::<Block, _>::new(
88
146
        client,
89
146
        &fc_db::kv::DatabaseSettings {
90
146
            source: match config.database {
91
146
                DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
92
146
                    path: frontier_database_dir(config, "db"),
93
146
                    cache_size: 0,
94
146
                },
95
                DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
96
                    path: frontier_database_dir(config, "paritydb"),
97
                },
98
                DatabaseSource::Auto { .. } => DatabaseSource::Auto {
99
                    rocksdb_path: frontier_database_dir(config, "db"),
100
                    paritydb_path: frontier_database_dir(config, "paritydb"),
101
                    cache_size: 0,
102
                },
103
                _ => {
104
                    return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string())
105
                }
106
            },
107
        },
108
    )
109
146
}
110

            
111
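// Accumulated mock timestamp, shared by `MockTimestampInherentDataProvider` below and
// the mocked relay slot in `start_dev_node`, so both advance in lockstep.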
thread_local!(static TIMESTAMP: std::cell::RefCell<u64> = const { std::cell::RefCell::new(0) });

/// Provides a mock timestamp, starting at 0 milliseconds, for the timestamp inherent.
/// Each call increments the timestamp by one slot duration, making Aura think time has passed.
struct MockTimestampInherentDataProvider;
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
    async fn provide_inherent_data(
        &self,
        inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        TIMESTAMP.with(|x| {
            *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION;
            inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
        })
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // The pallet never reports an error.
        None
    }
}

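/// Builds the block-import pipeline and import queue: the Frontier block import (which
/// maintains the Ethereum mapping database) is wrapped by the parachain block import,
/// and a Nimbus import queue supplies the timestamp inherent for verification.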
pub fn import_queue(
    parachain_config: &Configuration,
    node_builder: &NodeBuilder<NodeConfig>,
) -> (ParachainBlockImport, BasicQueue<Block>) {
    let frontier_block_import =
        FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone());

    // The parachain block import and import queue
    let block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
        frontier_block_import,
        node_builder.backend.clone(),
    );
    let import_queue = nimbus_consensus::import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        move |_, _| async move {
            let time = sp_timestamp::InherentDataProvider::from_system_time();

            Ok((time,))
        },
        &node_builder.task_manager.spawn_essential_handle(),
        parachain_config.prometheus_registry(),
        false,
        false,
    )
    .expect("function never fails");

    (block_import, import_queue)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder`, which helps set up the systems common to parachain nodes.
    let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?;

    // Frontier specific stuff
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (_, import_queue) = import_queue(&parachain_config, &node_builder);

    // Relay chain interface
    let (relay_chain_interface, _collator_key) = node_builder
        .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone())
        .await?;

    // Build the cumulus network, allowing access to network-related services.
    let node_builder = node_builder
        .build_cumulus_network::<_, sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            para_id,
            import_queue,
            relay_chain_interface.clone(),
        )
        .await?;

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

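    // Clone everything the Ethereum-compatible RPC layer needs into the builder
    // closure; the service invokes it to construct the node's RPC module.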
    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let overrides = overrides;
        let fee_history_cache = fee_history_cache.clone();
        let block_data_cache = block_data_cache;
        let frontier_backend = frontier_backend.clone();

        Box::new(move |subscription_task_executor| {
            let graph_pool = pool.0.as_any()
                .downcast_ref::<sc_transaction_pool::BasicPool<
                    sc_transaction_pool::FullChainApi<ParachainClient, Block>,
                    Block,
                >>()
                .expect("Frontier container chain template supports only single state transaction pool! Use --pool-type=single-state");

            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: graph_pool.pool().clone(),
                pool: pool.clone(),
                max_past_logs,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: Arc::new(network.clone()),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: None,
                xcm_senders: None,
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    let relay_chain_slot_duration = Duration::from_secs(6);
    let node_builder = node_builder.start_full_node(
        para_id,
        relay_chain_interface.clone(),
        relay_chain_slot_duration,
    )?;

    node_builder.network.start_network.start_network();

    Ok((node_builder.task_manager, node_builder.client))
}

/// Start a parachain node.
pub async fn start_parachain_node(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    start_node_impl(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        rpc_config,
        hwbench,
    )
    .await
}

/// Helper function to generate a crypto pair from a seed.
fn get_aura_id_from_seed(seed: &str) -> NimbusId {
    sp_core::sr25519::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed")
        .public()
        .into()
}

/// Builds a new development service. This service uses manual seal, and mocks
/// the parachain inherent.
pub async fn start_dev_node(
    parachain_config: Configuration,
    sealing: Sealing,
    rpc_config: crate::cli::RpcConfig,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> Result<TaskManager, sc_service::error::Error> {
    // TODO: Not present before, is this wanted and was forgotten?
    // let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder`, which helps set up the systems common to parachain nodes.
    let node_builder = NodeConfig::new_builder(&parachain_config, hwbench)?;

    // Frontier specific stuff
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (parachain_block_import, import_queue) = import_queue(&parachain_config, &node_builder);

    // Build a plain Substrate network (not Cumulus, since this is a dev node that
    // mocks the relay chain).
    let mut node_builder = node_builder
        .build_substrate_network::<sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            import_queue,
        )?;

    let mut command_sink = None;
    let mut xcm_senders = None;

    if parachain_config.role.is_authority() {
        let client = node_builder.client.clone();
        let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100);
        xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender));

        let authorities = vec![get_aura_id_from_seed("alice")];

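        // Manual seal: author blocks on demand (per the chosen sealing mode) instead of
        // relay-driven consensus, while the Aura consensus-data provider injects slot
        // digests so the runtime still sees a slot-based author.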
        command_sink = node_builder.install_manual_seal(ManualSealConfiguration {
            block_import: parachain_block_import,
            sealing,
            soft_deadline: None,
            select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()),
            consensus_data_provider: Some(Box::new(
                tc_consensus::ContainerManualSealAuraConsensusDataProvider::new(
                    SlotDuration::from_millis(
                        container_chain_template_frontier_runtime::SLOT_DURATION,
                    ),
                    authorities.clone(),
                ),
            )),
            create_inherent_data_providers: move |block: H256, ()| {
                let current_para_block = client
                    .number(block)
                    .expect("Header lookup should succeed")
                    .expect("Header passed in as parent should be present in backend.");

                let hash = client
                    .hash(current_para_block.saturating_sub(1))
                    .expect("Hash of the desired block must be present")
                    .expect("Hash of the desired block should exist");

                let para_header = client
                    .expect_header(hash)
                    .expect("Expected parachain header should exist")
                    .encode();

                let para_head_data: Vec<u8> = HeadData(para_header).encode();
                let client_set_aside_for_cidp = client.clone();
                let client_for_xcm = client.clone();
                let authorities_for_cidp = authorities.clone();
                let para_head_key = RelayWellKnownKeys::para_head(para_id);
                let relay_slot_key = RelayWellKnownKeys::CURRENT_SLOT.to_vec();
                let slot_duration = container_chain_template_frontier_runtime::SLOT_DURATION;

                let mut timestamp = 0u64;
                TIMESTAMP.with(|x| {
                    timestamp = x.clone().take();
                });

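                // Advance the mock time by one slot and derive the relay slot from it,
                // keeping the mocked relay storage consistent with the timestamp
                // injected by `MockTimestampInherentDataProvider`.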
                timestamp += slot_duration;

                let relay_slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        timestamp.into(),
                        SlotDuration::from_millis(slot_duration),
                    );
                let relay_slot = u64::from(*relay_slot);

                let downward_xcm_receiver = downward_xcm_receiver.clone();
                let hrmp_xcm_receiver = hrmp_xcm_receiver.clone();

                async move {
                    let mocked_authorities_noting =
                        ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider {
                            current_para_block,
                            relay_offset: 1000,
                            relay_blocks_per_para_block: 2,
                            orchestrator_para_id: crate::chain_spec::ORCHESTRATOR,
                            container_para_id: para_id,
                            authorities: authorities_for_cidp,
                        };

                    let mut additional_keys = mocked_authorities_noting.get_key_values();
                    additional_keys.append(&mut vec![
                        (para_head_key, para_head_data),
                        (relay_slot_key, Slot::from(relay_slot).encode()),
                    ]);

                    let time = MockTimestampInherentDataProvider;
                    let current_para_head = client_set_aside_for_cidp
                        .header(block)
                        .expect("Header lookup should succeed")
                        .expect("Header passed in as parent should be present in backend.");
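                    // Send the relay chain's GoAhead signal only when the runtime reports
                    // a pending validation code upgrade, mirroring real relay behaviour.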
                    let should_send_go_ahead = match client_set_aside_for_cidp
                        .runtime_api()
                        .collect_collation_info(block, &current_para_head)
                    {
                        Ok(info) => info.new_validation_code.is_some(),
                        Err(e) => {
                            log::error!("Failed to collect collation info: {:?}", e);
                            false
                        }
                    };
                    let mocked_parachain = MockValidationDataInherentDataProvider {
                        current_para_block,
                        current_para_block_head: None,
                        relay_offset: 1000,
                        relay_blocks_per_para_block: 2,
                        // TODO: Recheck
                        para_blocks_per_relay_epoch: 10,
                        relay_randomness_config: (),
                        xcm_config: MockXcmConfig::new(
                            &*client_for_xcm,
                            block,
                            Default::default(),
                        ),
                        raw_downward_messages: downward_xcm_receiver.drain().collect(),
                        raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(),
                        additional_key_values: Some(additional_keys),
                        para_id,
                        upgrade_go_ahead: should_send_go_ahead.then(|| {
                            log::info!(
                                "Detected pending validation code, sending go-ahead signal."
                            );
                            UpgradeGoAhead::GoAhead
                        }),
                    };

                    Ok((time, mocked_parachain, mocked_authorities_noting))
                }
            },
        })?;
    }

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool;
        let frontier_backend = frontier_backend.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let overrides = overrides;
        let block_data_cache = block_data_cache;

        Box::new(move |subscription_task_executor| {
            let graph_pool = pool.0.as_any()
                .downcast_ref::<sc_transaction_pool::BasicPool<
                    sc_transaction_pool::FullChainApi<ParachainClient, Block>,
                    Block,
                >>()
                .expect("Frontier container chain template supports only single state transaction pool! Use --pool-type=single-state");
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: graph_pool.pool().clone(),
                pool: pool.clone(),
                max_past_logs,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: network.clone(),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: command_sink.clone(),
                xcm_senders: xcm_senders.clone(),
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    log::info!("Development Service Ready");

    node_builder.network.start_network.start_network();
    Ok(node_builder.task_manager)
}