// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

#[allow(deprecated)]
use {
    container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi},
    cumulus_client_cli::CollatorOptions,
    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
    cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig},
    cumulus_client_service::{prepare_node_config, ParachainHostFunctions},
    cumulus_primitives_core::{
        relay_chain::well_known_keys as RelayWellKnownKeys, CollectCollationInfo, ParaId,
    },
    fc_consensus::FrontierBlockImport,
    fc_db::DatabaseSource,
    fc_rpc_core::types::{FeeHistoryCache, FilterPool},
    fc_storage::StorageOverrideHandler,
    nimbus_primitives::NimbusId,
    node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing},
    parity_scale_codec::Encode,
    polkadot_parachain_primitives::primitives::HeadData,
    polkadot_primitives::UpgradeGoAhead,
    sc_consensus::BasicQueue,
    sc_executor::WasmExecutor,
    sc_service::{Configuration, TFullBackend, TFullClient, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_blockchain::HeaderBackend,
    sp_consensus_slots::{Slot, SlotDuration},
    sp_core::{Pair, H256},
    std::{
        collections::BTreeMap,
        sync::{Arc, Mutex},
        time::Duration,
    },
};

type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;
type ParachainClient = TFullClient<Block, RuntimeApi, ParachainExecutor>;
type ParachainBackend = TFullBackend<Block>;
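// Block import layering: the Cumulus parachain block import wraps Frontier's
// block import, which in turn wraps the client.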
type ParachainBlockImport = TParachainBlockImport<
    Block,
    FrontierBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
    ParachainBackend,
>;

pub struct NodeConfig;
impl NodeBuilderConfig for NodeConfig {
    type Block = Block;
    type RuntimeApi = RuntimeApi;
    type ParachainExecutor = ParachainExecutor;
}

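/// Compute the Frontier database directory for this chain, typically
/// `<base_path>/chains/<chain_id>/frontier/<path>` (the exact layout follows
/// `BasePath::config_dir`).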
pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf {
    config
        .base_path
        .config_dir(config.chain_spec.id())
        .join("frontier")
        .join(path)
}

// TODO This is copied from frontier. It should be imported instead after
// https://github.com/paritytech/frontier/issues/333 is solved
pub fn open_frontier_backend<C>(
    client: Arc<C>,
    config: &Configuration,
) -> Result<fc_db::kv::Backend<Block, C>, String>
where
    C: sp_blockchain::HeaderBackend<Block>,
{
    fc_db::kv::Backend::<Block, _>::new(
        client,
        &fc_db::kv::DatabaseSettings {
            source: match config.database {
                DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
                    path: frontier_database_dir(config, "db"),
                    cache_size: 0,
                },
                DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
                    path: frontier_database_dir(config, "paritydb"),
                },
                DatabaseSource::Auto { .. } => DatabaseSource::Auto {
                    rocksdb_path: frontier_database_dir(config, "db"),
                    paritydb_path: frontier_database_dir(config, "paritydb"),
                    cache_size: 0,
                },
                _ => {
                    return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string())
                }
            },
        },
    )
}

thread_local!(static TIMESTAMP: std::cell::RefCell<u64> = const { std::cell::RefCell::new(0) });

/// Provides a mock timestamp for the timestamp inherent, starting at 0 milliseconds.
/// Each call increments the timestamp by the slot duration, making Aura think time has passed.
struct MockTimestampInherentDataProvider;
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
    async fn provide_inherent_data(
        &self,
        inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        TIMESTAMP.with(|x| {
            *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION;
            inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
        })
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // The pallet never reports an error.
        None
    }
}

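/// Build the block import pipeline (Frontier wrapped in the Cumulus parachain
/// block import) together with the nimbus import queue that verifies incoming
/// blocks.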
pub fn import_queue(
    parachain_config: &Configuration,
    node_builder: &NodeBuilder<NodeConfig>,
) -> (ParachainBlockImport, BasicQueue<Block>) {
    let frontier_block_import =
        FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone());

    // The parachain block import and import queue
    let block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
        frontier_block_import,
        node_builder.backend.clone(),
    );
    let import_queue = nimbus_consensus::import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        move |_, _| async move {
            let time = sp_timestamp::InherentDataProvider::from_system_time();

            Ok((time,))
        },
        &node_builder.task_manager.spawn_essential_handle(),
        parachain_config.prometheus_registry(),
        false,
        false,
    )
    .expect("function never fails");

    (block_import, import_queue)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstracted over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?;

    // Frontier-specific setup
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (_, import_queue) = import_queue(&parachain_config, &node_builder);

    // Relay chain interface
    let (relay_chain_interface, _collator_key) = node_builder
        .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone())
        .await?;

    // Build the cumulus network, allowing access to network-related services.
    let node_builder = node_builder
        .build_cumulus_network::<_, sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            para_id,
            import_queue,
            relay_chain_interface.clone(),
        )
        .await?;

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let fee_history_cache = fee_history_cache.clone();
        let block_data_cache = block_data_cache;
        let frontier_backend = frontier_backend.clone();

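        // The Ethereum RPC layer needs direct access to the underlying pool graph,
        // so downcast the generic pool handle; only the single-state pool is supported.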
        Box::new(move |subscription_task_executor| {
            let graph_pool = pool.0.as_any()
                .downcast_ref::<sc_transaction_pool::BasicPool<
                    sc_transaction_pool::FullChainApi<ParachainClient, Block>,
                    Block,
                >>()
                .expect("Frontier container chain template supports only single state transaction pool! Use --pool-type=single-state");

            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: graph_pool.pool().clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: Arc::new(network.clone()),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: None,
                xcm_senders: None,
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    let relay_chain_slot_duration = Duration::from_secs(6);
    let node_builder = node_builder.start_full_node(
        para_id,
        relay_chain_interface.clone(),
        relay_chain_slot_duration,
    )?;

    node_builder.network.start_network.start_network();

    Ok((node_builder.task_manager, node_builder.client))
}

/// Start a parachain node.
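///
/// # Example
///
/// An illustrative sketch (assumes the caller, e.g. the CLI command handler,
/// has already built both configurations):
///
/// ```ignore
/// let (task_manager, _client) = start_parachain_node(
///     parachain_config,
///     polkadot_config,
///     collator_options,
///     para_id,
///     rpc_config,
///     hwbench,
/// )
/// .await?;
/// ```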
pub async fn start_parachain_node(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    start_node_impl(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        rpc_config,
        hwbench,
    )
    .await
}

/// Helper function to generate a crypto pair from a seed and return it as a `NimbusId`.
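///
/// For example, `get_aura_id_from_seed("alice")` returns the `NimbusId` derived
/// from the `//alice` development key.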
fn get_aura_id_from_seed(seed: &str) -> NimbusId {
    sp_core::sr25519::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed")
        .public()
        .into()
}

/// Builds a new development service. This service uses manual seal and mocks
/// the parachain inherent.
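///
/// Block production is driven by the given [`Sealing`] mode; with manual
/// sealing, blocks can be authored on demand via the `engine_createBlock` RPC.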
pub async fn start_dev_node(
    parachain_config: Configuration,
    sealing: Sealing,
    rpc_config: crate::cli::RpcConfig,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> Result<TaskManager, sc_service::error::Error> {
    // TODO: Not present before, is this wanted and was forgotten?
    // let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let node_builder = NodeConfig::new_builder(&parachain_config, hwbench)?;

    // Frontier-specific setup
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (parachain_block_import, import_queue) = import_queue(&parachain_config, &node_builder);

    // Build a plain Substrate network (not Cumulus, since this is a dev node that
    // mocks the relay chain).
    let mut node_builder = node_builder
        .build_substrate_network::<sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            import_queue,
        )?;

    let mut command_sink = None;
    let mut xcm_senders = None;

    if parachain_config.role.is_authority() {
        let client = node_builder.client.clone();
        let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100);
        xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender));

        let authorities = vec![get_aura_id_from_seed("alice")];

        command_sink = node_builder.install_manual_seal(ManualSealConfiguration {
            block_import: parachain_block_import,
            sealing,
            soft_deadline: None,
            select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()),
            consensus_data_provider: Some(Box::new(
                tc_consensus::ContainerManualSealAuraConsensusDataProvider::new(
                    SlotDuration::from_millis(
                        container_chain_template_frontier_runtime::SLOT_DURATION,
                    ),
                    authorities.clone(),
                ),
            )),
            create_inherent_data_providers: move |block: H256, ()| {
                let current_para_block = client
                    .number(block)
                    .expect("Header lookup should succeed")
                    .expect("Header passed in as parent should be present in backend.");

                let hash = client
                    .hash(current_para_block.saturating_sub(1))
                    .expect("Hash of the desired block must be present")
                    .expect("Hash of the desired block should exist");

                let para_header = client
                    .expect_header(hash)
                    .expect("Expected parachain header should exist")
                    .encode();

                let para_head_data: Vec<u8> = HeadData(para_header).encode();
                let client_set_aside_for_cidp = client.clone();
                let client_for_xcm = client.clone();
                let authorities_for_cidp = authorities.clone();
                let para_head_key = RelayWellKnownKeys::para_head(para_id);
                let relay_slot_key = RelayWellKnownKeys::CURRENT_SLOT.to_vec();
                let slot_duration = container_chain_template_frontier_runtime::SLOT_DURATION;

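                // Advance the mock time by one slot per produced block: read back the
                // thread-local timestamp (also used by `MockTimestampInherentDataProvider`)
                // and derive the relay slot from it, so the mocked relay chain appears
                // to progress in lockstep.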
                let mut timestamp = 0u64;
                TIMESTAMP.with(|x| {
                    timestamp = x.clone().take();
                });

                timestamp += slot_duration;

                let relay_slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        timestamp.into(),
                        SlotDuration::from_millis(slot_duration),
                    );
                let relay_slot = u64::from(*relay_slot);

                let downward_xcm_receiver = downward_xcm_receiver.clone();
                let hrmp_xcm_receiver = hrmp_xcm_receiver.clone();

                async move {
                    let mocked_authorities_noting =
                        ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider {
                            current_para_block,
                            relay_offset: 1000,
                            relay_blocks_per_para_block: 2,
                            orchestrator_para_id: crate::chain_spec::ORCHESTRATOR,
                            container_para_id: para_id,
                            authorities: authorities_for_cidp,
                        };

                    let mut additional_keys = mocked_authorities_noting.get_key_values();
                    additional_keys.append(&mut vec![
                        (para_head_key, para_head_data),
                        (relay_slot_key, Slot::from(relay_slot).encode()),
                    ]);

                    let time = MockTimestampInherentDataProvider;
                    let current_para_head = client_set_aside_for_cidp
                        .header(block)
                        .expect("Header lookup should succeed")
                        .expect("Header passed in as parent should be present in backend.");
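                    // Mirror the real relay chain: signal `GoAhead` for a code upgrade
                    // only when the runtime reports pending validation code.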
                    let should_send_go_ahead = match client_set_aside_for_cidp
                        .runtime_api()
                        .collect_collation_info(block, &current_para_head)
                    {
                        Ok(info) => info.new_validation_code.is_some(),
                        Err(e) => {
                            log::error!("Failed to collect collation info: {:?}", e);
                            false
                        }
                    };

                    let mocked_parachain = MockValidationDataInherentDataProvider {
                        current_para_block,
                        current_para_block_head: None,
                        relay_offset: 1000,
                        relay_blocks_per_para_block: 2,
                        // TODO: Recheck
                        para_blocks_per_relay_epoch: 10,
                        relay_randomness_config: (),
                        xcm_config: MockXcmConfig::new(
                            &*client_for_xcm,
                            block,
                            Default::default(),
                        ),
                        raw_downward_messages: downward_xcm_receiver.drain().collect(),
                        raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(),
                        additional_key_values: Some(additional_keys),
                        para_id,
                        upgrade_go_ahead: should_send_go_ahead.then(|| {
                            log::info!(
                                "Detected pending validation code, sending go-ahead signal."
                            );
                            UpgradeGoAhead::GoAhead
                        }),
                    };

                    Ok((time, mocked_parachain, mocked_authorities_noting))
                }
            },
        })?;
    }

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool;
        let frontier_backend = frontier_backend.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let block_data_cache = block_data_cache;

        Box::new(move |subscription_task_executor| {
            let graph_pool = pool.0.as_any()
                .downcast_ref::<sc_transaction_pool::BasicPool<
                    sc_transaction_pool::FullChainApi<ParachainClient, Block>,
                    Block,
                >>()
                .expect("Frontier container chain template supports only single state transaction pool! Use --pool-type=single-state");
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: graph_pool.pool().clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: network.clone(),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: command_sink.clone(),
                xcm_senders: xcm_senders.clone(),
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    log::info!("Development Service Ready");

    node_builder.network.start_network.start_network();
    Ok(node_builder.task_manager)
}