// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

use {
    container_chain_template_frontier_runtime::{opaque::Block, Hash, RuntimeApi},
    cumulus_client_cli::CollatorOptions,
    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
    cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig},
    cumulus_client_service::{prepare_node_config, ParachainHostFunctions},
    cumulus_primitives_core::{
        relay_chain::well_known_keys as RelayWellKnownKeys, CollectCollationInfo, ParaId,
    },
    fc_consensus::FrontierBlockImport,
    fc_db::DatabaseSource,
    fc_rpc_core::types::{FeeHistoryCache, FilterPool},
    fc_storage::StorageOverrideHandler,
    nimbus_primitives::NimbusId,
    node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing},
    parity_scale_codec::Encode,
    polkadot_parachain_primitives::primitives::HeadData,
    polkadot_primitives::UpgradeGoAhead,
    sc_consensus::BasicQueue,
    sc_executor::WasmExecutor,
    sc_network::NetworkBackend,
    sc_service::{Configuration, TFullBackend, TFullClient, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_blockchain::HeaderBackend,
    sp_consensus_slots::{Slot, SlotDuration},
    sp_core::{Pair, H256},
    std::{
        collections::BTreeMap,
        sync::{Arc, Mutex},
        time::Duration,
    },
};

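// Concrete client, backend, and executor types for this template's runtime.
// `ParachainBlockImport` wraps Frontier's block import inside Cumulus'
// parachain-aware block import, so imported blocks pass through both.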
type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;
type ParachainClient = TFullClient<Block, RuntimeApi, ParachainExecutor>;
type ParachainBackend = TFullBackend<Block>;
type ParachainBlockImport = TParachainBlockImport<
    Block,
    FrontierBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
    ParachainBackend,
>;

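// Binds the concrete `Block`, `RuntimeApi` and executor types above to the
// shared `NodeBuilder` machinery from `node_common`.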
pub struct NodeConfig;
impl NodeBuilderConfig for NodeConfig {
    type Block = Block;
    type RuntimeApi = RuntimeApi;
    type ParachainExecutor = ParachainExecutor;
}

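/// Compute the directory where Frontier stores its database, namely
/// `<config dir for this chain spec>/frontier/<path>`.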
pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf {
    config
        .base_path
        .config_dir(config.chain_spec.id())
        .join("frontier")
        .join(path)
}

// TODO: This is copied from Frontier. It should be imported instead after
// https://github.com/paritytech/frontier/issues/333 is solved
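/// Open the Frontier key-value backend (RocksDB or ParityDb, matching the
/// node's main database source), which holds the Ethereum block and
/// transaction mapping data.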
pub fn open_frontier_backend<C>(
    client: Arc<C>,
    config: &Configuration,
) -> Result<fc_db::kv::Backend<Block, C>, String>
where
    C: sp_blockchain::HeaderBackend<Block>,
{
    fc_db::kv::Backend::<Block, _>::new(
        client,
        &fc_db::kv::DatabaseSettings {
            source: match config.database {
                DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
                    path: frontier_database_dir(config, "db"),
                    cache_size: 0,
                },
                DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
                    path: frontier_database_dir(config, "paritydb"),
                },
                DatabaseSource::Auto { .. } => DatabaseSource::Auto {
                    rocksdb_path: frontier_database_dir(config, "db"),
                    paritydb_path: frontier_database_dir(config, "paritydb"),
                    cache_size: 0,
                },
                _ => {
                    return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string())
                }
            },
        },
    )
}

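// Mock timestamp in milliseconds, shared between the timestamp inherent and
// the slot computation of the dev node's manual seal setup below.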
thread_local!(static TIMESTAMP: std::cell::RefCell<u64> = const { std::cell::RefCell::new(0) });

/// Provide a mock timestamp, starting at 0 milliseconds, for the timestamp inherent.
/// Each call increments the timestamp by `SLOT_DURATION`, making Aura think time has passed.
struct MockTimestampInherentDataProvider;
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
    async fn provide_inherent_data(
        &self,
        inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        TIMESTAMP.with(|x| {
            *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION;
            inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
        })
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // The pallet never reports an error.
        None
    }
}

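/// Build the block import pipeline: Frontier's block import wrapped in
/// Cumulus' parachain block import, topped by a Nimbus import queue whose only
/// extra inherent is the system-time timestamp.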
pub fn import_queue(
    parachain_config: &Configuration,
    node_builder: &NodeBuilder<NodeConfig>,
) -> (ParachainBlockImport, BasicQueue<Block>) {
    let frontier_block_import =
        FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone());

    // The parachain block import and import queue
    let block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
        frontier_block_import,
        node_builder.backend.clone(),
    );
    let import_queue = nimbus_consensus::import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        move |_, _| async move {
            let time = sp_timestamp::InherentDataProvider::from_system_time();

            Ok((time,))
        },
        &node_builder.task_manager.spawn_essential_handle(),
        parachain_config.prometheus_registry(),
        false,
        false,
    )
    .expect("function never fails");

    (block_import, import_queue)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstracted over the executor and the runtime API.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl<Net>(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)>
where
    Net: NetworkBackend<Block, Hash>,
{
    let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?;

    // Frontier-specific setup: Ethereum log filter pool, fee history cache,
    // Frontier backend and storage overrides.
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (_, import_queue) = import_queue(&parachain_config, &node_builder);

    // Relay chain interface
    let (relay_chain_interface, _collator_key) = node_builder
        .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone())
        .await?;

    // Build the Cumulus network, giving access to network-related services.
    let node_builder = node_builder
        .build_cumulus_network::<_, Net>(
            &parachain_config,
            para_id,
            import_queue,
            relay_chain_interface.clone(),
        )
        .await?;

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let fee_history_cache = fee_history_cache.clone();
        let block_data_cache = block_data_cache;
        let frontier_backend = frontier_backend.clone();

        Box::new(move |subscription_task_executor| {
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: pool.clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: Arc::new(network.clone()),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: None,
                xcm_senders: None,
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    let relay_chain_slot_duration = Duration::from_secs(6);
    let node_builder = node_builder.start_full_node(
        para_id,
        relay_chain_interface.clone(),
        relay_chain_slot_duration,
    )?;

    Ok((node_builder.task_manager, node_builder.client))
}

/// Start a parachain node.
pub async fn start_parachain_node<Net>(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)>
where
    Net: NetworkBackend<Block, Hash>,
{
    start_node_impl::<Net>(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        rpc_config,
        hwbench,
    )
    .await
}

/// Helper function to generate a crypto pair from a seed and return its public key as a `NimbusId`.
fn get_aura_id_from_seed(seed: &str) -> NimbusId {
    sp_core::sr25519::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed")
        .public()
        .into()
}

/// Builds a new development service. This service uses manual seal, and mocks
/// the parachain inherent.
pub async fn start_dev_node(
    parachain_config: Configuration,
    sealing: Sealing,
    rpc_config: crate::cli::RpcConfig,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> Result<TaskManager, sc_service::error::Error> {
    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let node_builder = NodeConfig::new_builder(&parachain_config, hwbench)?;

    // Frontier-specific setup: Ethereum log filter pool, fee history cache,
    // Frontier backend and storage overrides.
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (parachain_block_import, import_queue) = import_queue(&parachain_config, &node_builder);

    // Build a plain Substrate network (not Cumulus, since this is a dev node
    // that mocks the relay chain).
    let mut node_builder = node_builder
        .build_substrate_network::<sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            import_queue,
        )?;

    let mut command_sink = None;
    let mut xcm_senders = None;

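    // Authorities get manual seal installed, along with mock XCM channels: the
    // senders are handed to the RPC layer, while the receivers feed the mocked
    // relay-chain inherent below.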
    if parachain_config.role.is_authority() {
        let client = node_builder.client.clone();
        let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100);
        xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender));

        let authorities = vec![get_aura_id_from_seed("alice")];

        command_sink = node_builder.install_manual_seal(ManualSealConfiguration {
            block_import: parachain_block_import,
            sealing,
            soft_deadline: None,
            select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()),
            consensus_data_provider: Some(Box::new(
                tc_consensus::ContainerManualSealAuraConsensusDataProvider::new(
                    SlotDuration::from_millis(
                        container_chain_template_frontier_runtime::SLOT_DURATION,
                    ),
                    authorities.clone(),
                ),
            )),
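            // With no real relay chain, fabricate everything the runtime
            // expects from it: parachain validation data, relay storage
            // entries (para head, current slot), XCM messages, and the
            // orchestrator's authority assignment.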
            create_inherent_data_providers: move |block: H256, ()| {
                let current_para_block = client
                    .number(block)
                    .expect("Header lookup should succeed")
                    .expect("Header passed in as parent should be present in backend.");

                let hash = client
                    .hash(current_para_block.saturating_sub(1))
                    .expect("Hash of the desired block must be present")
                    .expect("Hash of the desired block should exist");

                let para_header = client
                    .expect_header(hash)
                    .expect("Expected parachain header should exist")
                    .encode();

                let para_head_data: Vec<u8> = HeadData(para_header).encode();
                let client_set_aside_for_cidp = client.clone();
                let client_for_xcm = client.clone();
                let authorities_for_cidp = authorities.clone();
                let para_head_key = RelayWellKnownKeys::para_head(para_id);
                let relay_slot_key = RelayWellKnownKeys::CURRENT_SLOT.to_vec();
                let slot_duration = container_chain_template_frontier_runtime::SLOT_DURATION;

                let mut timestamp = 0u64;
                TIMESTAMP.with(|x| {
                    timestamp = x.clone().take();
                });

                timestamp += slot_duration;

                let relay_slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        timestamp.into(),
                        SlotDuration::from_millis(slot_duration),
                    );
                let relay_slot = u64::from(*relay_slot);

                let downward_xcm_receiver = downward_xcm_receiver.clone();
                let hrmp_xcm_receiver = hrmp_xcm_receiver.clone();

                async move {
                    let mocked_authorities_noting =
                        ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider {
                            current_para_block,
                            relay_offset: 1000,
                            relay_blocks_per_para_block: 2,
                            orchestrator_para_id: crate::chain_spec::ORCHESTRATOR,
                            container_para_id: para_id,
                            authorities: authorities_for_cidp,
                        };

                    let mut additional_keys = mocked_authorities_noting.get_key_values();
                    additional_keys.append(&mut vec![
                        (para_head_key, para_head_data),
                        (relay_slot_key, Slot::from(relay_slot).encode()),
                    ]);

                    let time = MockTimestampInherentDataProvider;
                    let current_para_head = client_set_aside_for_cidp
                        .header(block)
                        .expect("Header lookup should succeed")
                        .expect("Header passed in as parent should be present in backend.");
                    let should_send_go_ahead = match client_set_aside_for_cidp
                        .runtime_api()
                        .collect_collation_info(block, &current_para_head)
                    {
                        Ok(info) => info.new_validation_code.is_some(),
                        Err(e) => {
                            log::error!("Failed to collect collation info: {:?}", e);
                            false
                        }
                    };
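                    // Mocked relay validation data; the go-ahead signal is only
                    // sent when the runtime has a pending validation code
                    // upgrade, mirroring what the relay chain would do.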
                    let mocked_parachain = MockValidationDataInherentDataProvider {
                        current_para_block,
                        current_para_block_head: None,
                        relay_offset: 1000,
                        relay_blocks_per_para_block: 2,
                        para_blocks_per_relay_epoch: 10,
                        relay_randomness_config: (),
                        xcm_config: MockXcmConfig::new(&*client_for_xcm, block, Default::default()),
                        raw_downward_messages: downward_xcm_receiver.drain().collect(),
                        raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(),
                        additional_key_values: Some(additional_keys),
                        para_id,
                        upgrade_go_ahead: should_send_go_ahead.then(|| {
                            log::info!("Detected pending validation code, sending go-ahead signal.");
                            UpgradeGoAhead::GoAhead
                        }),
                    };

                    Ok((time, mocked_parachain, mocked_authorities_noting))
                }
            },
        })?;
    }

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool;
        let frontier_backend = frontier_backend.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let block_data_cache = block_data_cache;

        Box::new(move |subscription_task_executor| {
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: pool.clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: network.clone(),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: command_sink.clone(),
                xcm_senders: xcm_senders.clone(),
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    log::info!("Development Service Ready");

    Ok(node_builder.task_manager)
}