// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

#[allow(deprecated)]
use {
    container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi},
    cumulus_client_cli::CollatorOptions,
    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
    cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig},
    cumulus_client_service::{prepare_node_config, ParachainHostFunctions},
    cumulus_primitives_core::{
        relay_chain::well_known_keys as RelayWellKnownKeys, CollectCollationInfo, ParaId,
    },
    fc_consensus::FrontierBlockImport,
    fc_db::DatabaseSource,
    fc_rpc_core::types::{FeeHistoryCache, FilterPool},
    fc_storage::StorageOverrideHandler,
    nimbus_primitives::NimbusId,
    node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing},
    parity_scale_codec::Encode,
    polkadot_parachain_primitives::primitives::HeadData,
    polkadot_primitives::UpgradeGoAhead,
    sc_consensus::BasicQueue,
    sc_executor::WasmExecutor,
    sc_service::{Configuration, TFullBackend, TFullClient, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_blockchain::HeaderBackend,
    sp_consensus_slots::{Slot, SlotDuration},
    sp_core::{Pair, H256},
    std::{
        collections::BTreeMap,
        sync::{Arc, Mutex},
        time::Duration,
    },
};

type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;
type ParachainClient = TFullClient<Block, RuntimeApi, ParachainExecutor>;
type ParachainBackend = TFullBackend<Block>;
type ParachainBlockImport = TParachainBlockImport<
    Block,
    FrontierBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
    ParachainBackend,
>;

pub struct NodeConfig;
impl NodeBuilderConfig for NodeConfig {
    type Block = Block;
    type RuntimeApi = RuntimeApi;
    type ParachainExecutor = ParachainExecutor;
}

/// Returns the directory where the Frontier (Ethereum mapping) database is
/// stored for the current chain: `<base_path>/chains/<chain_id>/frontier/<path>`.
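///
/// # Example (illustrative only; actual paths depend on CLI flags)
///
/// ```ignore
/// // Yields <base_path>/chains/<chain_id>/frontier/db
/// let db_path = frontier_database_dir(&config, "db");
/// ```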
pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf {
    let config_dir = config
        .base_path
        .config_dir(config.chain_spec.id())
        .join("frontier")
        .join(path);

    config_dir
}

// TODO This is copied from frontier. It should be imported instead after
// https://github.com/paritytech/frontier/issues/333 is solved
/// Opens the Frontier key-value backend, mirroring the database engine
/// (RocksDB, ParityDB, or auto) used by the node's main database.
pub fn open_frontier_backend<C>(
    client: Arc<C>,
    config: &Configuration,
) -> Result<fc_db::kv::Backend<Block, C>, String>
where
    C: sp_blockchain::HeaderBackend<Block>,
{
    fc_db::kv::Backend::<Block, _>::new(
        client,
        &fc_db::kv::DatabaseSettings {
            source: match config.database {
                DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
                    path: frontier_database_dir(config, "db"),
                    cache_size: 0,
                },
                DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
                    path: frontier_database_dir(config, "paritydb"),
                },
                DatabaseSource::Auto { .. } => DatabaseSource::Auto {
                    rocksdb_path: frontier_database_dir(config, "db"),
                    paritydb_path: frontier_database_dir(config, "paritydb"),
                    cache_size: 0,
                },
                _ => {
                    return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string())
                }
            },
        },
    )
}

thread_local!(static TIMESTAMP: std::cell::RefCell<u64> = const { std::cell::RefCell::new(0) });

/// Provides a mock timestamp, starting at 0 milliseconds, for the timestamp inherent.
/// Each call increments the timestamp by `SLOT_DURATION`, making Aura think time has passed.
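///
/// # Example (illustrative only, not compiled)
///
/// ```ignore
/// // Assuming SLOT_DURATION were 12_000 ms, successive calls would provide
/// // timestamps 12_000, 24_000, 36_000, ... so each sealed block advances
/// // Aura by exactly one slot.
/// let mut inherent_data = sp_inherents::InherentData::new();
/// MockTimestampInherentDataProvider
///     .provide_inherent_data(&mut inherent_data)
///     .await?;
/// ```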
struct MockTimestampInherentDataProvider;
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
    async fn provide_inherent_data(
        &self,
        inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        TIMESTAMP.with(|x| {
            *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION;
            inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
        })
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // The pallet never reports an error.
        None
    }
}

/// Builds the parachain block import (wrapping Frontier's block import) and the
/// Nimbus import queue.
pub fn import_queue(
    parachain_config: &Configuration,
    node_builder: &NodeBuilder<NodeConfig>,
) -> (ParachainBlockImport, BasicQueue<Block>) {
    let frontier_block_import =
        FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone());

    // The parachain block import and import queue
    let block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
        frontier_block_import,
        node_builder.backend.clone(),
    );
    let import_queue = nimbus_consensus::import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        move |_, _| async move {
            let time = sp_timestamp::InherentDataProvider::from_system_time();

            Ok((time,))
        },
        &node_builder.task_manager.spawn_essential_handle(),
        parachain_config.prometheus_registry(),
        false,
        false,
    )
    .expect("function never fails");

    (block_import, import_queue)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstracted over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    let parachain_config = prepare_node_config(parachain_config);

    // Create a `NodeBuilder`, which helps set up the systems common to parachain nodes.
    let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?;

    // Frontier-specific setup: filter pool, fee history cache, backend, and storage overrides.
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (_, import_queue) = import_queue(&parachain_config, &node_builder);

    // Relay chain interface
    let (relay_chain_interface, _collator_key) = node_builder
        .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone())
        .await?;

    // Build the Cumulus network, giving access to network-related services.
    let node_builder = node_builder
        .build_cumulus_network::<_, sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            para_id,
            import_queue,
            relay_chain_interface.clone(),
        )
        .await?;

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let fee_history_cache = fee_history_cache.clone();
        let block_data_cache = block_data_cache;
        let frontier_backend = frontier_backend.clone();

        Box::new(move |subscription_task_executor| {
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: pool.clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: Arc::new(network.clone()),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: None,
                xcm_senders: None,
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    let relay_chain_slot_duration = Duration::from_secs(6);
    let node_builder = node_builder.start_full_node(
        para_id,
        relay_chain_interface.clone(),
        relay_chain_slot_duration,
    )?;

    Ok((node_builder.task_manager, node_builder.client))
}

/// Start a parachain node.
pub async fn start_parachain_node(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    rpc_config: crate::cli::RpcConfig,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
    start_node_impl(
        parachain_config,
        polkadot_config,
        collator_options,
        para_id,
        rpc_config,
        hwbench,
    )
    .await
}

/// Helper function to derive a Nimbus authority ID (public key) from an sr25519 dev seed.
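///
/// # Example (illustrative only, not compiled)
///
/// ```ignore
/// // "alice" expands to the dev derivation path "//alice".
/// let alice_id: NimbusId = get_aura_id_from_seed("alice");
/// ```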
fn get_aura_id_from_seed(seed: &str) -> NimbusId {
    sp_core::sr25519::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed")
        .public()
        .into()
}

/// Builds a new development service. This service uses manual seal, and mocks
/// the parachain inherent.
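///
/// # Example (sketch; assumes the CLI layer already produced `parachain_config`
/// and `rpc_config`)
///
/// ```ignore
/// let task_manager = start_dev_node(
///     parachain_config,
///     Sealing::Manual,
///     rpc_config,
///     ParaId::from(2000),
///     None, // no hardware benchmark
/// )
/// .await?;
/// ```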
pub async fn start_dev_node(
    parachain_config: Configuration,
    sealing: Sealing,
    rpc_config: crate::cli::RpcConfig,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> Result<TaskManager, sc_service::error::Error> {
    // Create a `NodeBuilder`, which helps set up the systems common to parachain nodes.
    let node_builder = NodeConfig::new_builder(&parachain_config, hwbench)?;

    // Frontier-specific setup: filter pool, fee history cache, backend, and storage overrides.
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
    let frontier_backend = fc_db::Backend::KeyValue(
        open_frontier_backend(node_builder.client.clone(), &parachain_config)?.into(),
    );
    let overrides = Arc::new(StorageOverrideHandler::new(node_builder.client.clone()));
    let fee_history_limit = rpc_config.fee_history_limit;

    let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
        fc_mapping_sync::EthereumBlockNotification<Block>,
    > = Default::default();
    let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

    let (parachain_block_import, import_queue) = import_queue(&parachain_config, &node_builder);

    // Build a Substrate network (not a Cumulus one: this is a dev node, so the
    // relay chain is mocked).
    let mut node_builder = node_builder
        .build_substrate_network::<sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            import_queue,
        )?;

    let mut command_sink = None;
    let mut xcm_senders = None;

    if parachain_config.role.is_authority() {
        let client = node_builder.client.clone();
        let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100);
        xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender));

        let authorities = vec![get_aura_id_from_seed("alice")];

        command_sink = node_builder.install_manual_seal(ManualSealConfiguration {
            block_import: parachain_block_import,
            sealing,
            soft_deadline: None,
            select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()),
            consensus_data_provider: Some(Box::new(
                tc_consensus::ContainerManualSealAuraConsensusDataProvider::new(
                    SlotDuration::from_millis(
                        container_chain_template_frontier_runtime::SLOT_DURATION,
                    ),
                    authorities.clone(),
                ),
            )),
            create_inherent_data_providers: move |block: H256, ()| {
                let current_para_block = client
                    .number(block)
                    .expect("Header lookup should succeed")
                    .expect("Header passed in as parent should be present in backend.");

                let hash = client
                    .hash(current_para_block.saturating_sub(1))
                    .expect("Hash of the desired block must be present")
                    .expect("Hash of the desired block should exist");

                let para_header = client
                    .expect_header(hash)
                    .expect("Expected parachain header should exist")
                    .encode();

                let para_head_data: Vec<u8> = HeadData(para_header).encode();
                let client_set_aside_for_cidp = client.clone();
                let client_for_xcm = client.clone();
                let authorities_for_cidp = authorities.clone();
                let para_head_key = RelayWellKnownKeys::para_head(para_id);
                let relay_slot_key = RelayWellKnownKeys::CURRENT_SLOT.to_vec();
                let slot_duration = container_chain_template_frontier_runtime::SLOT_DURATION;

                let mut timestamp = 0u64;
                TIMESTAMP.with(|x| {
                    timestamp = x.clone().take();
                });

                timestamp += slot_duration;

                // Derive the mocked relay slot from the mocked timestamp, so the
                // relay chain appears to advance one slot per sealed block.
                let relay_slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                    timestamp.into(),
                    SlotDuration::from_millis(slot_duration),
                );
                let relay_slot = u64::from(*relay_slot);

                let downward_xcm_receiver = downward_xcm_receiver.clone();
                let hrmp_xcm_receiver = hrmp_xcm_receiver.clone();

                async move {
                    let mocked_authorities_noting =
                        ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider {
                            current_para_block,
                            relay_offset: 1000,
                            relay_blocks_per_para_block: 2,
                            orchestrator_para_id: crate::chain_spec::ORCHESTRATOR,
                            container_para_id: para_id,
                            authorities: authorities_for_cidp,
                        };

                    let mut additional_keys = mocked_authorities_noting.get_key_values();
                    additional_keys.append(&mut vec![
                        (para_head_key, para_head_data),
                        (relay_slot_key, Slot::from(relay_slot).encode()),
                    ]);

                    let time = MockTimestampInherentDataProvider;
                    let current_para_head = client_set_aside_for_cidp
                        .header(block)
                        .expect("Header lookup should succeed")
                        .expect("Header passed in as parent should be present in backend.");
                    // Send the relay chain's upgrade go-ahead signal only when the
                    // runtime reports a pending validation code upgrade.
                    let should_send_go_ahead = match client_set_aside_for_cidp
                        .runtime_api()
                        .collect_collation_info(block, &current_para_head)
                    {
                        Ok(info) => info.new_validation_code.is_some(),
                        Err(e) => {
                            log::error!("Failed to collect collation info: {:?}", e);
                            false
                        }
                    };
                    let mocked_parachain = MockValidationDataInherentDataProvider {
                        current_para_block,
                        current_para_block_head: None,
                        relay_offset: 1000,
                        relay_blocks_per_para_block: 2,
                        para_blocks_per_relay_epoch: 10,
                        relay_randomness_config: (),
                        xcm_config: MockXcmConfig::new(
                            &*client_for_xcm,
                            block,
                            Default::default(),
                        ),
                        raw_downward_messages: downward_xcm_receiver.drain().collect(),
                        raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(),
                        additional_key_values: Some(additional_keys),
                        para_id,
                        upgrade_go_ahead: should_send_go_ahead.then(|| {
                            log::info!(
                                "Detected pending validation code, sending go-ahead signal."
                            );
                            UpgradeGoAhead::GoAhead
                        }),
                    };

                    Ok((time, mocked_parachain, mocked_authorities_noting))
                }
            },
        })?;
    }

    let frontier_backend = Arc::new(frontier_backend);

    crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams {
        task_manager: &node_builder.task_manager,
        client: node_builder.client.clone(),
        substrate_backend: node_builder.backend.clone(),
        frontier_backend: frontier_backend.clone(),
        filter_pool: filter_pool.clone(),
        overrides: overrides.clone(),
        fee_history_limit,
        fee_history_cache: fee_history_cache.clone(),
        sync_service: node_builder.network.sync_service.clone(),
        pubsub_notification_sinks: pubsub_notification_sinks.clone(),
    });

    let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
        node_builder.task_manager.spawn_handle(),
        overrides.clone(),
        rpc_config.eth_log_block_cache,
        rpc_config.eth_statuses_cache,
        node_builder.prometheus_registry.clone(),
    ));

    let rpc_builder = {
        let client = node_builder.client.clone();
        let pool = node_builder.transaction_pool.clone();
        let pubsub_notification_sinks = pubsub_notification_sinks;
        let network = node_builder.network.network.clone();
        let sync = node_builder.network.sync_service.clone();
        let filter_pool = filter_pool;
        let frontier_backend = frontier_backend.clone();
        let backend = node_builder.backend.clone();
        let max_past_logs = rpc_config.max_past_logs;
        let max_block_range = rpc_config.max_block_range;
        let overrides = overrides;
        let block_data_cache = block_data_cache;

        Box::new(move |subscription_task_executor| {
            let deps = crate::rpc::FullDeps {
                backend: backend.clone(),
                client: client.clone(),
                filter_pool: filter_pool.clone(),
                frontier_backend: match &*frontier_backend {
                    fc_db::Backend::KeyValue(b) => b.clone(),
                    fc_db::Backend::Sql(b) => b.clone(),
                },
                graph: pool.clone(),
                pool: pool.clone(),
                max_past_logs,
                max_block_range,
                fee_history_limit,
                fee_history_cache: fee_history_cache.clone(),
                network: network.clone(),
                sync: sync.clone(),
                block_data_cache: block_data_cache.clone(),
                overrides: overrides.clone(),
                is_authority: false,
                command_sink: command_sink.clone(),
                xcm_senders: xcm_senders.clone(),
            };
            crate::rpc::create_full(
                deps,
                subscription_task_executor,
                pubsub_notification_sinks.clone(),
            )
            .map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    log::info!("Development Service Ready");

    Ok(node_builder.task_manager)
}