// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>

//! Development Polkadot service. Adapted from the `polkadot_service` crate,
//! with components removed that are not required in a dev node.
//!
//! The following major changes were made:
//! 1. Removed beefy and grandpa notification services and request/response protocols
//! 2. Removed support for parachains, which also eliminated the need to start the overseer and all other subsystems associated with collation + network request/response protocols for the same
//! 3. Removed support for hardware benchmarking
//! 4. Removed the authority discovery service
//! 5. Removed spawning of the beefy, grandpa and MMR workers
//! 6. Removed rpc extensions for beefy, grandpa and babe, and added support for manual seal
//! 7. Removed beefy and grandpa block import from the block import pipeline (Babe remains)
//! 8. Using the manual seal import queue instead of the babe import queue
//! 9. Started the manual seal worker
//! 10. If the amount of time passed between two blocks is less than the slot duration, we emulate the passing of time to babe block import and the runtime
//!     by incrementing the timestamp by the slot duration.

use {
34
    async_io::Timer,
35
    babe::{BabeBlockImport, BabeLink},
36
    codec::{Decode, Encode},
37
    consensus_common::SelectChain,
38
    dancelight_runtime::RuntimeApi,
39
    futures::{Stream, StreamExt},
40
    jsonrpsee::RpcModule,
41
    node_common::service::Sealing,
42
    polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce},
43
    polkadot_node_core_parachains_inherent::Error as InherentError,
44
    polkadot_overseer::Handle,
45
    polkadot_primitives::InherentData as ParachainsInherentData,
46
    polkadot_rpc::{DenyUnsafe, RpcExtension},
47
    polkadot_service::{
48
        BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain,
49
    },
50
    sc_client_api::{AuxStore, Backend},
51
    sc_consensus_manual_seal::{
52
        consensus::babe::BabeConsensusDataProvider,
53
        rpc::{ManualSeal, ManualSealApiServer},
54
        run_manual_seal, EngineCommand, ManualSealParams,
55
    },
56
    sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
57
    sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool},
58
    service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager},
59
    sp_api::ProvideRuntimeApi,
60
    sp_block_builder::BlockBuilder,
61
    sp_blockchain::{HeaderBackend, HeaderMetadata},
62
    sp_consensus_babe::SlotDuration,
63
    std::{cmp::max, ops::Add, sync::Arc, time::Duration},
64
    telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
65
};
66

            
67
pub type FullBackend = service::TFullBackend<Block>;
68

            
69
/// Full client over the dancelight runtime, executing wasm with the standard
/// Substrate host functions plus the benchmarking host functions.
pub type FullClient = service::TFullClient<
    Block,
    RuntimeApi,
    WasmExecutor<(
        sp_io::SubstrateHostFunctions,
        frame_benchmarking::benchmarking::HostFunctions,
    )>,
>;
77

            
78
/// Handles returned by a fully-started dev node, mirroring the shape of
/// `polkadot_service::NewFull` minus the parts removed for dev mode.
pub struct NewFull {
    /// Task manager driving all spawned background tasks.
    pub task_manager: TaskManager,
    /// Shared handle to the full client.
    pub client: Arc<FullClient>,
    /// Always `None` in the dev node: the overseer is not started.
    pub overseer_handle: Option<Handle>,
    /// Handle to the peer-to-peer network service.
    pub network: Arc<dyn sc_network::service::traits::NetworkService>,
    /// Handle to the chain-sync service.
    pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
    /// Handles to the node's RPC servers.
    pub rpc_handlers: RpcHandlers,
    /// Shared handle to the database backend.
    pub backend: Arc<FullBackend>,
}
87

            
88
/// Custom Deps for dev Rpc extension
89
/// Custom dependencies for the dev RPC extension.
struct DevDeps<C, P> {
    /// The client instance to use.
    pub client: Arc<C>,
    /// Transaction pool instance.
    pub pool: Arc<P>,
    /// A copy of the chain spec.
    pub chain_spec: Box<dyn sc_chain_spec::ChainSpec>,
    /// Whether to deny unsafe calls.
    pub deny_unsafe: DenyUnsafe,
    /// Manual seal command sink; `None` unless the node seals manually,
    /// in which case the manual-seal RPC is mounted on top of it.
    pub command_sink: Option<futures::channel::mpsc::Sender<EngineCommand<Hash>>>,
}
101

            
102
714
fn create_dev_rpc_extension<C, P>(
103
714
    DevDeps {
104
714
        client,
105
714
        pool,
106
714
        chain_spec,
107
714
        deny_unsafe,
108
714
        command_sink: maybe_command_sink,
109
714
    }: DevDeps<C, P>,
110
714
) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
111
714
where
112
714
    C: ProvideRuntimeApi<Block>
113
714
        + HeaderBackend<Block>
114
714
        + AuxStore
115
714
        + HeaderMetadata<Block, Error = sp_blockchain::Error>
116
714
        + Send
117
714
        + Sync
118
714
        + 'static,
119
714
    C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
120
714
    C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
121
714
    C::Api: BlockBuilder<Block>,
122
714
    P: TransactionPool + Sync + Send + 'static,
123
714
{
124
    use {
125
        pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer},
126
        sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer},
127
        substrate_frame_rpc_system::{System, SystemApiServer},
128
    };
129

            
130
714
    let mut io = RpcModule::new(());
131
714

            
132
714
    let chain_name = chain_spec.name().to_string();
133
714
    let genesis_hash = client
134
714
        .hash(0)
135
714
        .ok()
136
714
        .flatten()
137
714
        .expect("Genesis block exists; qed");
138
714
    let properties = chain_spec.properties();
139
714

            
140
714
    io.merge(ChainSpec::new(chain_name, genesis_hash, properties).into_rpc())?;
141
714
    io.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
142
714
    io.merge(TransactionPayment::new(client.clone()).into_rpc())?;
143

            
144
714
    if let Some(command_sink) = maybe_command_sink {
145
714
        io.merge(ManualSeal::new(command_sink).into_rpc())?;
146
    }
147

            
148
714
    Ok(io)
149
714
}
150

            
151
/// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block
/// to satisfy runtime
struct EmptyParachainsInherentDataProvider<C: HeaderBackend<Block>> {
    /// Client used to look up the parent header when building the inherent.
    pub client: Arc<C>,
    /// Hash of the block on top of which the inherent is being built.
    pub parent: Hash,
}
157

            
158
/// Copied from polkadot service just so that this code retains same structure as
/// polkadot_service crate.
struct Basics {
    /// Task manager the node's background tasks are spawned on.
    task_manager: TaskManager,
    /// Shared full client.
    client: Arc<FullClient>,
    /// Shared database backend.
    backend: Arc<FullBackend>,
    /// Container holding the node's keystores.
    keystore_container: KeystoreContainer,
    /// Telemetry instance, present only when telemetry endpoints were configured.
    telemetry: Option<Telemetry>,
}
167

            
168
impl<C: HeaderBackend<Block>> EmptyParachainsInherentDataProvider<C> {
169
15778
    pub fn new(client: Arc<C>, parent: Hash) -> Self {
170
15778
        EmptyParachainsInherentDataProvider { client, parent }
171
15778
    }
172

            
173
15778
    pub async fn create(
174
15778
        client: Arc<C>,
175
15778
        parent: Hash,
176
15778
    ) -> Result<ParachainsInherentData, InherentError> {
177
15778
        let parent_header = match client.header(parent) {
178
15778
            Ok(Some(h)) => h,
179
            Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)),
180
            Err(err) => return Err(InherentError::Blockchain(err)),
181
        };
182

            
183
15778
        Ok(ParachainsInherentData {
184
15778
            bitfields: Vec::new(),
185
15778
            backed_candidates: Vec::new(),
186
15778
            disputes: Vec::new(),
187
15778
            parent_header,
188
15778
        })
189
15778
    }
190
}
191

            
192
#[async_trait::async_trait]
193
impl<C: HeaderBackend<Block>> sp_inherents::InherentDataProvider
194
    for EmptyParachainsInherentDataProvider<C>
195
{
196
    async fn provide_inherent_data(
197
        &self,
198
        dst_inherent_data: &mut sp_inherents::InherentData,
199
15778
    ) -> Result<(), sp_inherents::Error> {
200
15778
        let inherent_data =
201
15778
            EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent)
202
                .await
203
15778
                .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?;
204

            
205
15778
        dst_inherent_data.put_data(
206
15778
            polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER,
207
15778
            &inherent_data,
208
15778
        )
209
31556
    }
210

            
211
    async fn try_handle_error(
212
        &self,
213
        _identifier: &sp_inherents::InherentIdentifier,
214
        _error: &[u8],
215
    ) -> Option<Result<(), sp_inherents::Error>> {
216
        // Inherent isn't checked and can not return any error
217
        None
218
    }
219
}
220

            
221
/// Creates new development full node with manual seal
222
357
pub fn build_full<OverseerGenerator: OverseerGen>(
223
357
    sealing: Sealing,
224
357
    config: Configuration,
225
357
    mut params: NewFullParams<OverseerGenerator>,
226
357
) -> Result<NewFull, Error> {
227
357
    let is_polkadot = config.chain_spec.is_polkadot();
228
357

            
229
357
    params.overseer_message_channel_capacity_override = params
230
357
        .overseer_message_channel_capacity_override
231
357
        .map(move |capacity| {
232
            if is_polkadot {
233
                gum::warn!("Channel capacity should _never_ be tampered with on polkadot!");
234
            }
235
            capacity
236
357
        });
237
357

            
238
357
    match config.network.network_backend {
239
        sc_network::config::NetworkBackendType::Libp2p => {
240
357
            new_full::<_, sc_network::NetworkWorker<Block, Hash>>(sealing, config, params)
241
        }
242
        sc_network::config::NetworkBackendType::Litep2p => {
243
            new_full::<_, sc_network::Litep2pNetworkBackend>(sealing, config, params)
244
        }
245
    }
246
357
}
247

            
248
/// We store past timestamp we created in the aux storage, which enable us to return timestamp which is increased by
249
/// slot duration from previous timestamp or current timestamp if in reality more time is passed.
250
15778
fn get_next_timestamp(
251
15778
    client: Arc<FullClient>,
252
15778
    slot_duration: SlotDuration,
253
15778
) -> sp_timestamp::InherentDataProvider {
254
    const TIMESTAMP_AUX_KEY: &[u8] = b"__DEV_TIMESTAMP";
255

            
256
15778
    let maybe_last_timestamp = client
257
15778
        .get_aux(TIMESTAMP_AUX_KEY)
258
15778
        .expect("Should be able to query aux storage; qed");
259
15778
    if let Some(last_timestamp) = maybe_last_timestamp {
260
15463
        let last_inherent_data = sp_timestamp::InherentType::decode(&mut last_timestamp.as_slice())
261
15463
            .expect("Timestamp data must be decoded; qed");
262
15463
        let new_inherent_data: sp_timestamp::InherentType = max(
263
15463
            last_inherent_data.add(slot_duration.as_millis()),
264
15463
            sp_timestamp::InherentType::current(),
265
15463
        );
266
15463
        client
267
15463
            .insert_aux(
268
15463
                &[(TIMESTAMP_AUX_KEY, new_inherent_data.encode().as_slice())],
269
15463
                &[],
270
15463
            )
271
15463
            .expect("Should be able to write to aux storage; qed");
272
15463
        sp_timestamp::InherentDataProvider::new(new_inherent_data)
273
    } else {
274
315
        let current_timestamp = sp_timestamp::InherentType::current();
275
315
        client
276
315
            .insert_aux(
277
315
                &[(TIMESTAMP_AUX_KEY, current_timestamp.encode().as_slice())],
278
315
                &[],
279
315
            )
280
315
            .expect("Should be able to write to aux storage; qed");
281
315
        sp_timestamp::InherentDataProvider::new(current_timestamp)
282
    }
283
15778
}
284

            
285
357
/// Assemble and start the dev full node on the given network backend.
///
/// Roughly mirrors `polkadot_service::new_full`, but replaces babe authoring
/// with a manual-seal worker and starts none of the overseer, grandpa, beefy
/// or authority-discovery components (see the module docs for the full list).
fn new_full<
    OverseerGenerator: OverseerGen,
    Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
>(
    sealing: Sealing,
    mut config: Configuration,
    NewFullParams {
        telemetry_worker_handle,
        ..
    }: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
    let role = config.role.clone();

    // Client, backend, keystore container, task manager and telemetry.
    let basics = new_partial_basics(&mut config, telemetry_worker_handle)?;

    let prometheus_registry = config.prometheus_registry().cloned();

    let keystore = basics.keystore_container.local_keystore();

    // Dev node uses plain longest-chain selection for the relay chain.
    let select_chain = SelectRelayChain::new_longest_chain(basics.backend.clone());

    let service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> {
        client,
        backend,
        mut task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool,
        other: (block_import, babe_link, slot_duration, mut telemetry),
    } = new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;

    let metrics = Network::register_notification_metrics(
        config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
    );

    let net_config =
        sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network);

    // No warp sync and no custom block relay in dev mode.
    let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
        service::build_network(service::BuildNetworkParams {
            config: &config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue,
            block_announce_validator_builder: None,
            warp_sync_params: None,
            block_relay: None,
            metrics,
        })?;

    if config.offchain_worker.enabled {
        use futures::FutureExt;

        // Offchain workers run with HTTP requests disabled.
        task_manager.spawn_handle().spawn(
            "offchain-workers-runner",
            "offchain-work",
            sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
                runtime_api_provider: client.clone(),
                keystore: Some(keystore_container.keystore()),
                offchain_db: backend.offchain_storage(),
                transaction_pool: Some(OffchainTransactionPoolFactory::new(
                    transaction_pool.clone(),
                )),
                network_provider: Arc::new(network.clone()),
                is_validator: role.is_authority(),
                enable_http_requests: false,
                custom_extensions: move |_| vec![],
            })
            .run(client.clone(), task_manager.spawn_handle())
            .boxed(),
        );
    }

    // Populated only in `Sealing::Manual` mode; handed to the RPC below.
    let mut command_sink = None;

    if role.is_authority() {
        let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording(
            task_manager.spawn_handle(),
            client.clone(),
            transaction_pool.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|x| x.handle()),
        );

        // Source of seal commands, chosen by the configured sealing mode.
        let commands_stream: Box<
            dyn Stream<Item = EngineCommand<<Block as BlockT>::Hash>> + Send + Sync + Unpin,
        > = match sealing {
            Sealing::Instant => {
                Box::new(
                    // This bit cribbed from the implementation of instant seal.
                    transaction_pool
                        .pool()
                        .validated_pool()
                        .import_notification_stream()
                        .map(|_| EngineCommand::SealNewBlock {
                            create_empty: false,
                            finalize: false,
                            parent_hash: None,
                            sender: None,
                        }),
                )
            }
            Sealing::Manual => {
                let (sink, stream) = futures::channel::mpsc::channel(1000);
                // Keep a reference to the other end of the channel. It goes to the RPC.
                command_sink = Some(sink);
                Box::new(stream)
            }
            Sealing::Interval(millis) => Box::new(StreamExt::map(
                Timer::interval(Duration::from_millis(millis)),
                |_| EngineCommand::SealNewBlock {
                    create_empty: true,
                    finalize: true,
                    parent_hash: None,
                    sender: None,
                },
            )),
        };

        // Babe consensus data provider keeps manual-seal blocks carrying
        // valid babe pre-digests / epoch data.
        let babe_config = babe_link.config();
        let babe_consensus_provider = BabeConsensusDataProvider::new(
            client.clone(),
            keystore,
            babe_link.epoch_changes().clone(),
            babe_config.authorities.clone(),
        )
        .map_err(|babe_error| {
            Error::Consensus(consensus_common::Error::Other(babe_error.into()))
        })?;

        // Need to clone it and store here to avoid moving of `client`
        // variable in closure below.
        let client_clone = client.clone();
        task_manager.spawn_essential_handle().spawn_blocking(
            "authorship_task",
            Some("block-authoring"),
            run_manual_seal(ManualSealParams {
                block_import,
                env: proposer,
                client: client.clone(),
                pool: transaction_pool.clone(),
                commands_stream,
                select_chain,
                create_inherent_data_providers: move |parent, ()| {
                    let client_clone = client_clone.clone();

                    async move {
                        // Empty parachains inherent: no candidates/disputes,
                        // only the parent header (dev node has no parachains).
                        let parachain =
                            EmptyParachainsInherentDataProvider::new(
                                client_clone.clone(),
                                parent,
                            );

                        // Timestamp is advanced by at least one slot per
                        // block to emulate the passing of time (see
                        // `get_next_timestamp`).
                        let timestamp = get_next_timestamp(client_clone, slot_duration);

                        let slot =
                            sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                                *timestamp,
                                slot_duration,
                            );

                        Ok((slot, timestamp, parachain))
                    }
                },
                consensus_data_provider: Some(Box::new(babe_consensus_provider)),
            }),
        );
    }

    let rpc_extensions_builder = {
        let client = client.clone();
        let transaction_pool = transaction_pool.clone();
        let chain_spec = config.chain_spec.cloned_box();

        move |deny_unsafe,
              _subscription_executor: polkadot_rpc::SubscriptionTaskExecutor|
              -> Result<RpcExtension, service::Error> {
            let deps = DevDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
                chain_spec: chain_spec.cloned_box(),
                deny_unsafe,
                command_sink: command_sink.clone(),
            };

            create_dev_rpc_extension(deps).map_err(Into::into)
        }
    };

    let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
        config,
        backend: backend.clone(),
        client: client.clone(),
        keystore: keystore_container.keystore(),
        network: network.clone(),
        sync_service: sync_service.clone(),
        rpc_builder: Box::new(rpc_extensions_builder),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        system_rpc_tx,
        tx_handler_controller,
        telemetry: telemetry.as_mut(),
    })?;

    network_starter.start_network();

    Ok(NewFull {
        task_manager,
        client,
        overseer_handle: None,
        network,
        sync_service,
        rpc_handlers,
        backend,
    })
}
504

            
505
357
/// Build the `PartialComponents` of the dev node on top of [`Basics`]:
/// transaction pool, babe block import (for correct epoch data) and a
/// manual-seal import queue wrapping that babe import.
fn new_partial<ChainSelection>(
    config: &mut Configuration,
    Basics {
        task_manager,
        backend,
        client,
        keystore_container,
        telemetry,
    }: Basics,
    select_chain: ChainSelection,
) -> Result<
    service::PartialComponents<
        FullClient,
        FullBackend,
        ChainSelection,
        sc_consensus::DefaultImportQueue<Block>,
        sc_transaction_pool::FullPool<Block, FullClient>,
        (
            BabeBlockImport<Block, FullClient, Arc<FullClient>>,
            BabeLink<Block>,
            SlotDuration,
            Option<Telemetry>,
        ),
    >,
    Error,
>
where
    ChainSelection: 'static + SelectChain<Block>,
{
    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        config.transaction_pool.clone(),
        config.role.is_authority().into(),
        config.prometheus_registry(),
        task_manager.spawn_essential_handle(),
        client.clone(),
    );

    // Create babe block import queue; this is required to have correct epoch data
    // available for manual seal to produce block
    let babe_config = babe::configuration(&*client)?;
    let (babe_block_import, babe_link) =
        babe::block_import(babe_config.clone(), client.clone(), client.clone())?;
    let slot_duration = babe_link.config().slot_duration();

    // Create manual seal block import with manual seal block import queue
    let import_queue = sc_consensus_manual_seal::import_queue(
        Box::new(babe_block_import.clone()),
        &task_manager.spawn_essential_handle(),
        config.prometheus_registry(),
    );

    Ok(service::PartialComponents {
        client,
        backend,
        task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool,
        other: (babe_block_import, babe_link, slot_duration, telemetry),
    })
}
567

            
568
357
/// Create the lowest-level node pieces: telemetry, wasm executor, client,
/// backend, keystore container and task manager (see [`Basics`]).
fn new_partial_basics(
    config: &mut Configuration,
    telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<Basics, Error> {
    // Set up telemetry only when endpoints are configured; reuse an external
    // worker handle when one was passed in, otherwise spawn our own worker.
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(move |endpoints| -> Result<_, telemetry::Error> {
            let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
                (None, worker_handle)
            } else {
                let worker = TelemetryWorker::new(16)?;
                let worker_handle = worker.handle();
                (Some(worker), worker_handle)
            };
            let telemetry = worker_handle.new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    // Respect an explicit heap-page setting, otherwise use the default
    // allocation strategy.
    let heap_pages = config
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
            extra_pages: h as u32,
        });

    let mut wasm_builder = WasmExecutor::builder()
        .with_execution_method(config.wasm_method)
        .with_onchain_heap_alloc_strategy(heap_pages)
        .with_offchain_heap_alloc_strategy(heap_pages)
        .with_max_runtime_instances(config.max_runtime_instances)
        .with_runtime_cache_size(config.runtime_cache_size);
    // Use a precompiled wasm runtime when one is configured.
    if let Some(ref wasmtime_precompiled_path) = config.wasmtime_precompiled {
        wasm_builder = wasm_builder.with_wasmtime_precompiled_path(wasmtime_precompiled_path);
    }
    let executor = wasm_builder.build();

    let (client, backend, keystore_container, task_manager) =
        service::new_full_parts::<Block, RuntimeApi, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);

    // Spawn the telemetry worker (if we own one) and keep only the
    // telemetry handle.
    let telemetry = telemetry.map(|(worker, telemetry)| {
        if let Some(worker) = worker {
            task_manager.spawn_handle().spawn(
                "telemetry",
                Some("telemetry"),
                Box::pin(worker.run()),
            );
        }
        telemetry
    });

    Ok(Basics {
        task_manager,
        client,
        backend,
        keystore_container,
        telemetry,
    })
}