// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>

//! Development Polkadot service. Adapted from the `polkadot_service` crate,
//! with the components that are not required in a dev node removed.
//!
//! The following major changes are made:
//! 1. Removed the beefy and grandpa notification services and request/response protocols
//! 2. Removed parachain support, which also removed the need to start the overseer and all the
//!    collation and network request/response subsystems associated with it
//! 3. Removed support for hardware benchmarking
//! 4. Removed the authority discovery service
//! 5. Removed spawning of the beefy, grandpa and MMR workers
//! 6. Removed the rpc extensions for beefy, grandpa and babe, and added support for manual seal
//! 7. Removed the beefy and grandpa block imports from the block import pipeline (Babe remains)
//! 8. Used the manual seal import queue instead of the babe import queue
//! 9. Started the manual seal worker
//! 10. If the amount of time passed between two blocks is less than the slot duration, we emulate
//!     the passing of time in the babe block import and runtime by incrementing the timestamp by
//!     one slot duration.
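//!
//! Depending on the configured `Sealing` mode, blocks are authored on every new transaction
//! (instant), on a fixed timer (interval), or on demand through the manual seal RPC (manual).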

use {
    crate::dev_rpcs::{DevApiServer, DevRpc},
    async_io::Timer,
    babe::{BabeBlockImport, BabeLink},
    codec::{Decode, Encode},
    consensus_common::SelectChain,
    cumulus_primitives_core::ParaId,
    dancelight_runtime::RuntimeApi,
    futures::{Stream, StreamExt},
    jsonrpsee::RpcModule,
    manual_container_chains_exclusion_rpc::{
        ManualContainerChainsExclusion, ManualContainerChainsExclusionApiServer,
    },
    node_common::service::node_builder::Sealing,
    polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce},
    polkadot_node_core_parachains_inherent::Error as InherentError,
    polkadot_overseer::Handle,
    polkadot_parachain_primitives::primitives::UpwardMessages,
    polkadot_primitives::{
        runtime_api::ParachainHost, BackedCandidate, CandidateCommitments, CandidateDescriptor,
        CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs,
        InherentData as ParachainsInherentData, OccupiedCoreAssumption, SigningContext,
        ValidityAttestation,
    },
    polkadot_rpc::RpcExtension,
    polkadot_service::{
        BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain,
    },
    sc_client_api::{AuxStore, Backend},
    sc_consensus_manual_seal::{
        consensus::babe::BabeConsensusDataProvider,
        rpc::{ManualSeal, ManualSealApiServer},
        run_manual_seal, EngineCommand, ManualSealParams,
    },
    sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
    sc_keystore::Keystore,
    sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool},
    service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_block_builder::BlockBuilder,
    sp_blockchain::{HeaderBackend, HeaderMetadata},
    sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID},
    sp_consensus_babe::SlotDuration,
    sp_core::{ByteArray, Pair, H256},
    sp_keystore::KeystorePtr,
    sp_runtime::{traits::BlakeTwo256, DigestItem, RuntimeAppPublic},
    std::{cmp::max, ops::Add, sync::Arc, time::Duration},
    telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
};

// We use this key to store whether we want the para inherent mocker to be active
const PARA_INHERENT_SELECTOR_AUX_KEY: &[u8] = b"__DEV_PARA_INHERENT_SELECTOR";

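// We use this key to store the set of container chains that are manually excluded from producing blocks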
const CONTAINER_CHAINS_EXCLUSION_AUX_KEY: &[u8] = b"__DEV_CONTAINER_CHAINS_EXCLUSION";

pub type FullBackend = service::TFullBackend<Block>;

pub type FullClient = service::TFullClient<
    Block,
    RuntimeApi,
    WasmExecutor<(
        sp_io::SubstrateHostFunctions,
        frame_benchmarking::benchmarking::HostFunctions,
    )>,
>;

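/// Handles to the running dev node, returned by `build_full`.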
pub struct NewFull {
    pub task_manager: TaskManager,
    pub client: Arc<FullClient>,
    pub overseer_handle: Option<Handle>,
    pub network: Arc<dyn sc_network::service::traits::NetworkService>,
    pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
    pub rpc_handlers: RpcHandlers,
    pub backend: Arc<FullBackend>,
}

/// Custom Deps for dev Rpc extension
struct DevDeps<C, P> {
    /// The client instance to use.
    pub client: Arc<C>,
    /// Transaction pool instance.
    pub pool: Arc<P>,
    /// Manual seal command sink
    pub command_sink: Option<futures::channel::mpsc::Sender<EngineCommand<Hash>>>,
    /// Dev rpcs
    pub dev_rpc: Option<DevRpc>,
    /// Channels for manually excluding container chains from producing blocks
    pub container_chain_exclusion_sender: Option<flume::Sender<Vec<ParaId>>>,
}

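/// Instantiate the dev RPC extension: system and transaction payment RPCs, plus manual seal,
/// dev and container chain exclusion RPCs when the corresponding channels are provided.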
fn create_dev_rpc_extension<C, P>(
    DevDeps {
        client,
        pool,
        command_sink: maybe_command_sink,
        dev_rpc: maybe_dev_rpc,
        container_chain_exclusion_sender: maybe_container_chain_exclusion_sender,
    }: DevDeps<C, P>,
) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
where
    C: ProvideRuntimeApi<Block>
        + HeaderBackend<Block>
        + AuxStore
        + HeaderMetadata<Block, Error = sp_blockchain::Error>
        + Send
        + Sync
        + 'static,
    C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
    C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
    C::Api: BlockBuilder<Block>,
    P: TransactionPool + Sync + Send + 'static,
{
    use {
        pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer},
        substrate_frame_rpc_system::{System, SystemApiServer},
    };

    let mut io = RpcModule::new(());
    io.merge(System::new(client.clone(), pool.clone()).into_rpc())?;
    io.merge(TransactionPayment::new(client.clone()).into_rpc())?;

    if let Some(command_sink) = maybe_command_sink {
        io.merge(ManualSeal::new(command_sink).into_rpc())?;
    }

    if let Some(dev_rpc_data) = maybe_dev_rpc {
        io.merge(dev_rpc_data.into_rpc())?;
    }

    if let Some(container_chain_exclusion_message_channel) = maybe_container_chain_exclusion_sender
    {
        io.merge(
            ManualContainerChainsExclusion {
                container_chain_exclusion_message_channel,
            }
            .into_rpc(),
        )?;
    }

    Ok(io)
}

/// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block
/// to satisfy the runtime
struct EmptyParachainsInherentDataProvider;

/// Copied from the polkadot service just so that this code retains the same structure as the
/// `polkadot_service` crate.
struct Basics {
    task_manager: TaskManager,
    client: Arc<FullClient>,
    backend: Arc<FullBackend>,
    keystore_container: KeystoreContainer,
    telemetry: Option<Telemetry>,
}

impl EmptyParachainsInherentDataProvider {
    pub async fn create<C: HeaderBackend<Block>>(
        client: Arc<C>,
        parent: Hash,
    ) -> Result<ParachainsInherentData, InherentError> {
        let parent_header = match client.header(parent) {
            Ok(Some(h)) => h,
            Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)),
            Err(err) => return Err(InherentError::Blockchain(err)),
        };

        Ok(ParachainsInherentData {
            bitfields: Vec::new(),
            backed_candidates: Vec::new(),
            disputes: Vec::new(),
            parent_header,
        })
    }
}

/// Creates a new development full node with manual seal
pub fn build_full<OverseerGenerator: OverseerGen>(
    sealing: Sealing,
    config: Configuration,
    mut params: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
    let is_polkadot = config.chain_spec.is_polkadot();

    params.overseer_message_channel_capacity_override = params
        .overseer_message_channel_capacity_override
        .map(move |capacity| {
            if is_polkadot {
                gum::warn!("Channel capacity should _never_ be tampered with on polkadot!");
            }
            capacity
        });

    match config.network.network_backend {
        sc_network::config::NetworkBackendType::Libp2p => {
            new_full::<_, sc_network::NetworkWorker<Block, Hash>>(sealing, config, params)
        }
        sc_network::config::NetworkBackendType::Litep2p => {
            new_full::<_, sc_network::Litep2pNetworkBackend>(sealing, config, params)
        }
    }
}

/// We use MockParachainsInherentDataProvider to insert a parachain inherent with mocked
/// candidates.
/// We detect whether any of the keys in our keystore is assigned to a core and provide
/// a mocked candidate for that core.
struct MockParachainsInherentDataProvider<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> {
    pub client: Arc<C>,
    pub parent: Hash,
    pub keystore: KeystorePtr,
    pub upward_messages_receiver: flume::Receiver<Vec<u8>>,
    pub container_chain_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
}

impl<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> MockParachainsInherentDataProvider<C>
where
    C::Api: ParachainHost<Block>,
    C: AuxStore,
{
    pub fn new(
        client: Arc<C>,
        parent: Hash,
        keystore: KeystorePtr,
        upward_messages_receiver: flume::Receiver<Vec<u8>>,
        container_chain_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
    ) -> Self {
        MockParachainsInherentDataProvider {
            client,
            parent,
            keystore,
            upward_messages_receiver,
            container_chain_exclusion_receiver,
        }
    }

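    /// Build the mocked parachains inherent data for `parent`: availability bitfields signed by
    /// every validator and a mocked backed candidate for each scheduled para whose assigned
    /// validator key is present in our keystore.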
    pub async fn create(
        client: Arc<C>,
        parent: Hash,
        keystore: KeystorePtr,
        upward_messages_receiver: flume::Receiver<Vec<u8>>,
        container_chains_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
    ) -> Result<ParachainsInherentData, InherentError> {
        let parent_header = match client.header(parent) {
            Ok(Some(h)) => h,
            Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)),
            Err(err) => return Err(InherentError::Blockchain(err)),
        };

        // Strategy:
        // we usually have 1 validator per core, and we usually run with --alice,
        // so the idea is that at least alice will be assigned to one core.
        // If we find in the keystore the validator attached to a particular core,
        // we generate a signature for the parachain assigned to that core.
        // To retrieve the validator keys, we call the runtime API:

        // the following piece of code predicts whether the validator is assigned to a particular
        // core where a candidate for a parachain needs to be created
        let runtime_api = client.runtime_api();

        // we get all validators

        // we get the current claim queue to know core availability
        let claim_queue = runtime_api.claim_queue(parent).unwrap();

        // we get the validator groups
        let (groups, rotation_info) = runtime_api.validator_groups(parent).unwrap();

        // we calculate the rotations since session start, which will define the core assignment
        // to validators
        let rotations_since_session_start = (parent_header.number
            - rotation_info.session_start_block)
            / rotation_info.group_rotation_frequency;

        // Get all the available keys in the keystore
        let available_keys = keystore
            .keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID)
            .unwrap();

        // create a slot number identical to the parent block num
        let slot_number = AuraInherentType::from(u64::from(parent_header.number));

        // create a mocked header
        let parachain_mocked_header = sp_runtime::generic::Header::<u32, BlakeTwo256> {
            parent_hash: Default::default(),
            number: parent_header.number,
            state_root: Default::default(),
            extrinsics_root: Default::default(),
            digest: sp_runtime::generic::Digest {
                logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot_number.encode())],
            },
        };

        // retrieve availability cores
        let availability_cores = runtime_api.availability_cores(parent).unwrap();

        // retrieve current session_idx
        let session_idx = runtime_api.session_index_for_child(parent).unwrap();

        // retrieve all validators
        let all_validators = runtime_api.validators(parent).unwrap();

        // construct full availability bitvec
        let availability_bitvec = availability_bitvec(1, availability_cores.len());

        let signature_ctx = SigningContext {
            parent_hash: parent,
            session_index: session_idx,
        };

        // we generate the availability bitfield sigs
        // TODO: here we assume all validator keys are able to sign with our keystore;
        // we need to make sure the key is there before we try to sign
        // this is mostly to indicate that the erasure coding chunks were received by all validators
        let bitfields: Vec<UncheckedSigned<AvailabilityBitfield>> = all_validators
            .iter()
            .enumerate()
            .map(|(i, public)| {
                keystore_sign(
                    &keystore,
                    availability_bitvec.clone(),
                    &signature_ctx,
                    ValidatorIndex(i as u32),
                    public,
                )
                .unwrap()
                .unwrap()
            })
            .collect();

        // generate a random collator pair
        let collator_pair = CollatorPair::generate().0;
        let mut backed_cand: Vec<BackedCandidate<H256>> = vec![];

        let container_chains_exclusion_messages: Vec<Vec<ParaId>> =
            container_chains_exclusion_receiver.drain().collect();
        // If there is a new set of excluded container chains, we update it
        if let Some(mock_excluded_container_chains) = container_chains_exclusion_messages.last() {
            client
                .insert_aux(
                    &[(
                        CONTAINER_CHAINS_EXCLUSION_AUX_KEY,
                        mock_excluded_container_chains.encode().as_slice(),
                    )],
                    &[],
                )
                .expect("Should be able to write to aux storage; qed");
        }
        let new_excluded_container_chains_value = client
            .get_aux(CONTAINER_CHAINS_EXCLUSION_AUX_KEY)
            .expect("Should be able to query aux storage; qed")
            .unwrap_or(Vec::<ParaId>::new().encode());
        let mock_excluded_container_chains: Vec<ParaId> =
            Decode::decode(&mut new_excluded_container_chains_value.as_slice())
                .expect("Vector non-decodable");

        // iterate over every core|para pair
        for (core, para) in claim_queue {
            // allows preventing container chains from producing blocks in dev mode
            let mut para = para.clone();
            para.retain(|x| !mock_excluded_container_chains.contains(x));
            // check which group is assigned to each core
            let group_assigned_to_core =
                core.0 + rotations_since_session_start % groups.len() as u32;
            // check validator indices associated to the core
            let indices_associated_to_core = groups.get(group_assigned_to_core as usize).unwrap();
            for index in indices_associated_to_core {
                // fetch validator keys
                let validator_keys_to_find = all_validators.get(index.0 as usize).unwrap();
                // Iterate keys until we find an eligible one, or run out of candidates.
                for type_public_pair in &available_keys {
                    if let Ok(validator) =
                        polkadot_primitives::ValidatorId::from_slice(type_public_pair)
                    {
                        // if we find the validator in the keystore, we try to create a backed candidate
                        if validator_keys_to_find == &validator {
                            // we work with the previously included data
                            let mut persisted_validation_data = runtime_api
                                .persisted_validation_data(
                                    parent,
                                    para[0],
                                    OccupiedCoreAssumption::Included,
                                )
                                .unwrap()
                                .unwrap();

                            // if we don't do this, we only get a backed candidate every 2 blocks;
                            // we want one every block
                            persisted_validation_data.relay_parent_storage_root =
                                parent_header.state_root;

                            let persisted_validation_data_hash = persisted_validation_data.hash();
                            // retrieve the validation code hash
                            let validation_code_hash = runtime_api
                                .validation_code_hash(
                                    parent,
                                    para[0],
                                    OccupiedCoreAssumption::Included,
                                )
                                .unwrap()
                                .unwrap();
                            let pov_hash = Default::default();
                            // generate a fake collator signature
                            let payload = polkadot_primitives::collator_signature_payload(
                                &parent,
                                &para[0],
                                &persisted_validation_data_hash,
                                &pov_hash,
                                &validation_code_hash,
                            );
                            let collator_signature = collator_pair.sign(&payload);

                            let upward_messages = UpwardMessages::try_from(
                                upward_messages_receiver.drain().collect::<Vec<_>>(),
                            )
                            .expect("create upward messages from raw messages");

                            // generate a candidate with most of the values mocked
                            let candidate = CommittedCandidateReceipt::<H256> {
                                descriptor: CandidateDescriptor::<H256> {
                                    para_id: para[0],
                                    relay_parent: parent,
                                    collator: collator_pair.public(),
                                    persisted_validation_data_hash,
                                    pov_hash,
                                    erasure_root: Default::default(),
                                    signature: collator_signature,
                                    para_head: parachain_mocked_header.clone().hash(),
                                    validation_code_hash,
                                },
                                commitments: CandidateCommitments::<u32> {
                                    upward_messages,
                                    horizontal_messages: Default::default(),
                                    new_validation_code: None,
                                    head_data: parachain_mocked_header.clone().encode().into(),
                                    processed_downward_messages: 0,
                                    hrmp_watermark: parent_header.number,
                                },
                            };
                            let candidate_hash = candidate.hash();
                            let payload = CompactStatement::Valid(candidate_hash);

                            let signature_ctx = SigningContext {
                                parent_hash: parent,
                                session_index: session_idx,
                            };

                            // sign the candidate with the validator key
                            let signature = keystore_sign(
                                &keystore,
                                payload,
                                &signature_ctx,
                                *index,
                                &validator,
                            )
                            .unwrap()
                            .unwrap()
                            .benchmark_signature();

                            // construct a validity vote
                            let validity_votes = vec![ValidityAttestation::Explicit(signature)];

                            // push the candidate
                            backed_cand.push(BackedCandidate::<H256>::new(
                                candidate,
                                validity_votes.clone(),
                                bitvec::bitvec![u8, bitvec::order::Lsb0; 1; indices_associated_to_core.len()],
                                core,
                            ));
                        }
                    }
                }
            }
        }

        Ok(ParachainsInherentData {
            bitfields,
            backed_candidates: backed_cand,
            disputes: Vec::new(),
            parent_header,
        })
    }
}

#[async_trait::async_trait]
impl<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> sp_inherents::InherentDataProvider
    for MockParachainsInherentDataProvider<C>
where
    C::Api: ParachainHost<Block>,
    C: AuxStore,
{
    async fn provide_inherent_data(
        &self,
        dst_inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        // fetch whether the para inherent selector has been set
        let maybe_para_selector = self
            .client
            .get_aux(PARA_INHERENT_SELECTOR_AUX_KEY)
            .expect("Should be able to query aux storage; qed");

        let inherent_data = {
            if let Some(aux) = maybe_para_selector {
                // if it is true, the candidates need to be mocked;
                // else, we output the empty parachain inherent data provider
                if aux == true.encode() {
                    MockParachainsInherentDataProvider::create(
                        self.client.clone(),
                        self.parent,
                        self.keystore.clone(),
                        self.upward_messages_receiver.clone(),
                        self.container_chain_exclusion_receiver.clone(),
                    )
                    .await
                    .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
                } else {
                    EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent)
                        .await
                        .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
                }
            } else {
                EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent)
                    .await
                    .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
            }
        };

        dst_inherent_data.put_data(
            polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER,
            &inherent_data,
        )
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // Inherent isn't checked and cannot return any error
        None
    }
}

/// We store the last timestamp we created in aux storage, which lets us return a timestamp
/// increased by one slot duration from the previous one, or the current timestamp if more time
/// has actually passed.
fn get_next_timestamp(
    client: Arc<FullClient>,
    slot_duration: SlotDuration,
) -> sp_timestamp::InherentDataProvider {
    const TIMESTAMP_AUX_KEY: &[u8] = b"__DEV_TIMESTAMP";

    let maybe_last_timestamp = client
        .get_aux(TIMESTAMP_AUX_KEY)
        .expect("Should be able to query aux storage; qed");
    if let Some(last_timestamp) = maybe_last_timestamp {
        let last_inherent_data = sp_timestamp::InherentType::decode(&mut last_timestamp.as_slice())
            .expect("Timestamp data must be decoded; qed");
        let new_inherent_data: sp_timestamp::InherentType = max(
            last_inherent_data.add(slot_duration.as_millis()),
            sp_timestamp::InherentType::current(),
        );
        client
            .insert_aux(
                &[(TIMESTAMP_AUX_KEY, new_inherent_data.encode().as_slice())],
                &[],
            )
            .expect("Should be able to write to aux storage; qed");
        sp_timestamp::InherentDataProvider::new(new_inherent_data)
    } else {
        let current_timestamp = sp_timestamp::InherentType::current();
        client
            .insert_aux(
                &[(TIMESTAMP_AUX_KEY, current_timestamp.encode().as_slice())],
                &[],
            )
            .expect("Should be able to write to aux storage; qed");
        sp_timestamp::InherentDataProvider::new(current_timestamp)
    }
}

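/// Builds the full dev service: networking, optional offchain workers, the manual seal
/// authorship task (fed by the mocked parachains inherent data provider) and the dev RPCs.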
fn new_full<
    OverseerGenerator: OverseerGen,
    Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
>(
    sealing: Sealing,
    mut config: Configuration,
    NewFullParams {
        telemetry_worker_handle,
        ..
    }: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
    let role = config.role;

    let basics = new_partial_basics(&mut config, telemetry_worker_handle)?;

    let prometheus_registry = config.prometheus_registry().cloned();

    let keystore = basics.keystore_container.local_keystore();

    let select_chain = SelectRelayChain::new_longest_chain(basics.backend.clone());

    let service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> {
        client,
        backend,
        mut task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool,
        other: (block_import, babe_link, slot_duration, mut telemetry),
    } = new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;

    let metrics = Network::register_notification_metrics(
        config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
    );

    let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(
        &config.network,
        prometheus_registry.clone(),
    );

    // Create channels for mocked parachain candidates.
    let (downward_mock_para_inherent_sender, downward_mock_para_inherent_receiver) =
        flume::bounded::<Vec<u8>>(100);

    let (upward_mock_sender, upward_mock_receiver) = flume::bounded::<Vec<u8>>(100);

    let (network, system_rpc_tx, tx_handler_controller, sync_service) =
        service::build_network(service::BuildNetworkParams {
            config: &config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue,
            block_announce_validator_builder: None,
            warp_sync_config: None,
            block_relay: None,
            metrics,
        })?;

    if config.offchain_worker.enabled {
        use futures::FutureExt;

        task_manager.spawn_handle().spawn(
            "offchain-workers-runner",
            "offchain-work",
            sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
                runtime_api_provider: client.clone(),
                keystore: Some(keystore_container.keystore()),
                offchain_db: backend.offchain_storage(),
                transaction_pool: Some(OffchainTransactionPoolFactory::new(
                    transaction_pool.clone(),
                )),
                network_provider: Arc::new(network.clone()),
                is_validator: role.is_authority(),
                enable_http_requests: false,
                custom_extensions: move |_| vec![],
            })?
            .run(client.clone(), task_manager.spawn_handle())
            .boxed(),
        );
    }

    let mut command_sink = None;
    let mut container_chain_exclusion_sender = None;

    if role.is_authority() {
        let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording(
            task_manager.spawn_handle(),
            client.clone(),
            transaction_pool.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|x| x.handle()),
        );

        let commands_stream: Box<
            dyn Stream<Item = EngineCommand<<Block as BlockT>::Hash>> + Send + Sync + Unpin,
        > = match sealing {
            Sealing::Instant => {
                Box::new(
                    // This bit cribbed from the implementation of instant seal.
                    transaction_pool.import_notification_stream().map(|_| {
                        EngineCommand::SealNewBlock {
                            create_empty: false,
                            finalize: false,
                            parent_hash: None,
                            sender: None,
                        }
                    }),
                )
            }
            Sealing::Manual => {
                let (sink, stream) = futures::channel::mpsc::channel(1000);
                // Keep a reference to the other end of the channel. It goes to the RPC.
                command_sink = Some(sink);
                Box::new(stream)
            }
            Sealing::Interval(millis) => Box::new(StreamExt::map(
                Timer::interval(Duration::from_millis(millis)),
                |_| EngineCommand::SealNewBlock {
                    create_empty: true,
                    finalize: true,
                    parent_hash: None,
                    sender: None,
                },
            )),
        };
        let keystore_clone = keystore.clone();

        let babe_config = babe_link.config();
        let babe_consensus_provider = BabeConsensusDataProvider::new(
            client.clone(),
            keystore,
            babe_link.epoch_changes().clone(),
            babe_config.authorities.clone(),
        )
        .map_err(|babe_error| {
            Error::Consensus(consensus_common::Error::Other(babe_error.into()))
        })?;

        let (mock_container_chains_exclusion_sender, mock_container_chains_exclusion_receiver) =
            flume::bounded::<Vec<ParaId>>(100);
        container_chain_exclusion_sender = Some(mock_container_chains_exclusion_sender);

        // Need to clone it and store here to avoid moving the `client`
        // variable into the closure below.
        let client_clone = client.clone();

        task_manager.spawn_essential_handle().spawn_blocking(
            "authorship_task",
            Some("block-authoring"),
            run_manual_seal(ManualSealParams {
                block_import,
                env: proposer,
                client: client.clone(),
                pool: transaction_pool.clone(),
                commands_stream,
                select_chain,
                create_inherent_data_providers: move |parent, ()| {
                    let client_clone = client_clone.clone();
                    let keystore = keystore_clone.clone();
                    let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone();
                    let upward_mock_receiver = upward_mock_receiver.clone();
                    let mock_container_chains_exclusion_receiver = mock_container_chains_exclusion_receiver.clone();
                    async move {
                        let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone();
                        // here we only take the last one
                        let para_inherent_decider_messages: Vec<Vec<u8>> = downward_mock_para_inherent_receiver.drain().collect();

                        let upward_messages_receiver = upward_mock_receiver.clone();

                        // If there is a value to be updated, we update it
                        if let Some(value) = para_inherent_decider_messages.last() {
                            client_clone
                            .insert_aux(
                                &[(PARA_INHERENT_SELECTOR_AUX_KEY, value.as_slice())],
                                &[],
                            )
                            .expect("Should be able to write to aux storage; qed");
                        }

                        let parachain = MockParachainsInherentDataProvider::new(
                            client_clone.clone(),
                            parent,
                            keystore,
                            upward_messages_receiver,
                            mock_container_chains_exclusion_receiver
                        );

                        let timestamp = get_next_timestamp(client_clone, slot_duration);

                        let slot =
                            sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                                *timestamp,
                                slot_duration,
                            );

                        Ok((slot, timestamp, parachain))
                    }
                },
                consensus_data_provider: Some(Box::new(babe_consensus_provider)),
            }),
        );
    }

    let dev_rpc = if role.clone().is_authority() {
        Some(DevRpc {
            mock_para_inherent_channel: downward_mock_para_inherent_sender,
            upward_message_channel: upward_mock_sender,
        })
    } else {
        None
    };

    let rpc_extensions_builder = {
        let client = client.clone();
        let transaction_pool = transaction_pool.clone();

        move |_subscription_executor: polkadot_rpc::SubscriptionTaskExecutor|
            -> Result<RpcExtension, service::Error> {
            let deps = DevDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
                command_sink: command_sink.clone(),
                dev_rpc: dev_rpc.clone(),
                container_chain_exclusion_sender: container_chain_exclusion_sender.clone(),
            };

            create_dev_rpc_extension(deps).map_err(Into::into)
        }
    };

    let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
        config,
        backend: backend.clone(),
        client: client.clone(),
        keystore: keystore_container.keystore(),
        network: network.clone(),
        sync_service: sync_service.clone(),
        rpc_builder: Box::new(rpc_extensions_builder),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        system_rpc_tx,
        tx_handler_controller,
        telemetry: telemetry.as_mut(),
    })?;

    Ok(NewFull {
        task_manager,
        client,
        overseer_handle: None,
        network,
        sync_service,
        rpc_handlers,
        backend,
    })
}

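/// Assembles the partial node components: transaction pool, babe block import (needed for
/// correct epoch data) and the manual seal import queue.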
fn new_partial<ChainSelection>(
    config: &mut Configuration,
    Basics {
        task_manager,
        backend,
        client,
        keystore_container,
        telemetry,
    }: Basics,
    select_chain: ChainSelection,
) -> Result<
    service::PartialComponents<
        FullClient,
        FullBackend,
        ChainSelection,
        sc_consensus::DefaultImportQueue<Block>,
        sc_transaction_pool::TransactionPoolHandle<Block, FullClient>,
        (
            BabeBlockImport<Block, FullClient, Arc<FullClient>>,
            BabeLink<Block>,
            SlotDuration,
            Option<Telemetry>,
        ),
    >,
    Error,
>
where
    ChainSelection: 'static + SelectChain<Block>,
{
    let transaction_pool = sc_transaction_pool::Builder::new(
        task_manager.spawn_essential_handle(),
        client.clone(),
        config.role.is_authority().into(),
    )
    .with_options(config.transaction_pool.clone())
    .with_prometheus(config.prometheus_registry())
    .build();

    // Create the babe block import; this is required to have correct epoch data
    // available for manual seal to produce blocks
    let babe_config = babe::configuration(&*client)?;
    let (babe_block_import, babe_link) =
        babe::block_import(babe_config.clone(), client.clone(), client.clone())?;
    let slot_duration = babe_link.config().slot_duration();

    // Create the manual seal import queue, wrapping the babe block import
    let import_queue = sc_consensus_manual_seal::import_queue(
        Box::new(babe_block_import.clone()),
        &task_manager.spawn_essential_handle(),
        config.prometheus_registry(),
    );

    Ok(service::PartialComponents {
        client,
        backend,
        task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool: transaction_pool.into(),
        other: (babe_block_import, babe_link, slot_duration, telemetry),
    })
}

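/// Creates the client, backend, keystore container, task manager and optional telemetry from
/// the node configuration.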
fn new_partial_basics(
    config: &mut Configuration,
    telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<Basics, Error> {
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(move |endpoints| -> Result<_, telemetry::Error> {
            let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
                (None, worker_handle)
            } else {
                let worker = TelemetryWorker::new(16)?;
                let worker_handle = worker.handle();
                (Some(worker), worker_handle)
            };
            let telemetry = worker_handle.new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    let heap_pages = config
        .executor
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
            extra_pages: h as u32,
        });

    let mut wasm_builder = WasmExecutor::builder()
        .with_execution_method(config.executor.wasm_method)
        .with_onchain_heap_alloc_strategy(heap_pages)
        .with_offchain_heap_alloc_strategy(heap_pages)
        .with_max_runtime_instances(config.executor.max_runtime_instances)
        .with_runtime_cache_size(config.executor.runtime_cache_size);
    if let Some(ref wasmtime_precompiled_path) = config.executor.wasmtime_precompiled {
        wasm_builder = wasm_builder.with_wasmtime_precompiled_path(wasmtime_precompiled_path);
    }
    let executor = wasm_builder.build();

    let (client, backend, keystore_container, task_manager) =
        service::new_full_parts::<Block, RuntimeApi, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);

    let telemetry = telemetry.map(|(worker, telemetry)| {
        if let Some(worker) = worker {
            task_manager.spawn_handle().spawn(
                "telemetry",
                Some("telemetry"),
                Box::pin(worker.run()),
            );
        }
        telemetry
    });

    Ok(Basics {
        task_manager,
        client,
        backend,
        keystore_container,
        telemetry,
    })
}

use {
    polkadot_primitives::{AvailabilityBitfield, UncheckedSigned, ValidatorId, ValidatorIndex},
    sp_keystore::Error as KeystoreError,
};

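/// Sign `payload` in the given signing context with the validator's sr25519 key from the
/// keystore, returning an `UncheckedSigned` payload when the key is available.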
fn keystore_sign<H: Encode, Payload: Encode>(
    keystore: &KeystorePtr,
    payload: Payload,
    context: &SigningContext<H>,
    validator_index: ValidatorIndex,
    key: &ValidatorId,
) -> Result<Option<UncheckedSigned<Payload>>, KeystoreError> {
    let data = payload_data(&payload, context);
    let signature = keystore
        .sr25519_sign(ValidatorId::ID, key.as_ref(), &data)?
        .map(|sig| UncheckedSigned::new(payload, validator_index, sig.into()));
    Ok(signature)
}

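/// Encode the payload together with the signing context.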
fn payload_data<H: Encode, Payload: Encode>(
    payload: &Payload,
    context: &SigningContext<H>,
) -> Vec<u8> {
    // equivalent to (`real_payload`, context).encode()
    let mut out = payload.encode_as();
    out.extend(context.encode());
    out
}

/// Create an `AvailabilityBitfield` of size `total_cores`, with the first `used_cores` bits set
/// to true (occupied) and the remaining bits set to false (available).
fn availability_bitvec(used_cores: usize, total_cores: usize) -> AvailabilityBitfield {
    let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0];
    for i in 0..total_cores {
        if i < used_cores {
            bitfields.push(true);
        } else {
            bitfields.push(false)
        }
    }
    bitfields.into()
}