// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>

//! Development Polkadot service. Adapted from the `polkadot_service` crate, with the unnecessary
//! components that are not required in a dev node removed.
//!
//! The following major changes are made:
//! 1. Removed the beefy and grandpa notification services and request/response protocols
//! 2. Removed support for parachains, which also eliminated the need to start the overseer and
//!    all other subsystems associated with collation, together with the corresponding network
//!    request/response protocols
//! 3. Removed support for hardware benchmarking
//! 4. Removed the authority discovery service
//! 5. Removed spawning of the beefy, grandpa and MMR workers
//! 6. Removed the rpc extensions for beefy, grandpa and babe, and added support for manual seal
//! 7. Removed the beefy and grandpa block imports from the block import pipeline (Babe remains)
//! 8. Using the manual seal import queue instead of the babe import queue
//! 9. Started the manual seal worker
//! 10. If the amount of time passed between two blocks is less than the slot duration, we emulate
//!     the passing of time for babe block import and the runtime by incrementing the timestamp by
//!     the slot duration.
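//!
//! With manual sealing, a block is only authored when an `EngineCommand::SealNewBlock` reaches the
//! authorship task, for example through the `engine_createBlock` RPC exposed by `ManualSeal`. A
//! minimal, illustrative sketch (the `rpc_client` handle is assumed and not provided by this
//! module):
//!
//! ```ignore
//! use jsonrpsee::{core::client::ClientT, rpc_params};
//!
//! // Parameters are (create_empty, finalize, optional parent hash).
//! let _created: serde_json::Value = rpc_client
//!     .request("engine_createBlock", rpc_params![true, true, None::<Hash>])
//!     .await?;
//! ```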

use {
    crate::dev_rpcs::{DevApiServer, DevRpc},
    async_io::Timer,
    babe::{BabeBlockImport, BabeLink},
    codec::{Decode, Encode},
    consensus_common::SelectChain,
    cumulus_primitives_core::ParaId,
    dancelight_runtime::RuntimeApi,
    futures::{Stream, StreamExt},
    jsonrpsee::RpcModule,
    manual_container_chains_exclusion_rpc::{
        ManualContainerChainsExclusion, ManualContainerChainsExclusionApiServer,
    },
    node_common::service::Sealing,
    polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce},
    polkadot_node_core_parachains_inherent::Error as InherentError,
    polkadot_overseer::Handle,
    polkadot_parachain_primitives::primitives::UpwardMessages,
    polkadot_primitives::{
        runtime_api::ParachainHost, BackedCandidate, CandidateCommitments, CandidateDescriptor,
        CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs,
        InherentData as ParachainsInherentData, OccupiedCoreAssumption, SigningContext,
        ValidityAttestation,
    },
    polkadot_rpc::RpcExtension,
    polkadot_service::{
        BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain,
    },
    sc_client_api::{AuxStore, Backend},
    sc_consensus_manual_seal::{
        consensus::babe::BabeConsensusDataProvider,
        rpc::{ManualSeal, ManualSealApiServer},
        run_manual_seal, EngineCommand, ManualSealParams,
    },
    sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
    sc_keystore::Keystore,
    sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool},
    service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager},
    sp_api::ProvideRuntimeApi,
    sp_block_builder::BlockBuilder,
    sp_blockchain::{HeaderBackend, HeaderMetadata},
    sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID},
    sp_consensus_babe::SlotDuration,
    sp_core::{ByteArray, Pair, H256},
    sp_keystore::KeystorePtr,
    sp_runtime::{traits::BlakeTwo256, DigestItem, RuntimeAppPublic},
    std::{cmp::max, ops::Add, sync::Arc, time::Duration},
    telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
};

// We use this key to store whether we want the para inherent mocker to be active
const PARA_INHERENT_SELECTOR_AUX_KEY: &[u8] = b"__DEV_PARA_INHERENT_SELECTOR";

// We use this key to store the set of container chains manually excluded from producing blocks
const CONTAINER_CHAINS_EXCLUSION_AUX_KEY: &[u8] = b"__DEV_CONTAINER_CHAINS_EXCLUSION";

pub type FullBackend = service::TFullBackend<Block>;

pub type FullClient = service::TFullClient<
    Block,
    RuntimeApi,
    WasmExecutor<(
        sp_io::SubstrateHostFunctions,
        frame_benchmarking::benchmarking::HostFunctions,
    )>,
>;

pub struct NewFull {
    pub task_manager: TaskManager,
    pub client: Arc<FullClient>,
    pub overseer_handle: Option<Handle>,
    pub network: Arc<dyn sc_network::service::traits::NetworkService>,
    pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
    pub rpc_handlers: RpcHandlers,
    pub backend: Arc<FullBackend>,
}

/// Custom deps for the dev RPC extension
struct DevDeps<C, P> {
    /// The client instance to use.
    pub client: Arc<C>,
    /// Transaction pool instance.
    pub pool: Arc<P>,
    /// Manual seal command sink
    pub command_sink: Option<futures::channel::mpsc::Sender<EngineCommand<Hash>>>,
    /// Dev rpcs
    pub dev_rpc: Option<DevRpc>,
    /// Channel for manually excluding container chains from producing blocks
    pub container_chain_exclusion_sender: Option<flume::Sender<Vec<ParaId>>>,
}

fn create_dev_rpc_extension<C, P>(
    DevDeps {
        client,
        pool,
        command_sink: maybe_command_sink,
        dev_rpc: maybe_dev_rpc,
        container_chain_exclusion_sender: maybe_container_chain_exclusion_sender,
    }: DevDeps<C, P>,
) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
where
    C: ProvideRuntimeApi<Block>
        + HeaderBackend<Block>
        + AuxStore
        + HeaderMetadata<Block, Error = sp_blockchain::Error>
        + Send
        + Sync
        + 'static,
    C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
    C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
    C::Api: BlockBuilder<Block>,
    P: TransactionPool + Sync + Send + 'static,
{
    use {
        pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer},
        substrate_frame_rpc_system::{System, SystemApiServer},
    };

    let mut io = RpcModule::new(());
    io.merge(System::new(client.clone(), pool.clone()).into_rpc())?;
    io.merge(TransactionPayment::new(client.clone()).into_rpc())?;

    if let Some(command_sink) = maybe_command_sink {
        io.merge(ManualSeal::new(command_sink).into_rpc())?;
    }

    if let Some(dev_rpc_data) = maybe_dev_rpc {
        io.merge(dev_rpc_data.into_rpc())?;
    }

    if let Some(container_chain_exclusion_message_channel) = maybe_container_chain_exclusion_sender
    {
        io.merge(
            ManualContainerChainsExclusion {
                container_chain_exclusion_message_channel,
            }
            .into_rpc(),
        )?;
    }

    Ok(io)
}

/// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block
/// to satisfy the runtime
struct EmptyParachainsInherentDataProvider;

/// Copied from the polkadot service just so that this code retains the same structure as the
/// `polkadot_service` crate.
struct Basics {
    task_manager: TaskManager,
    client: Arc<FullClient>,
    backend: Arc<FullBackend>,
    keystore_container: KeystoreContainer,
    telemetry: Option<Telemetry>,
}

impl EmptyParachainsInherentDataProvider {
    pub async fn create<C: HeaderBackend<Block>>(
        client: Arc<C>,
        parent: Hash,
    ) -> Result<ParachainsInherentData, InherentError> {
        let parent_header = match client.header(parent) {
            Ok(Some(h)) => h,
            Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)),
            Err(err) => return Err(InherentError::Blockchain(err)),
        };

        Ok(ParachainsInherentData {
            bitfields: Vec::new(),
            backed_candidates: Vec::new(),
            disputes: Vec::new(),
            parent_header,
        })
    }
}

/// Creates a new development full node with manual seal
pub fn build_full<OverseerGenerator: OverseerGen>(
    sealing: Sealing,
    config: Configuration,
    mut params: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
    let is_polkadot = config.chain_spec.is_polkadot();

    params.overseer_message_channel_capacity_override = params
        .overseer_message_channel_capacity_override
        .map(move |capacity| {
            if is_polkadot {
                gum::warn!("Channel capacity should _never_ be tampered with on polkadot!");
            }
            capacity
        });

    match config
        .network
        .network_backend
        .unwrap_or(sc_network::config::NetworkBackendType::Libp2p)
    {
        sc_network::config::NetworkBackendType::Libp2p => {
            new_full::<_, sc_network::NetworkWorker<Block, Hash>>(sealing, config, params)
        }
        sc_network::config::NetworkBackendType::Litep2p => {
            new_full::<_, sc_network::Litep2pNetworkBackend>(sealing, config, params)
        }
    }
}

/// We use MockParachainsInherentDataProvider to insert a parachain inherent with mocked
/// candidates.
/// We detect whether any of the keys in our keystore is assigned to a core and, if so, provide
/// a mocked candidate for that core.
struct MockParachainsInherentDataProvider<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> {
    pub client: Arc<C>,
    pub parent: Hash,
    pub keystore: KeystorePtr,
    pub upward_messages_receiver: flume::Receiver<Vec<u8>>,
    pub container_chain_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
}

impl<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> MockParachainsInherentDataProvider<C>
where
    C::Api: ParachainHost<Block>,
    C: AuxStore,
{
    pub fn new(
        client: Arc<C>,
        parent: Hash,
        keystore: KeystorePtr,
        upward_messages_receiver: flume::Receiver<Vec<u8>>,
        container_chain_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
    ) -> Self {
        MockParachainsInherentDataProvider {
            client,
            parent,
            keystore,
            upward_messages_receiver,
            container_chain_exclusion_receiver,
        }
    }

    pub async fn create(
        client: Arc<C>,
        parent: Hash,
        keystore: KeystorePtr,
        upward_messages_receiver: flume::Receiver<Vec<u8>>,
        container_chains_exclusion_receiver: flume::Receiver<Vec<ParaId>>,
    ) -> Result<ParachainsInherentData, InherentError> {
        let parent_header = match client.header(parent) {
            Ok(Some(h)) => h,
            Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)),
            Err(err) => return Err(InherentError::Blockchain(err)),
        };

        // Strategy:
        // we usually have 1 validator per core, and we usually run with --alice;
        // the idea is that at least Alice will be assigned to one core.
        // If we find in the keystore the validator attached to a particular core,
        // we generate a signature for the parachain assigned to that core.
        // To retrieve the validator keys, we call the runtime api:

        // the following piece of code predicts whether the validator is assigned to a particular
        // core where a candidate for a parachain needs to be created
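        // For example (illustrative): when the node runs with `--alice` alone, Alice's
        // para-validator key is found in the keystore, so the core her group is currently
        // assigned to receives a mocked backed candidate for the para scheduled on it.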
        let runtime_api = client.runtime_api();

        // we get the current claim queue to know core availability
        let claim_queue = runtime_api.claim_queue(parent).unwrap();

        // we get the validator groups
        let (groups, rotation_info) = runtime_api.validator_groups(parent).unwrap();

        // we calculate the rotations since session start, which define the core assignment
        // to validators
        let rotations_since_session_start = (parent_header.number
            - rotation_info.session_start_block)
            / rotation_info.group_rotation_frequency;

        // Get all the available keys in the keystore
        let available_keys = keystore
            .keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID)
            .unwrap();

        // create a slot number identical to the parent block num
        let slot_number = AuraInherentType::from(u64::from(parent_header.number));

        // create a mocked header
        let parachain_mocked_header = sp_runtime::generic::Header::<u32, BlakeTwo256> {
            parent_hash: Default::default(),
            number: parent_header.number,
            state_root: Default::default(),
            extrinsics_root: Default::default(),
            digest: sp_runtime::generic::Digest {
                logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot_number.encode())],
            },
        };

        // retrieve availability cores
        let availability_cores = runtime_api.availability_cores(parent).unwrap();

        // retrieve the current session index
        let session_idx = runtime_api.session_index_for_child(parent).unwrap();

        // retrieve all validators
        let all_validators = runtime_api.validators(parent).unwrap();

        // construct the full availability bitvec
        let availability_bitvec = availability_bitvec(1, availability_cores.len());

        let signature_ctx = SigningContext {
            parent_hash: parent,
            session_index: session_idx,
        };

        // we generate the availability bitfield sigs
        // TODO: here we assume all validator keys are able to sign with our keystore;
        // we need to make sure the key is there before we try to sign.
        // This is mostly to indicate that the erasure coding chunks were received by all validators
        let bitfields: Vec<UncheckedSigned<AvailabilityBitfield>> = all_validators
            .iter()
            .enumerate()
            .map(|(i, public)| {
                keystore_sign(
                    &keystore,
                    availability_bitvec.clone(),
                    &signature_ctx,
                    ValidatorIndex(i as u32),
                    public,
                )
                .unwrap()
                .unwrap()
            })
            .collect();

        // generate a random collator pair
        let collator_pair = CollatorPair::generate().0;
        let mut backed_cand: Vec<BackedCandidate<H256>> = vec![];

        let container_chains_exclusion_messages: Vec<Vec<ParaId>> =
            container_chains_exclusion_receiver.drain().collect();
        // If there is a new set of excluded container chains, we update it
        if let Some(mock_excluded_container_chains) = container_chains_exclusion_messages.last() {
            client
                .insert_aux(
                    &[(
                        CONTAINER_CHAINS_EXCLUSION_AUX_KEY,
                        mock_excluded_container_chains.encode().as_slice(),
                    )],
                    &[],
                )
                .expect("Should be able to write to aux storage; qed");
        }
        let new_excluded_container_chains_value = client
            .get_aux(CONTAINER_CHAINS_EXCLUSION_AUX_KEY)
            .expect("Should be able to query aux storage; qed")
            .unwrap_or(Vec::<ParaId>::new().encode());
        let mock_excluded_container_chains: Vec<ParaId> =
            Decode::decode(&mut new_excluded_container_chains_value.as_slice())
                .expect("Vector non-decodable");

        // iterate over every core|para pair
        for (core, para) in claim_queue {
            // allows preventing container chains from producing blocks in dev mode
            let mut para = para.clone();
            para.retain(|x| !mock_excluded_container_chains.contains(x));
            // check which group is assigned to each core
            let group_assigned_to_core =
                core.0 + rotations_since_session_start % groups.len() as u32;
            // check the validator indices associated to the core
            let indices_associated_to_core = groups.get(group_assigned_to_core as usize).unwrap();
            for index in indices_associated_to_core {
                // fetch validator keys
                let validator_keys_to_find = all_validators.get(index.0 as usize).unwrap();
                // Iterate keys until we find an eligible one, or run out of candidates.
                for type_public_pair in &available_keys {
                    if let Ok(validator) =
                        polkadot_primitives::ValidatorId::from_slice(type_public_pair)
                    {
                        // if we find the validator in the keystore, we try to create a backed candidate
                        if validator_keys_to_find == &validator {
                            // we work with the previously included data
                            let mut persisted_validation_data = runtime_api
                                .persisted_validation_data(
                                    parent,
                                    para[0],
                                    OccupiedCoreAssumption::Included,
                                )
                                .unwrap()
                                .unwrap();

                            // if we don't do this we only get a backed candidate every 2 blocks,
                            // and we want one per block
                            persisted_validation_data.relay_parent_storage_root =
                                parent_header.state_root;

                            let persisted_validation_data_hash = persisted_validation_data.hash();
                            // retrieve the validation code hash
                            let validation_code_hash = runtime_api
                                .validation_code_hash(
                                    parent,
                                    para[0],
                                    OccupiedCoreAssumption::Included,
                                )
                                .unwrap()
                                .unwrap();
                            let pov_hash = Default::default();
                            // generate a fake collator signature
                            let payload = polkadot_primitives::collator_signature_payload(
                                &parent,
                                &para[0],
                                &persisted_validation_data_hash,
                                &pov_hash,
                                &validation_code_hash,
                            );
                            let collator_signature = collator_pair.sign(&payload);

                            let upward_messages = UpwardMessages::try_from(
                                upward_messages_receiver.drain().collect::<Vec<_>>(),
                            )
                            .expect("create upward messages from raw messages");

                            // generate a candidate with most of the values mocked
                            let candidate = CommittedCandidateReceipt::<H256> {
                                descriptor: CandidateDescriptor::<H256> {
                                    para_id: para[0],
                                    relay_parent: parent,
                                    collator: collator_pair.public(),
                                    persisted_validation_data_hash,
                                    pov_hash,
                                    erasure_root: Default::default(),
                                    signature: collator_signature,
                                    para_head: parachain_mocked_header.clone().hash(),
                                    validation_code_hash,
                                },
                                commitments: CandidateCommitments::<u32> {
                                    upward_messages,
                                    horizontal_messages: Default::default(),
                                    new_validation_code: None,
                                    head_data: parachain_mocked_header.clone().encode().into(),
                                    processed_downward_messages: 0,
                                    hrmp_watermark: parent_header.number,
                                },
                            };
                            let candidate_hash = candidate.hash();
                            let payload = CompactStatement::Valid(candidate_hash);

                            let signature_ctx = SigningContext {
                                parent_hash: parent,
                                session_index: session_idx,
                            };

                            // sign the candidate with the validator key
                            let signature = keystore_sign(
                                &keystore,
                                payload,
                                &signature_ctx,
                                *index,
                                &validator,
                            )
                            .unwrap()
                            .unwrap()
                            .benchmark_signature();

                            // construct a validity vote
                            let validity_votes = vec![ValidityAttestation::Explicit(signature)];

                            // push the candidate
                            backed_cand.push(BackedCandidate::<H256>::new(
                                candidate,
                                validity_votes.clone(),
                                bitvec::bitvec![u8, bitvec::order::Lsb0; 1; indices_associated_to_core.len()],
                                core,
                            ));
                        }
                    }
                }
            }
        }

        Ok(ParachainsInherentData {
            bitfields,
            backed_candidates: backed_cand,
            disputes: Vec::new(),
            parent_header,
        })
    }
}

#[async_trait::async_trait]
impl<C: HeaderBackend<Block> + ProvideRuntimeApi<Block>> sp_inherents::InherentDataProvider
    for MockParachainsInherentDataProvider<C>
where
    C::Api: ParachainHost<Block>,
    C: AuxStore,
{
    async fn provide_inherent_data(
        &self,
        dst_inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        // fetch whether the para inherent selector has been set
        let maybe_para_selector = self
            .client
            .get_aux(PARA_INHERENT_SELECTOR_AUX_KEY)
            .expect("Should be able to query aux storage; qed");

        let inherent_data = {
            if let Some(aux) = maybe_para_selector {
                // if it is true, the candidates need to be mocked;
                // else, we output the empty parachain inherent data provider
                if aux == true.encode() {
                    MockParachainsInherentDataProvider::create(
                        self.client.clone(),
                        self.parent,
                        self.keystore.clone(),
                        self.upward_messages_receiver.clone(),
                        self.container_chain_exclusion_receiver.clone(),
                    )
                    .await
                    .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
                } else {
                    EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent)
                        .await
                        .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
                }
            } else {
                EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent)
                    .await
                    .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?
            }
        };

        dst_inherent_data.put_data(
            polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER,
            &inherent_data,
        )
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // Inherent isn't checked and cannot return any error
        None
    }
}

/// We store the last timestamp we created in the aux storage, which enables us to return a
/// timestamp increased by one slot duration from the previous one, or the current timestamp if
/// more time has actually passed.
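/// For example (illustrative, assuming a 6-second slot): if the stored timestamp is `t`, the
/// next call returns `max(t + 6000, now)`, so blocks sealed in quick succession still advance
/// time by one slot each.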
fn get_next_timestamp(
    client: Arc<FullClient>,
    slot_duration: SlotDuration,
) -> sp_timestamp::InherentDataProvider {
    const TIMESTAMP_AUX_KEY: &[u8] = b"__DEV_TIMESTAMP";

    let maybe_last_timestamp = client
        .get_aux(TIMESTAMP_AUX_KEY)
        .expect("Should be able to query aux storage; qed");
    if let Some(last_timestamp) = maybe_last_timestamp {
        let last_inherent_data = sp_timestamp::InherentType::decode(&mut last_timestamp.as_slice())
            .expect("Timestamp data must be decoded; qed");
        let new_inherent_data: sp_timestamp::InherentType = max(
            last_inherent_data.add(slot_duration.as_millis()),
            sp_timestamp::InherentType::current(),
        );
        client
            .insert_aux(
                &[(TIMESTAMP_AUX_KEY, new_inherent_data.encode().as_slice())],
                &[],
            )
            .expect("Should be able to write to aux storage; qed");
        sp_timestamp::InherentDataProvider::new(new_inherent_data)
    } else {
        let current_timestamp = sp_timestamp::InherentType::current();
        client
            .insert_aux(
                &[(TIMESTAMP_AUX_KEY, current_timestamp.encode().as_slice())],
                &[],
            )
            .expect("Should be able to write to aux storage; qed");
        sp_timestamp::InherentDataProvider::new(current_timestamp)
    }
}

fn new_full<
    OverseerGenerator: OverseerGen,
    Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
>(
    sealing: Sealing,
    mut config: Configuration,
    NewFullParams {
        telemetry_worker_handle,
        ..
    }: NewFullParams<OverseerGenerator>,
) -> Result<NewFull, Error> {
    let role = config.role;

    let basics = new_partial_basics(&mut config, telemetry_worker_handle)?;

    let prometheus_registry = config.prometheus_registry().cloned();

    let keystore = basics.keystore_container.local_keystore();

    let select_chain = SelectRelayChain::new_longest_chain(basics.backend.clone());

    let service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> {
        client,
        backend,
        mut task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool,
        other: (block_import, babe_link, slot_duration, mut telemetry),
    } = new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;

    let metrics = Network::register_notification_metrics(
        config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
    );

    let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(
        &config.network,
        prometheus_registry.clone(),
    );

    // Create channels for mocked parachain candidates.
    let (downward_mock_para_inherent_sender, downward_mock_para_inherent_receiver) =
        flume::bounded::<Vec<u8>>(100);

    let (upward_mock_sender, upward_mock_receiver) = flume::bounded::<Vec<u8>>(100);

    let (network, system_rpc_tx, tx_handler_controller, sync_service) =
        service::build_network(service::BuildNetworkParams {
            config: &config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue,
            block_announce_validator_builder: None,
            warp_sync_config: None,
            block_relay: None,
            metrics,
        })?;

    if config.offchain_worker.enabled {
        use futures::FutureExt;

        task_manager.spawn_handle().spawn(
            "offchain-workers-runner",
            "offchain-work",
            sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
                runtime_api_provider: client.clone(),
                keystore: Some(keystore_container.keystore()),
                offchain_db: backend.offchain_storage(),
                transaction_pool: Some(OffchainTransactionPoolFactory::new(
                    transaction_pool.clone(),
                )),
                network_provider: Arc::new(network.clone()),
                is_validator: role.is_authority(),
                enable_http_requests: false,
                custom_extensions: move |_| vec![],
            })?
            .run(client.clone(), task_manager.spawn_handle())
            .boxed(),
        );
    }

    let mut command_sink = None;
    let mut container_chain_exclusion_sender = None;

    if role.is_authority() {
        let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording(
            task_manager.spawn_handle(),
            client.clone(),
            transaction_pool.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|x| x.handle()),
        );

        let commands_stream: Box<
            dyn Stream<Item = EngineCommand<<Block as BlockT>::Hash>> + Send + Sync + Unpin,
        > = match sealing {
            Sealing::Instant => {
                Box::new(
                    // This bit cribbed from the implementation of instant seal.
                    transaction_pool.import_notification_stream().map(|_| {
                        EngineCommand::SealNewBlock {
                            create_empty: false,
                            finalize: false,
                            parent_hash: None,
                            sender: None,
                        }
                    }),
                )
            }
            Sealing::Manual => {
                let (sink, stream) = futures::channel::mpsc::channel(1000);
                // Keep a reference to the other end of the channel. It goes to the RPC.
                command_sink = Some(sink);
                Box::new(stream)
            }
            Sealing::Interval(millis) => Box::new(StreamExt::map(
                Timer::interval(Duration::from_millis(millis)),
                |_| EngineCommand::SealNewBlock {
                    create_empty: true,
                    finalize: true,
                    parent_hash: None,
                    sender: None,
                },
            )),
        };
        let keystore_clone = keystore.clone();

        let babe_config = babe_link.config();
        let babe_consensus_provider = BabeConsensusDataProvider::new(
            client.clone(),
            keystore,
            babe_link.epoch_changes().clone(),
            babe_config.authorities.clone(),
        )
        .map_err(|babe_error| {
            Error::Consensus(consensus_common::Error::Other(babe_error.into()))
        })?;

        let (mock_container_chains_exclusion_sender, mock_container_chains_exclusion_receiver) =
            flume::bounded::<Vec<ParaId>>(100);
        container_chain_exclusion_sender = Some(mock_container_chains_exclusion_sender);

        // Need to clone it and store here to avoid moving of `client`
        // variable in closure below.
        let client_clone = client.clone();

        task_manager.spawn_essential_handle().spawn_blocking(
            "authorship_task",
            Some("block-authoring"),
            run_manual_seal(ManualSealParams {
                block_import,
                env: proposer,
                client: client.clone(),
                pool: transaction_pool.clone(),
                commands_stream,
                select_chain,
                create_inherent_data_providers: move |parent, ()| {
                    let client_clone = client_clone.clone();
                    let keystore = keystore_clone.clone();
                    let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone();
                    let upward_mock_receiver = upward_mock_receiver.clone();
                    let mock_container_chains_exclusion_receiver = mock_container_chains_exclusion_receiver.clone();
                    async move {
                        let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone();
                        // here we only take the last one
                        let para_inherent_decider_messages: Vec<Vec<u8>> = downward_mock_para_inherent_receiver.drain().collect();

                        let upward_messages_receiver = upward_mock_receiver.clone();

                        // If there is a value to be updated, we update it
                        if let Some(value) = para_inherent_decider_messages.last() {
                            client_clone
                                .insert_aux(
                                    &[(PARA_INHERENT_SELECTOR_AUX_KEY, value.as_slice())],
                                    &[],
                                )
                                .expect("Should be able to write to aux storage; qed");
                        }

                        let parachain = MockParachainsInherentDataProvider::new(
                            client_clone.clone(),
                            parent,
                            keystore,
                            upward_messages_receiver,
                            mock_container_chains_exclusion_receiver,
                        );

                        let timestamp = get_next_timestamp(client_clone, slot_duration);

                        let slot =
                            sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                                *timestamp,
                                slot_duration,
                            );

                        Ok((slot, timestamp, parachain))
                    }
                },
                consensus_data_provider: Some(Box::new(babe_consensus_provider)),
            }),
        );
    }

    let dev_rpc = if role.clone().is_authority() {
        Some(DevRpc {
            mock_para_inherent_channel: downward_mock_para_inherent_sender,
            upward_message_channel: upward_mock_sender,
        })
    } else {
        None
    };

    let rpc_extensions_builder = {
        let client = client.clone();
        let transaction_pool = transaction_pool.clone();

        move |_subscription_executor: polkadot_rpc::SubscriptionTaskExecutor|
            -> Result<RpcExtension, service::Error> {
            let deps = DevDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
                command_sink: command_sink.clone(),
                dev_rpc: dev_rpc.clone(),
                container_chain_exclusion_sender: container_chain_exclusion_sender.clone(),
            };

            create_dev_rpc_extension(deps).map_err(Into::into)
        }
    };

    let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
        config,
        backend: backend.clone(),
        client: client.clone(),
        keystore: keystore_container.keystore(),
        network: network.clone(),
        sync_service: sync_service.clone(),
        rpc_builder: Box::new(rpc_extensions_builder),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        system_rpc_tx,
        tx_handler_controller,
        telemetry: telemetry.as_mut(),
    })?;

    Ok(NewFull {
        task_manager,
        client,
        overseer_handle: None,
        network,
        sync_service,
        rpc_handlers,
        backend,
    })
}

fn new_partial<ChainSelection>(
    config: &mut Configuration,
    Basics {
        task_manager,
        backend,
        client,
        keystore_container,
        telemetry,
    }: Basics,
    select_chain: ChainSelection,
) -> Result<
    service::PartialComponents<
        FullClient,
        FullBackend,
        ChainSelection,
        sc_consensus::DefaultImportQueue<Block>,
        sc_transaction_pool::TransactionPoolHandle<Block, FullClient>,
        (
            BabeBlockImport<Block, FullClient, Arc<FullClient>>,
            BabeLink<Block>,
            SlotDuration,
            Option<Telemetry>,
        ),
    >,
    Error,
>
where
    ChainSelection: 'static + SelectChain<Block>,
{
    let transaction_pool = sc_transaction_pool::Builder::new(
        task_manager.spawn_essential_handle(),
        client.clone(),
        config.role.is_authority().into(),
    )
    .with_options(config.transaction_pool.clone())
    .with_prometheus(config.prometheus_registry())
    .build();

    // Create the babe block import; this is required to have correct epoch data
    // available for manual seal to produce blocks
    let babe_config = babe::configuration(&*client)?;
    let (babe_block_import, babe_link) =
        babe::block_import(babe_config.clone(), client.clone(), client.clone())?;
    let slot_duration = babe_link.config().slot_duration();

    // Create the manual seal import queue, wrapping the babe block import
    let import_queue = sc_consensus_manual_seal::import_queue(
        Box::new(babe_block_import.clone()),
        &task_manager.spawn_essential_handle(),
        config.prometheus_registry(),
    );

    Ok(service::PartialComponents {
        client,
        backend,
        task_manager,
        keystore_container,
        select_chain,
        import_queue,
        transaction_pool: transaction_pool.into(),
        other: (babe_block_import, babe_link, slot_duration, telemetry),
    })
}

fn new_partial_basics(
    config: &mut Configuration,
    telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<Basics, Error> {
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(move |endpoints| -> Result<_, telemetry::Error> {
            let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
                (None, worker_handle)
            } else {
                let worker = TelemetryWorker::new(16)?;
                let worker_handle = worker.handle();
                (Some(worker), worker_handle)
            };
            let telemetry = worker_handle.new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    let heap_pages = config
        .executor
        .default_heap_pages
        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
            extra_pages: h as u32,
        });

    let mut wasm_builder = WasmExecutor::builder()
        .with_execution_method(config.executor.wasm_method)
        .with_onchain_heap_alloc_strategy(heap_pages)
        .with_offchain_heap_alloc_strategy(heap_pages)
        .with_max_runtime_instances(config.executor.max_runtime_instances)
        .with_runtime_cache_size(config.executor.runtime_cache_size);
    if let Some(ref wasmtime_precompiled_path) = config.executor.wasmtime_precompiled {
        wasm_builder = wasm_builder.with_wasmtime_precompiled_path(wasmtime_precompiled_path);
    }
    let executor = wasm_builder.build();

    let (client, backend, keystore_container, task_manager) =
        service::new_full_parts::<Block, RuntimeApi, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);

    let telemetry = telemetry.map(|(worker, telemetry)| {
        if let Some(worker) = worker {
            task_manager.spawn_handle().spawn(
                "telemetry",
                Some("telemetry"),
                Box::pin(worker.run()),
            );
        }
        telemetry
    });

    Ok(Basics {
        task_manager,
        client,
        backend,
        keystore_container,
        telemetry,
    })
}

use {
    polkadot_primitives::{AvailabilityBitfield, UncheckedSigned, ValidatorId, ValidatorIndex},
    sp_keystore::Error as KeystoreError,
};

fn keystore_sign<H: Encode, Payload: Encode>(
    keystore: &KeystorePtr,
    payload: Payload,
    context: &SigningContext<H>,
    validator_index: ValidatorIndex,
    key: &ValidatorId,
) -> Result<Option<UncheckedSigned<Payload>>, KeystoreError> {
    let data = payload_data(&payload, context);
    let signature = keystore
        .sr25519_sign(ValidatorId::ID, key.as_ref(), &data)?
        .map(|sig| UncheckedSigned::new(payload, validator_index, sig.into()));
    Ok(signature)
}

fn payload_data<H: Encode, Payload: Encode>(
    payload: &Payload,
    context: &SigningContext<H>,
) -> Vec<u8> {
    // equivalent to (`real_payload`, context).encode()
    let mut out = payload.encode_as();
    out.extend(context.encode());
    out
}

/// Create an `AvailabilityBitfield` of size `total_cores`, with the first `used_cores` bits set
/// to true (occupied) and the remaining bits set to false (available).
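/// For example (illustrative), `availability_bitvec(1, 4)` yields the bits `1000`.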
fn availability_bitvec(used_cores: usize, total_cores: usize) -> AvailabilityBitfield {
    let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0];
    for i in 0..total_cores {
        if i < used_cores {
            bitfields.push(true);
        } else {
            bitfields.push(false);
        }
    }
    bitfields.into()
}