// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

use {
    crate::command::solochain::{copy_zombienet_keystore, dummy_config},
    core::marker::PhantomData,
    cumulus_client_cli::CollatorOptions,
    cumulus_client_collator::service::CollatorService,
    cumulus_client_consensus_proposer::Proposer,
    cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig},
    cumulus_client_service::{
        prepare_node_config, start_relay_chain_tasks, DARecoveryProfile, StartRelayChainTasksParams,
    },
    cumulus_primitives_core::{
        relay_chain::{well_known_keys as RelayWellKnownKeys, CollatorPair},
        CollectCollationInfo, ParaId,
    },
    cumulus_relay_chain_interface::{call_runtime_api, OverseerHandle, RelayChainInterface},
    dancebox_runtime::{
        opaque::{Block, Hash},
        AccountId, RuntimeApi,
    },
    dc_orchestrator_chain_interface::{
        BlockNumber, ContainerChainGenesisData, DataPreserverAssignment, DataPreserverProfileId,
        OrchestratorChainError, OrchestratorChainInterface, OrchestratorChainResult, PHash,
        PHeader,
    },
    frame_support::__private::sp_tracing::tracing::Instrument,
    futures::{Stream, StreamExt},
    nimbus_primitives::{NimbusId, NimbusPair},
    node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing},
    pallet_author_noting_runtime_api::AuthorNotingApi,
    pallet_collator_assignment_runtime_api::CollatorAssignmentApi,
    pallet_data_preservers_runtime_api::DataPreserversApi,
    pallet_registrar_runtime_api::RegistrarApi,
    parity_scale_codec::{Decode, Encode},
    polkadot_cli::ProvideRuntimeApi,
    polkadot_parachain_primitives::primitives::HeadData,
    polkadot_primitives::UpgradeGoAhead,
    polkadot_service::Handle,
    sc_cli::CliConfiguration,
    sc_client_api::{
        AuxStore, Backend as BackendT, BlockchainEvents, HeaderBackend, UsageProvider,
    },
    sc_consensus::BasicQueue,
    sc_network::NetworkBackend,
    sc_network::NetworkBlock,
    sc_network_common::role::Role,
    sc_network_sync::SyncingService,
    sc_service::{Configuration, KeystoreContainer, SpawnTaskHandle, TFullBackend, TaskManager},
    sc_telemetry::TelemetryHandle,
    sc_transaction_pool::TransactionPoolHandle,
    sp_api::ApiExt,
    sp_api::StorageProof,
    sp_consensus::SyncOracle,
    sp_consensus_slots::Slot,
    sp_core::{traits::SpawnEssentialNamed, H256},
    sp_keystore::KeystorePtr,
    sp_state_machine::{Backend as StateBackend, StorageValue},
    std::{pin::Pin, sync::Arc, time::Duration},
    tc_consensus::{
        collators::lookahead::{
            self as lookahead_tanssi_aura, BuyCoreParams, Params as LookaheadTanssiAuraParams,
        },
        OnDemandBlockProductionApi, OrchestratorAuraWorkerAuxData, TanssiAuthorityAssignmentApi,
    },
    tc_service_container_chain::{
        cli::ContainerChainCli,
        monitor,
        service::{
            DevParachainBlockImport, ParachainBlockImport, ParachainClient, ParachainExecutor,
            ParachainProposerFactory,
        },
        spawner::{self, CcSpawnMsg, ContainerChainSpawnParams, ContainerChainSpawner},
    },
    tokio::sync::mpsc::{unbounded_channel, UnboundedSender},
    tokio_util::sync::CancellationToken,
};

mod mocked_relay_keys;

// We use this to detect whether randomness is activated
const RANDOMNESS_ACTIVATED_AUX_KEY: &[u8] = b"__DEV_RANDOMNESS_ACTIVATED";

const CONTAINER_CHAINS_EXCLUSION_AUX_KEY: &[u8] = b"__DEV_CONTAINER_CHAINS_EXCLUSION";

type FullBackend = TFullBackend<Block>;

pub struct NodeConfig;
impl NodeBuilderConfig for NodeConfig {
    type Block = Block;
    type RuntimeApi = RuntimeApi;
    type ParachainExecutor = ParachainExecutor;
}

thread_local!(static TIMESTAMP: std::cell::RefCell<u64> = const { std::cell::RefCell::new(0) });
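
// NOTE: `TIMESTAMP` is shared between `MockTimestampInherentDataProvider` below,
// which advances it by one slot per sealed block, and the dev-node inherent closure
// in `start_dev_node`, which reads it to derive the mocked relay slot.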

/// Provide a mock duration starting at 0 in milliseconds for the timestamp inherent.
/// Each call will increment the timestamp by one slot duration, making Aura think time has passed.
struct MockTimestampInherentDataProvider;
#[async_trait::async_trait]
impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
    async fn provide_inherent_data(
        &self,
        inherent_data: &mut sp_inherents::InherentData,
    ) -> Result<(), sp_inherents::Error> {
        TIMESTAMP.with(|x| {
            *x.borrow_mut() += dancebox_runtime::SLOT_DURATION;
            inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
        })
    }

    async fn try_handle_error(
        &self,
        _identifier: &sp_inherents::InherentIdentifier,
        _error: &[u8],
    ) -> Option<Result<(), sp_inherents::Error>> {
        // The pallet never reports an error.
        None
    }
}

/// Background task used to detect changes to container chain assignment,
/// and start/stop container chains on demand. The check runs on every newly
/// finalized block.
pub fn build_check_assigned_para_id(
    client: Arc<dyn OrchestratorChainInterface>,
    sync_keystore: KeystorePtr,
    cc_spawn_tx: UnboundedSender<CcSpawnMsg>,
    spawner: impl SpawnEssentialNamed,
) {
    let check_assigned_para_id_task = async move {
        // Subscribe to new blocks in order to react to para id assignment
        // This must be the stream of finalized blocks, otherwise the collators may rotate to a
        // different chain before the block is finalized, and that could lead to a stalled chain
        let mut import_notifications = client.finality_notification_stream().await.unwrap();

        while let Some(msg) = import_notifications.next().await {
            let block_hash = msg.hash();
            let client_set_aside_for_cidp = client.clone();
            let sync_keystore = sync_keystore.clone();
            let cc_spawn_tx = cc_spawn_tx.clone();

            check_assigned_para_id(
                cc_spawn_tx,
                sync_keystore,
                client_set_aside_for_cidp,
                block_hash,
            )
            .await
            .unwrap();
        }
    };

    spawner.spawn_essential(
        "check-assigned-para-id",
        None,
        Box::pin(check_assigned_para_id_task),
    );
}

/// Check the parachain assignment using the orchestrator chain client, and send a `CcSpawnMsg` to
/// start or stop the required container chains.
///
/// Checks the assignment for the next block, so if there is a session change on block 15, this will
/// detect the assignment change after importing block 14.
async fn check_assigned_para_id(
    cc_spawn_tx: UnboundedSender<CcSpawnMsg>,
    sync_keystore: KeystorePtr,
    client_set_aside_for_cidp: Arc<dyn OrchestratorChainInterface>,
    block_hash: H256,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Check current assignment
    let current_container_chain_para_id =
        tc_consensus::first_eligible_key::<dyn OrchestratorChainInterface, NimbusPair>(
            client_set_aside_for_cidp.as_ref(),
            &block_hash,
            sync_keystore.clone(),
        )
        .await
        .map(|(_nimbus_key, para_id)| para_id);

    // Check assignment in the next session
    let next_container_chain_para_id = tc_consensus::first_eligible_key_next_session::<
        dyn OrchestratorChainInterface,
        NimbusPair,
    >(
        client_set_aside_for_cidp.as_ref(),
        &block_hash,
        sync_keystore,
    )
    .await
    .map(|(_nimbus_key, para_id)| para_id);
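
    // `current`/`next` are `None` when this node's keys are not eligible to collate
    // on any chain; the spawner reacts to this message by starting or stopping the
    // affected container chains.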
    cc_spawn_tx.send(CcSpawnMsg::UpdateAssignment {
        current: current_container_chain_para_id,
        next: next_container_chain_para_id,
    })?;

    Ok(())
}

pub fn import_queue(
    parachain_config: &Configuration,
    node_builder: &NodeBuilder<NodeConfig>,
) -> (ParachainBlockImport, BasicQueue<Block>) {
    // The nimbus import queue ONLY checks the signature correctness.
    // Any other checks of author correctness should be done in the runtime.
    let block_import =
        ParachainBlockImport::new(node_builder.client.clone(), node_builder.backend.clone());

    let import_queue = nimbus_consensus::import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        move |_, _| async move {
            let time = sp_timestamp::InherentDataProvider::from_system_time();

            Ok((time,))
        },
        &node_builder.task_manager.spawn_essential_handle(),
        parachain_config.prometheus_registry(),
        false,
        false,
    )
    .expect("function never fails");

    (block_import, import_queue)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
async fn start_node_impl<Net>(
    orchestrator_config: Configuration,
    polkadot_config: Configuration,
    container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>,
    collator_options: CollatorOptions,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
    max_pov_percentage: Option<u32>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)>
where
    Net: NetworkBackend<Block, Hash>,
{
    let parachain_config = prepare_node_config(orchestrator_config);
    let chain_type: sc_chain_spec::ChainType = parachain_config.chain_spec.chain_type();
    let relay_chain = crate::chain_spec::Extensions::try_get(&*parachain_config.chain_spec)
        .map(|e| e.relay_chain.clone())
        .ok_or("Could not find relay_chain extension in chain-spec.")?;

    // Channel to send messages to start/stop container chains
    let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel();

    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?;

    let (block_import, import_queue) = import_queue(&parachain_config, &node_builder);

    let (relay_chain_interface, collator_key) = node_builder
        .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone())
        .await?;

    let validator = parachain_config.role.is_authority();
    let force_authoring = parachain_config.force_authoring;

    let node_builder = node_builder
        .build_cumulus_network::<_, Net>(
            &parachain_config,
            para_id,
            import_queue,
            relay_chain_interface.clone(),
        )
        .await?;

    let rpc_builder = {
        let client = node_builder.client.clone();
        let transaction_pool = node_builder.transaction_pool.clone();

        Box::new(move |_| {
            let deps = crate::rpc::FullDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
                command_sink: None,
                xcm_senders: None,
                randomness_sender: None,
                container_chain_exclusion_sender: None,
            };

            crate::rpc::create_full(deps).map_err(Into::into)
        })
    };

    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;

    let relay_chain_slot_duration = Duration::from_secs(6);
    let overseer_handle = relay_chain_interface
        .overseer_handle()
        .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
    let sync_keystore = node_builder.keystore_container.keystore();
    let mut collate_on_tanssi: Arc<
        dyn Fn() -> (CancellationToken, futures::channel::oneshot::Receiver<()>) + Send + Sync,
    > = Arc::new(move || {
        if validator {
            panic!("Called uninitialized collate_on_tanssi");
        } else {
            panic!("Called collate_on_tanssi when node is not running as a validator");
        }
    });
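    // Placeholder: replaced below with `start_collation` when this node runs as a
    // validator; calling it before then is a bug, hence the panics above.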

    let announce_block = {
        let sync_service = node_builder.network.sync_service.clone();
        Arc::new(move |hash, data| sync_service.announce_block(hash, data))
    };

    let (mut node_builder, import_queue_service) = node_builder.extract_import_queue_service();

    start_relay_chain_tasks(StartRelayChainTasksParams {
        client: node_builder.client.clone(),
        announce_block: announce_block.clone(),
        para_id,
        relay_chain_interface: relay_chain_interface.clone(),
        task_manager: &mut node_builder.task_manager,
        da_recovery_profile: if validator {
            DARecoveryProfile::Collator
        } else {
            DARecoveryProfile::FullNode
        },
        import_queue: import_queue_service,
        relay_chain_slot_duration,
        recovery_handle: Box::new(overseer_handle.clone()),
        sync_service: node_builder.network.sync_service.clone(),
    })?;

    let orchestrator_chain_interface_builder = OrchestratorChainInProcessInterfaceBuilder {
        client: node_builder.client.clone(),
        backend: node_builder.backend.clone(),
        sync_oracle: node_builder.network.sync_service.clone(),
        overseer_handle: overseer_handle.clone(),
    };
    let orchestrator_chain_interface = orchestrator_chain_interface_builder.build();

    if validator {
        let collator_key = collator_key
            .clone()
            .expect("Command line arguments do not allow this. qed");

        // Start task which detects para id assignment, and starts/stops container chains.
        // Note that if this node was started without a `container_chain_config`, we don't
        // support collation on container chains, so there is no need to detect changes to assignment
        if container_chain_config.is_some() {
            build_check_assigned_para_id(
                orchestrator_chain_interface.clone(),
                sync_keystore.clone(),
                cc_spawn_tx.clone(),
                node_builder.task_manager.spawn_essential_handle(),
            );
        }

        let start_collation = {
            // Params for collate_on_tanssi closure
            let node_spawn_handle = node_builder.task_manager.spawn_handle().clone();
            let node_keystore = node_builder.keystore_container.keystore().clone();
            let node_telemetry_handle = node_builder.telemetry.as_ref().map(|t| t.handle()).clone();
            let node_client = node_builder.client.clone();
            let node_backend = node_builder.backend.clone();
            let relay_interface = relay_chain_interface.clone();
            let node_sync_service = node_builder.network.sync_service.clone();
            let orchestrator_tx_pool = node_builder.transaction_pool.clone();
            let overseer = overseer_handle.clone();
            let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
                node_spawn_handle.clone(),
                node_client.clone(),
                node_builder.transaction_pool.clone(),
                node_builder.prometheus_registry.as_ref(),
                node_telemetry_handle.clone(),
            );

            move || {
                start_consensus_orchestrator(
                    node_client.clone(),
                    node_backend.clone(),
                    block_import.clone(),
                    node_spawn_handle.clone(),
                    relay_interface.clone(),
                    node_sync_service.clone(),
                    node_keystore.clone(),
                    force_authoring,
                    relay_chain_slot_duration,
                    para_id,
                    collator_key.clone(),
                    overseer.clone(),
                    announce_block.clone(),
                    proposer_factory.clone(),
                    orchestrator_tx_pool.clone(),
                    max_pov_percentage,
                )
            }
        };
        // Save callback for later, used when collator rotates from container chain back to orchestrator chain
        collate_on_tanssi = Arc::new(start_collation);
    }

    let sync_keystore = node_builder.keystore_container.keystore();

    if let Some((container_chain_cli, tokio_handle)) = container_chain_config {
        // If the orchestrator chain is running as a full-node, we start a full node for the
        // container chain immediately, because only collator nodes detect their container chain
        // assignment, so otherwise it would never start.
        if !validator {
            if let Some(container_chain_para_id) = container_chain_cli.base.para_id {
                // Spawn new container chain node
                cc_spawn_tx
                    .send(CcSpawnMsg::UpdateAssignment {
                        current: Some(container_chain_para_id.into()),
                        next: Some(container_chain_para_id.into()),
                    })
                    .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
            }
        }

        // Start container chain spawner task. This will start and stop container chains on demand.
        let orchestrator_client = node_builder.client.clone();
        let orchestrator_tx_pool = node_builder.transaction_pool.clone();
        let spawn_handle = node_builder.task_manager.spawn_handle();

        // This assumes that the container chains have the same APIs as dancebox, which
        // is not the case. However, the spawner doesn't call any APIs that are not part
        // of the expected common APIs for a container chain.
        // TODO: Depend on the simple container chain runtime which should be the minimal api?
        let container_chain_spawner = ContainerChainSpawner {
            params: ContainerChainSpawnParams {
                orchestrator_chain_interface,
                container_chain_cli,
                tokio_handle,
                chain_type,
                relay_chain,
                relay_chain_interface,
                sync_keystore,
                orchestrator_para_id: para_id,
                data_preserver: false,
                collation_params: if validator {
                    Some(spawner::CollationParams {
                        orchestrator_client: Some(orchestrator_client.clone()),
                        orchestrator_tx_pool: Some(orchestrator_tx_pool),
                        orchestrator_para_id: para_id,
                        collator_key: collator_key
                            .expect("there should be a collator key if we're a validator"),
                        solochain: false,
                    })
                } else {
                    None
                },
                spawn_handle,
                generate_rpc_builder: tc_service_container_chain::rpc::GenerateSubstrateRpcBuilder::<
                    dancebox_runtime::RuntimeApi,
                >::new(),
                phantom: PhantomData,
            },
            state: Default::default(),
            db_folder_cleanup_done: false,
            collate_on_tanssi,
            collation_cancellation_constructs: None,
        };
        let state = container_chain_spawner.state.clone();

        node_builder.task_manager.spawn_essential_handle().spawn(
            "container-chain-spawner-rx-loop",
            None,
            container_chain_spawner.rx_loop(cc_spawn_rx, validator, false),
        );

        node_builder.task_manager.spawn_essential_handle().spawn(
            "container-chain-spawner-debug-state",
            None,
            monitor::monitor_task(state),
        )
    }

    Ok((node_builder.task_manager, node_builder.client))
}

/// Build the import queue for the parachain runtime (manual seal).
fn build_manual_seal_import_queue(
    _client: Arc<ParachainClient>,
    block_import: DevParachainBlockImport,
    config: &Configuration,
    _telemetry: Option<TelemetryHandle>,
    task_manager: &TaskManager,
) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
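    // Manual seal does not verify imported blocks; every sealed block is accepted,
    // which is fine for a single-authority dev chain.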
    Ok(sc_consensus_manual_seal::import_queue(
        Box::new(block_import),
        &task_manager.spawn_essential_handle(),
        config.prometheus_registry(),
    ))
}

/// Start collator task for orchestrator chain.
/// Returns a `CancellationToken` that can be used to cancel the collator task,
/// and a `oneshot::Receiver<()>` that can be used to wait until the task has ended.
fn start_consensus_orchestrator(
    client: Arc<ParachainClient>,
    backend: Arc<FullBackend>,
    block_import: ParachainBlockImport,
    spawner: SpawnTaskHandle,
    relay_chain_interface: Arc<dyn RelayChainInterface>,
    sync_oracle: Arc<SyncingService<Block>>,
    keystore: KeystorePtr,
    force_authoring: bool,
    relay_chain_slot_duration: Duration,
    para_id: ParaId,
    collator_key: CollatorPair,
    overseer_handle: OverseerHandle,
    announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
    proposer_factory: ParachainProposerFactory,
    orchestrator_tx_pool: Arc<TransactionPoolHandle<Block, ParachainClient>>,
    max_pov_percentage: Option<u32>,
) -> (CancellationToken, futures::channel::oneshot::Receiver<()>) {
    let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)
        .expect("start_consensus_orchestrator: slot duration should exist");

    let proposer = Proposer::new(proposer_factory);

    let collator_service = CollatorService::new(
        client.clone(),
        Arc::new(spawner.clone()),
        announce_block,
        client.clone(),
    );

    let relay_chain_interface_for_cidp = relay_chain_interface.clone();
    let client_set_aside_for_cidp = client.clone();
    let client_set_aside_for_orch = client.clone();
    let client_for_hash_provider = client.clone();
    let client_for_slot_duration_provider = client.clone();

    let code_hash_provider = move |block_hash| {
        client_for_hash_provider
            .code_at(block_hash)
            .ok()
            .map(polkadot_primitives::ValidationCode)
            .map(|c| c.hash())
    };

    let cancellation_token = CancellationToken::new();
    let buy_core_params = BuyCoreParams::Orchestrator {
        orchestrator_tx_pool,
        orchestrator_client: client.clone(),
    };

    let params = LookaheadTanssiAuraParams {
        max_pov_percentage,
        get_current_slot_duration: move |block_hash| {
            sc_consensus_aura::standalone::slot_duration_at(
                &*client_for_slot_duration_provider,
                block_hash,
            )
            .expect("Slot duration should be set")
        },
        create_inherent_data_providers: move |block_hash, (relay_parent, _validation_data)| {
            let relay_chain_interface = relay_chain_interface_for_cidp.clone();
            let client_set_aside_for_cidp = client_set_aside_for_cidp.clone();
            async move {
                // We added a new runtime api that lets us know which parachains have
                // some collators assigned to them. We'll now only include those. For older
                // runtimes we continue to write all of them.
                let para_ids = match client_set_aside_for_cidp
                    .runtime_api()
                    .api_version::<dyn CollatorAssignmentApi<Block, AccountId, ParaId>>(
                    block_hash,
                )? {
                    Some(version) if version >= 2 => client_set_aside_for_cidp
                        .runtime_api()
                        .parachains_with_some_collators(block_hash)?,
                    _ => client_set_aside_for_cidp
                        .runtime_api()
                        .registered_paras(block_hash)?,
                };
                let para_ids: Vec<_> = para_ids.into_iter().collect();
                let author_noting_inherent =
                    tp_author_noting_inherent::OwnParachainInherentData::create_at(
                        relay_parent,
                        &relay_chain_interface,
                        &para_ids,
                    )
                    .await;

                // Fetch the slot duration every block to avoid downtime when switching from 12s to 6s slots
                let slot_duration = sc_consensus_aura::standalone::slot_duration_at(
                    &*client_set_aside_for_cidp.clone(),
                    block_hash,
                )
                .expect("Slot duration should be set");

                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

                let slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                        *timestamp,
                        slot_duration,
                    );

                let author_noting_inherent = author_noting_inherent.ok_or_else(|| {
                    Box::<dyn std::error::Error + Send + Sync>::from(
                        "Failed to create author noting inherent",
                    )
                })?;

                Ok((slot, timestamp, author_noting_inherent))
            }
        },
        get_orchestrator_aux_data: move |block_hash: H256, (_relay_parent, _validation_data)| {
            let client_set_aside_for_orch = client_set_aside_for_orch.clone();

            async move {
                let authorities = tc_consensus::authorities::<Block, ParachainClient, NimbusPair>(
                    client_set_aside_for_orch.as_ref(),
                    &block_hash,
                    para_id,
                );

                let authorities = authorities.ok_or_else(|| {
                    Box::<dyn std::error::Error + Send + Sync>::from(
                        "Failed to fetch authorities",
                    )
                })?;

                log::info!(
                    "Authorities {:?} found for header {:?}",
                    authorities,
                    block_hash
                );

                let aux_data = OrchestratorAuraWorkerAuxData {
                    authorities,
                    // This is the orchestrator consensus, it does not have a slot frequency
                    slot_freq: None,
                };

                Ok(aux_data)
            }
        },
        block_import,
        para_client: client,
        relay_client: relay_chain_interface,
        sync_oracle,
        keystore,
        collator_key,
        para_id,
        overseer_handle,
        orchestrator_slot_duration: slot_duration,
        relay_chain_slot_duration,
        force_authoring,
        proposer,
        collator_service,
        authoring_duration: Duration::from_millis(2000),
        code_hash_provider,
        para_backend: backend,
        cancellation_token: cancellation_token.clone(),
        buy_core_params,
    };

    let (fut, exit_notification_receiver) =
        lookahead_tanssi_aura::run::<_, Block, NimbusPair, _, _, _, _, _, _, _, _, _, _, _, _, _>(
            params,
        );
    spawner.spawn("tanssi-aura", None, fut);

    (cancellation_token, exit_notification_receiver)
}

/// Start a parachain node.
pub async fn start_parachain_node<Net>(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    container_config: Option<(ContainerChainCli, tokio::runtime::Handle)>,
    collator_options: CollatorOptions,
    para_id: ParaId,
    hwbench: Option<sc_sysinfo::HwBench>,
    max_pov_percentage: Option<u32>,
) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)>
where
    Net: NetworkBackend<Block, Hash>,
{
    start_node_impl::<Net>(
        parachain_config,
        polkadot_config,
        container_config,
        collator_options,
        para_id,
        hwbench,
        max_pov_percentage,
    )
    .instrument(sc_tracing::tracing::info_span!(
        sc_tracing::logging::PREFIX_LOG_SPAN,
        name = "Orchestrator",
    ))
    .await
}

/// Start a solochain node.
pub async fn start_solochain_node(
    polkadot_config: Configuration,
    container_chain_cli: ContainerChainCli,
    collator_options: CollatorOptions,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<TaskManager> {
    let tokio_handle = polkadot_config.tokio_handle.clone();
    let orchestrator_para_id = Default::default();

    let chain_type = polkadot_config.chain_spec.chain_type().clone();
    let relay_chain = polkadot_config.chain_spec.id().to_string();

    // We use the relay chain keystore config for collators.
    // Ensure that the user did not provide any custom keystore path for collators.
    if container_chain_cli
        .base
        .base
        .keystore_params
        .keystore_path
        .is_some()
    {
        panic!(
            "--keystore-path not allowed here, must be set in relaychain args, after the first --"
        )
    }
    let keystore = &polkadot_config.keystore;

    // Instead of putting the keystore in
    // Collator1000-01/data/chains/simple_container_2000/keystore
    // we put it in
    // Collator1000-01/relay-data/chains/dancelight_local_testnet/keystore
    // and the same for the "network" folder.
    // But zombienet will put the keys in the old path, so we need to manually copy them if we
    // are running under zombienet.
    copy_zombienet_keystore(keystore, container_chain_cli.base_path())?;

    let keystore_container = KeystoreContainer::new(keystore)?;

    // No metrics, so no prometheus registry
    let prometheus_registry = None;
    let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?;

    // Each container chain will spawn its own telemetry
    let telemetry_worker_handle = None;

    // Dummy parachain config only needed because `build_relay_chain_interface` needs to know if we
    // are collators or not
    let validator = container_chain_cli.base.collator;
    let mut dummy_parachain_config = dummy_config(
        polkadot_config.tokio_handle.clone(),
        polkadot_config.base_path.clone(),
    );
    dummy_parachain_config.role = if validator {
        Role::Authority
    } else {
        Role::Full
    };
    let (relay_chain_interface, collator_key) =
        cumulus_client_service::build_relay_chain_interface(
            polkadot_config,
            &dummy_parachain_config,
            telemetry_worker_handle.clone(),
            &mut task_manager,
            collator_options.clone(),
            hwbench.clone(),
        )
        .await
        .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;

    log::info!("start_solochain_node: is validator? {}", validator);

    let overseer_handle = relay_chain_interface
        .overseer_handle()
        .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
    let sync_keystore = keystore_container.keystore();
    let collate_on_tanssi: Arc<
        dyn Fn() -> (CancellationToken, futures::channel::oneshot::Receiver<()>) + Send + Sync,
    > = Arc::new(move || {
        // collate_on_tanssi will not be called in solochains because solochains use a different
        // consensus mechanism and need validators instead of collators.
        // The runtime enforces this because the orchestrator_chain is never assigned any collators.
        panic!("Called collate_on_tanssi on solochain collator. This is unsupported and the runtime shouldn't allow this, it is a bug")
    });

    let orchestrator_chain_interface_builder = OrchestratorChainSolochainInterfaceBuilder {
        overseer_handle: overseer_handle.clone(),
        relay_chain_interface: relay_chain_interface.clone(),
    };
    let orchestrator_chain_interface = orchestrator_chain_interface_builder.build();
    // Channel to send messages to start/stop container chains
    let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel();

    if validator {
        // Start task which detects para id assignment, and starts/stops container chains.
        build_check_assigned_para_id(
            orchestrator_chain_interface.clone(),
            sync_keystore.clone(),
            cc_spawn_tx.clone(),
            task_manager.spawn_essential_handle(),
        );
    }

    // If the orchestrator chain is running as a full-node, we start a full node for the
    // container chain immediately, because only collator nodes detect their container chain
    // assignment, so otherwise it would never start.
    if !validator {
        if let Some(container_chain_para_id) = container_chain_cli.base.para_id {
            // Spawn new container chain node
            cc_spawn_tx
                .send(CcSpawnMsg::UpdateAssignment {
                    current: Some(container_chain_para_id.into()),
                    next: Some(container_chain_para_id.into()),
                })
                .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
        }
    }

    // Start container chain spawner task. This will start and stop container chains on demand.
    let spawn_handle = task_manager.spawn_handle();

    let container_chain_spawner = ContainerChainSpawner {
        params: ContainerChainSpawnParams {
            orchestrator_chain_interface,
            container_chain_cli,
            tokio_handle,
            chain_type,
            relay_chain,
            relay_chain_interface,
            sync_keystore,
            orchestrator_para_id,
            collation_params: if validator {
                Some(spawner::CollationParams {
                    // TODO: all these args must be solochain instead of orchestrator
                    orchestrator_client: None,
                    orchestrator_tx_pool: None,
                    orchestrator_para_id,
                    collator_key: collator_key
                        .expect("there should be a collator key if we're a validator"),
                    solochain: true,
                })
            } else {
                None
            },
            spawn_handle,
            data_preserver: false,
            generate_rpc_builder: tc_service_container_chain::rpc::GenerateSubstrateRpcBuilder::<
                dancebox_runtime::RuntimeApi,
            >::new(),
            phantom: PhantomData,
        },
        state: Default::default(),
        db_folder_cleanup_done: false,
        collate_on_tanssi,
        collation_cancellation_constructs: None,
    };
    let state = container_chain_spawner.state.clone();

    task_manager.spawn_essential_handle().spawn(
        "container-chain-spawner-rx-loop",
        None,
        container_chain_spawner.rx_loop(cc_spawn_rx, validator, true),
    );

    task_manager.spawn_essential_handle().spawn(
        "container-chain-spawner-debug-state",
        None,
        monitor::monitor_task(state),
    );

    Ok(task_manager)
}
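
// Soft deadline for the dev node's manual-seal proposer: let it keep including
// transactions for the entire proposal duration.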
pub const SOFT_DEADLINE_PERCENT: sp_runtime::Percent = sp_runtime::Percent::from_percent(100);

/// Start an orchestrator dev node: a standalone node with manual-seal block production
/// and a mocked relay chain, useful for development and testing.
#[sc_tracing::logging::prefix_logs_with("Orchestrator Dev Node")]
pub fn start_dev_node(
    orchestrator_config: Configuration,
    sealing: Sealing,
    hwbench: Option<sc_sysinfo::HwBench>,
    para_id: ParaId,
) -> sc_service::error::Result<TaskManager> {
    let parachain_config = prepare_node_config(orchestrator_config);

    // Create a `NodeBuilder` which helps set up the parachain node's common systems.
    let node_builder = NodeConfig::new_builder(&parachain_config, hwbench)?;

    // This node's block import.
    let block_import = DevParachainBlockImport::new(node_builder.client.clone());
    let import_queue = build_manual_seal_import_queue(
        node_builder.client.clone(),
        block_import.clone(),
        &parachain_config,
        node_builder
            .telemetry
            .as_ref()
            .map(|telemetry| telemetry.handle()),
        &node_builder.task_manager,
    )?;

    // Build a Substrate network (not Cumulus, since this is a dev node that mocks
    // the relay chain).
    let mut node_builder = node_builder
        .build_substrate_network::<sc_network::NetworkWorker<_, _>>(
            &parachain_config,
            import_queue,
        )?;

    // If we're running a collator dev node we must install manual seal block
    // production.
    let mut command_sink = None;
    let mut xcm_senders = None;
    let mut randomness_sender = None;
    let mut container_chains_exclusion_sender = None;
    if parachain_config.role.is_authority() {
        let client = node_builder.client.clone();
        let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100);
        // Create channels for mocked parachain candidates.
        let (mock_randomness_sender, mock_randomness_receiver) =
            flume::bounded::<(bool, Option<[u8; 32]>)>(100);
        // Create channels for mocked exclusion of parachains from producing blocks
        let (mock_container_chains_exclusion_sender, mock_container_chains_exclusion_receiver) =
            flume::bounded::<Vec<ParaId>>(100);

        xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender));
        randomness_sender = Some(mock_randomness_sender);
        container_chains_exclusion_sender = Some(mock_container_chains_exclusion_sender);

        command_sink = node_builder.install_manual_seal(ManualSealConfiguration {
            block_import,
            sealing,
            soft_deadline: Some(SOFT_DEADLINE_PERCENT),
            select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()),
            consensus_data_provider: Some(Box::new(
                tc_consensus::OrchestratorManualSealAuraConsensusDataProvider::new(
                    node_builder.client.clone(),
                    node_builder.keystore_container.keystore(),
                    para_id,
                ),
            )),
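            // Everything the runtime normally learns from the relay chain (validation
            // data, author noting, randomness, registrar state) is fabricated below from
            // this node's own database, so no real relay chain is needed.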
            create_inherent_data_providers: move |block: H256, ()| {
                let current_para_block = client
                    .number(block)
                    .expect("Header lookup should succeed")
                    .expect("Header passed in as parent should be present in backend.");

                let mut para_ids: Vec<ParaId> = client
                    .runtime_api()
                    .registered_paras(block)
                    .expect("registered_paras runtime API should exist")
                    .into_iter()
                    .collect();

                let hash = client
                    .hash(current_para_block.saturating_sub(1))
                    .expect("Hash of the desired block must be present")
                    .expect("Hash of the desired block should exist");

                let para_header = client
                    .expect_header(hash)
                    .expect("Expected parachain header should exist")
                    .encode();

                let para_head_data = HeadData(para_header).encode();
                let para_head_key = RelayWellKnownKeys::para_head(para_id);
                let relay_slot_key = RelayWellKnownKeys::CURRENT_SLOT.to_vec();

                let slot_duration = sc_consensus_aura::standalone::slot_duration_at(
                    &*client.clone(),
                    block,
                ).expect("Slot duration should be set");
                let mut timestamp = 0u64;
                TIMESTAMP.with(|x| {
                    timestamp = x.clone().take();
                });

                timestamp += dancebox_runtime::SLOT_DURATION;
                let relay_slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                    timestamp.into(),
                    slot_duration,
                );
                let relay_slot = u64::from(*relay_slot);

                let downward_xcm_receiver = downward_xcm_receiver.clone();
                let hrmp_xcm_receiver = hrmp_xcm_receiver.clone();
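
                // Mocked randomness is toggled via the dev RPC (`randomness_sender` in
                // `FullDeps`) and persisted in aux storage, so the setting survives across
                // blocks; only the latest received message is applied.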
                let randomness_enabler_messages: Vec<(bool, Option<[u8; 32]>)> = mock_randomness_receiver.drain().collect();
                // If there is a value to be updated, we update it
                if let Some((enable_randomness, new_seed)) = randomness_enabler_messages.last() {
                    let value = client
                        .get_aux(RANDOMNESS_ACTIVATED_AUX_KEY)
                        .expect("Should be able to query aux storage; qed")
                        .unwrap_or((false, Option::<[u8; 32]>::None).encode());
                    let (_mock_additional_randomness, mut mock_randomness_seed): (bool, Option<[u8; 32]>) =
                        Decode::decode(&mut value.as_slice()).expect("Randomness flag non-decodable");
                    if let Some(new_seed) = new_seed {
                        mock_randomness_seed = Some(*new_seed);
                    }
                    client
                        .insert_aux(
                            &[(RANDOMNESS_ACTIVATED_AUX_KEY, (enable_randomness, mock_randomness_seed).encode().as_slice())],
                            &[],
                        )
                        .expect("Should be able to write to aux storage; qed");
                }
                // We read the value; if reading fails, we default to false
                let value = client
                    .get_aux(RANDOMNESS_ACTIVATED_AUX_KEY)
                    .expect("Should be able to query aux storage; qed")
                    .unwrap_or((false, Option::<[u8; 32]>::None).encode());
                let (mock_additional_randomness, mock_randomness_seed): (bool, Option<[u8; 32]>) =
                    Decode::decode(&mut value.as_slice()).expect("Randomness flag non-decodable");
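
                // Same pattern for the exclusion list: persist the latest set of excluded
                // para ids in aux storage, then filter them out of `para_ids` so no
                // author-noting data is mocked for them.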
                let container_chains_exclusion_messages: Vec<Vec<ParaId>> = mock_container_chains_exclusion_receiver.drain().collect();
                // If there is a new set of excluded container chains, we update it
                if let Some(mock_excluded_container_chains) = container_chains_exclusion_messages.last() {
                    client
                        .insert_aux(
                            &[(CONTAINER_CHAINS_EXCLUSION_AUX_KEY, mock_excluded_container_chains.encode().as_slice())],
                            &[],
                        )
                        .expect("Should be able to write to aux storage; qed");
                }
                let new_excluded_container_chains_value = client
                    .get_aux(CONTAINER_CHAINS_EXCLUSION_AUX_KEY)
                    .expect("Should be able to query aux storage; qed")
                    .unwrap_or(Vec::<ParaId>::new().encode());
                let mock_excluded_container_chains: Vec<ParaId> =
                    Decode::decode(&mut new_excluded_container_chains_value.as_slice()).expect("Vector non-decodable");
                para_ids.retain(|x| !mock_excluded_container_chains.contains(x));
                let client_set_aside_for_cidp = client.clone();
                let client_for_xcm = client.clone();
                async move {
                    let mocked_author_noting =
                        tp_author_noting_inherent::MockAuthorNotingInherentDataProvider {
                            current_para_block,
                            relay_offset: 1000,
                            relay_blocks_per_para_block: 2,
                            para_ids,
                            slots_per_para_block: 1,
                        };
                    let mut additional_keys = mocked_author_noting.get_key_values();
                    // Mock only chain 2002 in relay.
                    // This will allow any signed origin to deregister chains 2000 and 2001, and register 2002.
                    let (registrar_paras_key_2002, para_info_2002) = mocked_relay_keys::get_mocked_registrar_paras(2002.into());
                    additional_keys.extend([(para_head_key, para_head_data), (relay_slot_key, Slot::from(relay_slot).encode()), (registrar_paras_key_2002, para_info_2002)]);

                    if mock_additional_randomness {
                        let mut mock_randomness: [u8; 32] = [0u8; 32];
                        mock_randomness[..4].copy_from_slice(&current_para_block.to_be_bytes());
                        if let Some(seed) = mock_randomness_seed {
                            for i in 0..32 {
                                mock_randomness[i] ^= seed[i];
                            }
                        }
                        additional_keys.extend([(RelayWellKnownKeys::CURRENT_BLOCK_RANDOMNESS.to_vec(), Some(mock_randomness).encode())]);
                        log::info!("Mocking randomness for block {}", current_para_block);
                    }
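                    // Mock the relay chain's upgrade go-ahead signal: if the parent block
                    // registered new validation code, signal `GoAhead` immediately instead of
                    // waiting for a real relay chain approval.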
                    let current_para_head = client_set_aside_for_cidp
                        .header(block)
                        .expect("Header lookup should succeed")
                        .expect("Header passed in as parent should be present in backend.");
                    let should_send_go_ahead = match client_set_aside_for_cidp
                        .runtime_api()
                        .collect_collation_info(block, &current_para_head)
                    {
                        Ok(info) => info.new_validation_code.is_some(),
                        Err(e) => {
                            log::error!("Failed to collect collation info: {:?}", e);
                            false
                        },
                    };
                    let time = MockTimestampInherentDataProvider;
                    let mocked_parachain = MockValidationDataInherentDataProvider {
                        current_para_block,
                        current_para_block_head: None,
                        relay_offset: 1000,
                        relay_blocks_per_para_block: 2,
                        para_blocks_per_relay_epoch: 10,
                        relay_randomness_config: (),
                        xcm_config: MockXcmConfig::new(
                            &*client_for_xcm,
                            block,
                            Default::default(),
                        ),
                        raw_downward_messages: downward_xcm_receiver.drain().collect(),
                        raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(),
                        additional_key_values: Some(additional_keys),
                        para_id,
                        upgrade_go_ahead: should_send_go_ahead.then(|| {
                            log::info!(
                                "Detected pending validation code, sending go-ahead signal."
                            );
                            UpgradeGoAhead::GoAhead
                        }),
                    };

                    Ok((time, mocked_parachain, mocked_author_noting))
                }
            },
        })?;
    }

    // This node's RPC builder.
    let rpc_builder = {
        let client = node_builder.client.clone();
        let transaction_pool = node_builder.transaction_pool.clone();

        Box::new(move |_| {
            let deps = crate::rpc::FullDeps {
                client: client.clone(),
                pool: transaction_pool.clone(),
                command_sink: command_sink.clone(),
                xcm_senders: xcm_senders.clone(),
                randomness_sender: randomness_sender.clone(),
                container_chain_exclusion_sender: container_chains_exclusion_sender.clone(),
            };

            crate::rpc::create_full(deps).map_err(Into::into)
        })
    };

    // We spawn all the common substrate tasks to properly run a node.
    let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;
    log::info!("Development Service Ready");

    Ok(node_builder.task_manager)
}

/// Can be called for a `Configuration` to check if it is a configuration for
/// the orchestrator network.
pub trait IdentifyVariant {
    /// Returns `true` if this is a configuration for a dev network.
    fn is_dev(&self) -> bool;
}

impl IdentifyVariant for Box<dyn sc_service::ChainSpec> {
    fn is_dev(&self) -> bool {
        self.chain_type() == sc_chain_spec::ChainType::Development
    }
}

/// Builder for a concrete orchestrator chain interface, created from a full node.
/// Builds an [`OrchestratorChainInProcessInterface`] to access orchestrator chain
/// data necessary for parachain operation.
struct OrchestratorChainInProcessInterfaceBuilder {
    client: Arc<ParachainClient>,
    backend: Arc<FullBackend>,
    sync_oracle: Arc<dyn SyncOracle + Send + Sync>,
    overseer_handle: Handle,
}

impl OrchestratorChainInProcessInterfaceBuilder {
    pub fn build(self) -> Arc<dyn OrchestratorChainInterface> {
        Arc::new(OrchestratorChainInProcessInterface::new(
            self.client,
            self.backend,
            self.sync_oracle,
            self.overseer_handle,
        ))
    }
}

/// Builder for an orchestrator chain interface on a solochain, created from a handle
/// to the relay chain node. Builds an [`OrchestratorChainSolochainInterface`] to access
/// orchestrator chain data necessary for parachain operation.
struct OrchestratorChainSolochainInterfaceBuilder {
    overseer_handle: Handle,
    relay_chain_interface: Arc<dyn RelayChainInterface>,
}

impl OrchestratorChainSolochainInterfaceBuilder {
    pub fn build(self) -> Arc<dyn OrchestratorChainInterface> {
        Arc::new(OrchestratorChainSolochainInterface::new(
            self.overseer_handle,
            self.relay_chain_interface,
        ))
    }
}
/// Provides an implementation of the [`RelayChainInterface`] using a local in-process relay chain node.
pub struct OrchestratorChainInProcessInterface<Client> {
    pub full_client: Arc<Client>,
    pub backend: Arc<FullBackend>,
    pub sync_oracle: Arc<dyn SyncOracle + Send + Sync>,
    pub overseer_handle: Handle,
}
impl<Client> OrchestratorChainInProcessInterface<Client> {
    /// Create a new instance of [`OrchestratorChainInProcessInterface`].
    pub fn new(
        full_client: Arc<Client>,
        backend: Arc<FullBackend>,
        sync_oracle: Arc<dyn SyncOracle + Send + Sync>,
        overseer_handle: Handle,
    ) -> Self {
        Self {
            full_client,
            backend,
            sync_oracle,
            overseer_handle,
        }
    }
}
impl<T> Clone for OrchestratorChainInProcessInterface<T> {
    fn clone(&self) -> Self {
        Self {
            full_client: self.full_client.clone(),
            backend: self.backend.clone(),
            sync_oracle: self.sync_oracle.clone(),
            overseer_handle: self.overseer_handle.clone(),
        }
    }
}
#[async_trait::async_trait]
impl<Client> OrchestratorChainInterface for OrchestratorChainInProcessInterface<Client>
where
    Client: ProvideRuntimeApi<Block>
        + BlockchainEvents<Block>
        + AuxStore
        + UsageProvider<Block>
        + Sync
        + Send,
    Client::Api: TanssiAuthorityAssignmentApi<Block, NimbusId>
        + OnDemandBlockProductionApi<Block, ParaId, Slot>
        + RegistrarApi<Block, ParaId>
        + AuthorNotingApi<Block, AccountId, BlockNumber, ParaId>
        + DataPreserversApi<Block, DataPreserverProfileId, ParaId>,
{
    async fn get_storage_by_key(
        &self,
        orchestrator_parent: PHash,
        key: &[u8],
    ) -> OrchestratorChainResult<Option<StorageValue>> {
        let state = self.backend.state_at(orchestrator_parent)?;
        state
            .storage(key)
            .map_err(OrchestratorChainError::GenericError)
    }
    async fn prove_read(
        &self,
        orchestrator_parent: PHash,
        relevant_keys: &Vec<Vec<u8>>,
    ) -> OrchestratorChainResult<StorageProof> {
        let state_backend = self.backend.state_at(orchestrator_parent)?;
        sp_state_machine::prove_read(state_backend, relevant_keys)
            .map_err(OrchestratorChainError::StateMachineError)
    }
    fn overseer_handle(&self) -> OrchestratorChainResult<Handle> {
        Ok(self.overseer_handle.clone())
    }
    /// Get a stream of import block notifications.
    async fn import_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        let notification_stream = self
            .full_client
            .import_notification_stream()
            .map(|notification| notification.header);
        Ok(Box::pin(notification_stream))
    }
    /// Get a stream of new best block notifications.
    async fn new_best_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        let notifications_stream =
            self.full_client
                .import_notification_stream()
                .filter_map(|notification| async move {
                    notification.is_new_best.then_some(notification.header)
                });
        Ok(Box::pin(notifications_stream))
    }
    /// Get a stream of finality notifications.
    async fn finality_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        let notification_stream = self
            .full_client
            .finality_notification_stream()
            .map(|notification| notification.header);
        Ok(Box::pin(notification_stream))
    }
    async fn genesis_data(
        &self,
        orchestrator_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Option<ContainerChainGenesisData>> {
        let runtime_api = self.full_client.runtime_api();
        Ok(runtime_api.genesis_data(orchestrator_parent, para_id)?)
    }
    async fn boot_nodes(
        &self,
        orchestrator_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Vec<Vec<u8>>> {
        let runtime_api = self.full_client.runtime_api();
        Ok(runtime_api.boot_nodes(orchestrator_parent, para_id)?)
    }
    async fn latest_block_number(
        &self,
        orchestrator_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Option<BlockNumber>> {
        let runtime_api = self.full_client.runtime_api();
        Ok(runtime_api.latest_block_number(orchestrator_parent, para_id)?)
    }
    async fn best_block_hash(&self) -> OrchestratorChainResult<PHash> {
        Ok(self.backend.blockchain().info().best_hash)
    }
    async fn finalized_block_hash(&self) -> OrchestratorChainResult<PHash> {
        Ok(self.backend.blockchain().info().finalized_hash)
    }
    async fn data_preserver_active_assignment(
        &self,
        orchestrator_parent: PHash,
        profile_id: DataPreserverProfileId,
    ) -> OrchestratorChainResult<DataPreserverAssignment<ParaId>> {
        let runtime_api = self.full_client.runtime_api();
        use {
            dc_orchestrator_chain_interface::DataPreserverAssignment as InterfaceAssignment,
            pallet_data_preservers_runtime_api::Assignment as RuntimeAssignment,
        };
        Ok(
            match runtime_api.get_active_assignment(orchestrator_parent, profile_id)? {
                RuntimeAssignment::NotAssigned => InterfaceAssignment::NotAssigned,
                RuntimeAssignment::Active(para_id) => InterfaceAssignment::Active(para_id),
                RuntimeAssignment::Inactive(para_id) => InterfaceAssignment::Inactive(para_id),
            },
        )
    }
    async fn check_para_id_assignment(
        &self,
        orchestrator_parent: PHash,
        authority: NimbusId,
    ) -> OrchestratorChainResult<Option<ParaId>> {
        let runtime_api = self.full_client.runtime_api();
        Ok(runtime_api.check_para_id_assignment(orchestrator_parent, authority)?)
    }
    async fn check_para_id_assignment_next_session(
        &self,
        orchestrator_parent: PHash,
        authority: NimbusId,
    ) -> OrchestratorChainResult<Option<ParaId>> {
        let runtime_api = self.full_client.runtime_api();
        Ok(runtime_api.check_para_id_assignment_next_session(orchestrator_parent, authority)?)
    }
}
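// A minimal consumption sketch (hypothetical): callers use the interface
// through the `OrchestratorChainInterface` trait object, e.g. to follow new
// best orchestrator blocks. `orchestrator_interface` is assumed to be the
// `Arc<dyn OrchestratorChainInterface>` produced by one of the builders above.
//
//     let mut best_blocks = orchestrator_interface.new_best_notification_stream().await?;
//     while let Some(header) = best_blocks.next().await {
//         log::debug!("new best orchestrator block #{}", header.number);
//     }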
/// Provides an implementation of the [`OrchestratorChainInterface`] that delegates to an
/// underlying [`RelayChainInterface`], for nodes running against a solochain.
pub struct OrchestratorChainSolochainInterface {
    pub overseer_handle: Handle,
    pub relay_chain_interface: Arc<dyn RelayChainInterface>,
}
impl OrchestratorChainSolochainInterface {
    /// Create a new instance of [`OrchestratorChainSolochainInterface`].
    pub fn new(
        overseer_handle: Handle,
        relay_chain_interface: Arc<dyn RelayChainInterface>,
    ) -> Self {
        Self {
            overseer_handle,
            relay_chain_interface,
        }
    }
}
#[async_trait::async_trait]
impl OrchestratorChainInterface for OrchestratorChainSolochainInterface {
    async fn get_storage_by_key(
        &self,
        relay_parent: PHash,
        key: &[u8],
    ) -> OrchestratorChainResult<Option<StorageValue>> {
        self.relay_chain_interface
            .get_storage_by_key(relay_parent, key)
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    async fn prove_read(
        &self,
        relay_parent: PHash,
        relevant_keys: &Vec<Vec<u8>>,
    ) -> OrchestratorChainResult<StorageProof> {
        self.relay_chain_interface
            .prove_read(relay_parent, relevant_keys)
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    fn overseer_handle(&self) -> OrchestratorChainResult<Handle> {
        Ok(self.overseer_handle.clone())
    }
    /// Get a stream of import block notifications.
    async fn import_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        self.relay_chain_interface
            .import_notification_stream()
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    /// Get a stream of new best block notifications.
    async fn new_best_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        self.relay_chain_interface
            .new_best_notification_stream()
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    /// Get a stream of finality notifications.
    async fn finality_notification_stream(
        &self,
    ) -> OrchestratorChainResult<Pin<Box<dyn Stream<Item = PHeader> + Send>>> {
        self.relay_chain_interface
            .finality_notification_stream()
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    async fn genesis_data(
        &self,
        relay_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Option<ContainerChainGenesisData>> {
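        // `call_runtime_api` SCALE-encodes the `para_id` argument, invokes the
        // named runtime API method (`RegistrarApi_genesis_data`) on the relay
        // chain node, and SCALE-decodes the response into the requested type.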
        let res: Option<ContainerChainGenesisData> = call_runtime_api(
            &self.relay_chain_interface,
            "RegistrarApi_genesis_data",
            relay_parent,
            &para_id,
        )
        .await
        .map_err(|e| OrchestratorChainError::Application(Box::new(e)))?;
        Ok(res)
    }
    async fn boot_nodes(
        &self,
        relay_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Vec<Vec<u8>>> {
        let res: Vec<Vec<u8>> = call_runtime_api(
            &self.relay_chain_interface,
            "RegistrarApi_boot_nodes",
            relay_parent,
            &para_id,
        )
        .await
        .map_err(|e| OrchestratorChainError::Application(Box::new(e)))?;
        Ok(res)
    }
    async fn latest_block_number(
        &self,
        relay_parent: PHash,
        para_id: ParaId,
    ) -> OrchestratorChainResult<Option<BlockNumber>> {
        let res: Option<BlockNumber> = call_runtime_api(
            &self.relay_chain_interface,
            "AuthorNotingApi_latest_block_number",
            relay_parent,
            &para_id,
        )
        .await
        .map_err(|e| OrchestratorChainError::Application(Box::new(e)))?;
        Ok(res)
    }
    async fn best_block_hash(&self) -> OrchestratorChainResult<PHash> {
        self.relay_chain_interface
            .best_block_hash()
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    async fn finalized_block_hash(&self) -> OrchestratorChainResult<PHash> {
        self.relay_chain_interface
            .finalized_block_hash()
            .await
            .map_err(|e| OrchestratorChainError::Application(Box::new(e)))
    }
    async fn data_preserver_active_assignment(
        &self,
        _orchestrator_parent: PHash,
        _profile_id: DataPreserverProfileId,
    ) -> OrchestratorChainResult<DataPreserverAssignment<ParaId>> {
        unimplemented!("Data preserver node does not support Dancelight yet")
    }
    async fn check_para_id_assignment(
        &self,
        relay_parent: PHash,
        authority: NimbusId,
    ) -> OrchestratorChainResult<Option<ParaId>> {
        let res: Option<ParaId> = call_runtime_api(
            &self.relay_chain_interface,
            "TanssiAuthorityAssignmentApi_check_para_id_assignment",
            relay_parent,
            &authority,
        )
        .await
        .map_err(|e| OrchestratorChainError::Application(Box::new(e)))?;
        Ok(res)
    }
    async fn check_para_id_assignment_next_session(
        &self,
        relay_parent: PHash,
        authority: NimbusId,
    ) -> OrchestratorChainResult<Option<ParaId>> {
        let res: Option<ParaId> = call_runtime_api(
            &self.relay_chain_interface,
            "TanssiAuthorityAssignmentApi_check_para_id_assignment_next_session",
            relay_parent,
            &authority,
        )
        .await
        .map_err(|e| OrchestratorChainError::Application(Box::new(e)))?;
        Ok(res)
    }
}