// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

// Tanssi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Tanssi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.

use {
    cumulus_client_cli::CollatorOptions,
    futures::FutureExt,
    log::{info, warn},
    node_common::{
        cli::RelayChainCli, service::solochain::RelayAsOrchestratorChainInterfaceBuilder,
    },
    sc_cli::{CliConfiguration, DefaultConfigurationValues, LoggerBuilder, Signals, SubstrateCli},
    sc_service::{
        config::{ExecutorConfiguration, KeystoreConfig, NetworkConfiguration, TransportConfig},
        BasePath, BlocksPruning, ChainType, Configuration, DatabaseSource, GenericChainSpec,
        KeystoreContainer, NoExtension, Role, TaskManager,
    },
    sp_keystore::KeystorePtr,
    std::{
        future::Future,
        marker::PhantomData,
        num::NonZeroUsize,
        path::{Path, PathBuf},
        sync::Arc,
        time::Duration,
    },
    tc_consensus::{OrchestratorChainInterface, RelayChainInterface},
    tc_service_container_chain_spawner::{
        cli::ContainerChainCli,
        spawner::{self, CcSpawnMsg, ContainerChainSpawnParams, ContainerChainSpawner},
    },
    tokio::sync::mpsc::unbounded_channel,
    tokio_util::sync::CancellationToken,
};

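/// Whether `start_solochain_node` should also start the container chain spawner task.
/// In container chain RPC provider mode, the caller manages its own spawner and passes `No`.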
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum EnableContainerChainSpawner {
    Yes,
    No,
}

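/// Handles returned by [start_solochain_node], used by the caller to drive the node and to
/// interact with the relay and orchestrator chains.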
pub struct SolochainNodeStarted {
    pub task_manager: TaskManager,
    pub relay_chain_interface: Arc<dyn RelayChainInterface>,
    pub orchestrator_chain_interface: Arc<dyn OrchestratorChainInterface>,
    pub keystore: KeystorePtr,
}

/// Start a solochain node.
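///
/// Builds the relay chain interface, reuses it as the orchestrator chain interface
/// (the relay chain acts as the orchestrator in solochain mode), and optionally starts
/// the container chain spawner task.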
pub async fn start_solochain_node(
    polkadot_config: Configuration,
    container_chain_cli: ContainerChainCli,
    collator_options: CollatorOptions,
    hwbench: Option<sc_sysinfo::HwBench>,
    // In container chain RPC provider mode, the caller manages its own spawner.
    enable_cc_spawner: EnableContainerChainSpawner,
) -> sc_service::error::Result<SolochainNodeStarted> {
    let tokio_handle = polkadot_config.tokio_handle.clone();
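    // In solochain mode the relay chain acts as the orchestrator chain, so there is no
    // separate orchestrator para id: the default value is used.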
    let orchestrator_para_id = Default::default();

    let chain_type = polkadot_config.chain_spec.chain_type().clone();
    let relay_chain = polkadot_config.chain_spec.id().to_string();

    // We use the relay chain keystore config for collators.
    // Ensure that the user did not provide a custom keystore path for collators.
    if container_chain_cli
        .base
        .base
        .keystore_params
        .keystore_path
        .is_some()
    {
        panic!(
            "--keystore-path not allowed here, must be set in relaychain args, after the first --"
        )
    }
    let keystore = &polkadot_config.keystore;

    // Instead of putting the keystore in
    // Collator1000-01/data/chains/simple_container_2000/keystore
    // we put it in
    // Collator1000-01/relay-data/chains/dancelight_local_testnet/keystore
    // and the same applies to the "network" folder.
    // But zombienet will put the keys in the old path, so we need to manually copy them if we
    // are running under zombienet.
    copy_zombienet_keystore(keystore, container_chain_cli.base_path())?;

    let keystore_container = KeystoreContainer::new(keystore)?;

    // No metrics, so no prometheus registry.
    let prometheus_registry = None;
    let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?;

    // Each container chain will spawn its own telemetry.
    let telemetry_worker_handle = None;

    // Dummy parachain config, only needed because `build_relay_chain_interface` needs to know
    // whether we are collators or not.
    let validator = container_chain_cli.base.collator;

    let mut dummy_parachain_config = dummy_config(
        polkadot_config.tokio_handle.clone(),
        polkadot_config.base_path.clone(),
    );
    dummy_parachain_config.role = if validator {
        Role::Authority
    } else {
        Role::Full
    };
    // TODO: this node does not implement DHT bootnode advertisement.
    // Not sure if collators should implement it, maybe not, but data preservers should.
    // The problem is that at this point data preservers may not know the para id they will be
    // assigned to, and we need that for the input of `start_bootnode_tasks`.
    let (relay_chain_interface, collator_key, _relay_chain_network, _paranode_rx) =
        cumulus_client_service::build_relay_chain_interface(
            polkadot_config,
            &dummy_parachain_config,
            telemetry_worker_handle.clone(),
            &mut task_manager,
            collator_options.clone(),
            hwbench.clone(),
        )
        .await
        .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;

    log::info!("start_solochain_node: is validator? {}", validator);

    let overseer_handle = relay_chain_interface
        .overseer_handle()
        .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
    let sync_keystore = keystore_container.keystore();
    let collate_on_tanssi: Arc<
        dyn Fn() -> (CancellationToken, futures::channel::oneshot::Receiver<()>) + Send + Sync,
    > = Arc::new(move || {
        // collate_on_tanssi will not be called in solochains, because solochains use a different
        // consensus mechanism and need validators instead of collators.
        // The runtime enforces this because the orchestrator chain is never assigned any collators.
        panic!("Called collate_on_tanssi on solochain collator. This is unsupported and the runtime shouldn't allow this, it is a bug")
    });

    let orchestrator_chain_interface_builder = RelayAsOrchestratorChainInterfaceBuilder {
        overseer_handle: overseer_handle.clone(),
        relay_chain_interface: relay_chain_interface.clone(),
    };
    let orchestrator_chain_interface = orchestrator_chain_interface_builder.build();
    // Channel to send messages to start/stop container chains.
    let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel();

    if validator {
        if enable_cc_spawner == EnableContainerChainSpawner::No {
            panic!("cannot be a validator if container chain spawner is disabled");
        }

        // Start a task which detects para id assignment and starts/stops container chains.
        crate::build_check_assigned_para_id(
            orchestrator_chain_interface.clone(),
            sync_keystore.clone(),
            cc_spawn_tx.clone(),
            task_manager.spawn_essential_handle(),
        );
    }

    // If the orchestrator chain is running as a full node, we start a full node for the
    // container chain immediately, because only collator nodes detect their container chain
    // assignment; otherwise the container chain would never start.
    if !validator && enable_cc_spawner == EnableContainerChainSpawner::Yes {
        if let Some(container_chain_para_id) = container_chain_cli.base.para_id {
            // Spawn a new container chain node.
            cc_spawn_tx
                .send(CcSpawnMsg::UpdateAssignment {
                    current: Some(container_chain_para_id.into()),
                    next: Some(container_chain_para_id.into()),
                })
                .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
        }
    }

    if enable_cc_spawner == EnableContainerChainSpawner::Yes {
        // Start container chain spawner task. This will start and stop container chains on demand.
        let spawn_handle = task_manager.spawn_handle();
        let relay_chain_interface = relay_chain_interface.clone();
        let orchestrator_chain_interface = orchestrator_chain_interface.clone();

        let container_chain_spawner = ContainerChainSpawner {
            params: ContainerChainSpawnParams {
                orchestrator_chain_interface,
                container_chain_cli,
                tokio_handle,
                chain_type,
                relay_chain,
                relay_chain_interface,
                sync_keystore,
                collation_params: if validator {
                    Some(spawner::CollationParams {
                        // TODO: all these args must be solochain instead of orchestrator
                        orchestrator_client: None,
                        orchestrator_tx_pool: None,
                        orchestrator_para_id,
                        collator_key: collator_key
                            .expect("there should be a collator key if we're a validator"),
                        solochain: true,
                    })
                } else {
                    None
                },
                spawn_handle,
                data_preserver: false,
                generate_rpc_builder:
                    tc_service_container_chain_spawner::rpc::GenerateSubstrateRpcBuilder::<
                        dancebox_runtime::RuntimeApi,
                    >::new(),
                override_sync_mode: Some(sc_cli::SyncMode::Warp),
                phantom: PhantomData,
            },
            state: Default::default(),
            db_folder_cleanup_done: false,
            collate_on_tanssi,
            collation_cancellation_constructs: None,
        };
        let state = container_chain_spawner.state.clone();

        task_manager.spawn_essential_handle().spawn(
            "container-chain-spawner-rx-loop",
            None,
            container_chain_spawner.rx_loop(cc_spawn_rx, validator, true),
        );

        task_manager.spawn_essential_handle().spawn(
            "container-chain-spawner-debug-state",
            None,
            tc_service_container_chain_spawner::monitor::monitor_task(state),
        );
    }

    Ok(SolochainNodeStarted {
        task_manager,
        relay_chain_interface,
        orchestrator_chain_interface,
        keystore: keystore_container.keystore(),
    })
}

/// Alternative to the [Configuration] struct, used in the solochain context.
pub struct SolochainConfig {
    pub tokio_handle: tokio::runtime::Handle,
    pub base_path: BasePath,
    pub network_node_name: String,
    pub role: Role,
    pub relay_chain: String,
}

/// Alternative to the [Runner](sc_cli::Runner) struct, used in the solochain context.
pub struct SolochainRunner {
    config: SolochainConfig,
    tokio_runtime: tokio::runtime::Runtime,
    signals: Signals,
}

impl SolochainRunner {
    /// Log information about the node itself.
    ///
    /// # Example:
    ///
    /// ```text
    /// 2020-06-03 16:14:21 Substrate Node
    /// 2020-06-03 16:14:21 ✌️  version 2.0.0-rc3-f4940588c-x86_64-linux-gnu
    /// 2020-06-03 16:14:21 ❤️  by Parity Technologies <admin@parity.io>, 2017-2020
    /// 2020-06-03 16:14:21 📋 Chain specification: Flaming Fir
    /// 2020-06-03 16:14:21 🏷  Node name: jolly-rod-7462
    /// 2020-06-03 16:14:21 👤 Role: FULL
    /// 2020-06-03 16:14:21 💾 Database: RocksDb at /tmp/c/chains/flamingfir7/db
    /// 2020-06-03 16:14:21 ⛓  Native runtime: node-251 (substrate-node-1.tx1.au10)
    /// ```
    fn print_node_infos(&self) {
        use chrono::{offset::Local, Datelike};
        type C = ContainerChainCli;
        info!("{}", C::impl_name());
        info!("✌️  version {}", C::impl_version());
        info!(
            "❤️  by {}, {}-{}",
            C::author(),
            C::copyright_start_year(),
            Local::now().year()
        );
        // No chain spec
        //info!("📋 Chain specification: {}", config.chain_spec.name());
        info!("🏷  Node name: {}", self.config.network_node_name);
        info!("👤 Role: {}", self.config.role);
        info!(
            "💾 Database: {} at {}",
            // Container chains only support paritydb
            "ParityDb",
            // Print base path instead of db path because each container will have its own db in a
            // different subdirectory.
            self.config.base_path.path().display(),
        );
    }

    /// A helper function that runs a node with tokio and stops if the process receives the signal
    /// `SIGTERM` or `SIGINT`.
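    ///
    /// A minimal usage sketch; `cli` and `new_solochain_node` are hypothetical
    /// stand-ins for the caller's CLI type and node builder:
    ///
    /// ```ignore
    /// let runner = create_runner(&cli)?;
    /// runner.run_node_until_exit(|config| async move {
    ///     let started = new_solochain_node(config).await?;
    ///     Ok(started.task_manager)
    /// })?;
    /// ```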
    pub fn run_node_until_exit<F, E>(
        self,
        initialize: impl FnOnce(SolochainConfig) -> F,
    ) -> std::result::Result<(), E>
    where
        F: Future<Output = std::result::Result<TaskManager, E>>,
        E: std::error::Error + Send + Sync + 'static + From<sc_service::Error>,
    {
        self.print_node_infos();

        let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?;

        let res = self
            .tokio_runtime
            .block_on(self.signals.run_until_signal(task_manager.future().fuse()));
        // We need to drop the task manager here to inform all tasks that they should shut down.
        //
        // This is important to be done before we instruct the tokio runtime to shut down.
        // Otherwise the tokio runtime will wait the full 60 seconds for all tasks to stop.
        let task_registry = task_manager.into_task_registry();

        // Give all futures 60 seconds to shut down, before tokio "leaks" them.
        let shutdown_timeout = Duration::from_secs(60);
        self.tokio_runtime.shutdown_timeout(shutdown_timeout);

        let running_tasks = task_registry.running_tasks();

        if !running_tasks.is_empty() {
            log::error!("Detected running (potentially stalled) tasks on shutdown:");
            running_tasks.iter().for_each(|(task, count)| {
                let instances_desc = if *count > 1 {
                    format!("with {} instances ", count)
                } else {
                    "".to_string()
                };

                if task.is_default_group() {
                    log::error!(
                        "Task \"{}\" was still running {}after waiting {} seconds to finish.",
                        task.name,
                        instances_desc,
                        shutdown_timeout.as_secs(),
                    );
                } else {
                    log::error!(
                        "Task \"{}\" (Group: {}) was still running {}after waiting {} seconds to finish.",
                        task.name,
                        task.group,
                        instances_desc,
                        shutdown_timeout.as_secs(),
                    );
                }
            });
        }

        res.map_err(Into::into)
    }
}

/// Equivalent to [Cli::create_runner]
pub fn create_runner<T: SubstrateCli + CliConfiguration<DVC>, DVC: DefaultConfigurationValues>(
    command: &T,
) -> sc_cli::Result<SolochainRunner> {
    let tokio_runtime = sc_cli::build_runtime()?;

    // `capture` needs to be called in a tokio context.
    // Also, capture the signals as early as possible.
    let signals = tokio_runtime.block_on(async { Signals::capture() })?;

    init_cmd(command, &T::support_url(), &T::impl_version())?;

    let base_path = command.base_path()?.unwrap();
    let network_node_name = command.node_name()?;
    let is_dev = command.is_dev()?;
    let role = command.role(is_dev)?;
    // This relay chain id is only used when the relay chain args have no `--chain` value.
    // TODO: check if this works with an external relay rpc / light client
    let relay_chain_id = "dancelight_local_testnet".to_string();

    let config = SolochainConfig {
        tokio_handle: tokio_runtime.handle().clone(),
        base_path,
        network_node_name,
        role,
        relay_chain: relay_chain_id,
    };

    Ok(SolochainRunner {
        config,
        tokio_runtime,
        signals,
    })
}

/// The recommended open file descriptor limit to be configured for the process.
const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000;

/// Equivalent to [CliConfiguration::init]
fn init_cmd<T: CliConfiguration<DVC>, DVC: DefaultConfigurationValues>(
    this: &T,
    support_url: &str,
    impl_version: &str,
) -> sc_cli::Result<()> {
    sp_panic_handler::set(support_url, impl_version);

    let mut logger = LoggerBuilder::new(this.log_filters()?);
    logger
        .with_log_reloading(this.enable_log_reloading()?)
        .with_detailed_output(this.detailed_log_output()?);

    if let Some(tracing_targets) = this.tracing_targets()? {
        let tracing_receiver = this.tracing_receiver()?;
        logger.with_profiling(tracing_receiver, tracing_targets);
    }

    if this.disable_log_color()? {
        logger.with_colors(false);
    }

    logger.init()?;

    match fdlimit::raise_fd_limit() {
        Ok(fdlimit::Outcome::LimitRaised { to, .. }) => {
            if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT {
                warn!(
                    "Low open file descriptor limit configured for the process. \
                        Current value: {:?}, recommended value: {:?}.",
                    to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT,
                );
            }
        }
        Ok(fdlimit::Outcome::Unsupported) => {
            // Unsupported platform (non-Linux)
        }
        Err(error) => {
            warn!(
                "Failed to configure file descriptor limit for the process: \
                    {}, recommended value: {:?}.",
                error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT,
            );
        }
    }

    Ok(())
}

/// Equivalent to [RelayChainCli::new]
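///
/// `config.relay_chain` is only used as the chain id when the relay chain args
/// provide no `--chain` value.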
pub fn relay_chain_cli_new<'a>(
    config: &SolochainConfig,
    relay_chain_args: impl Iterator<Item = &'a String>,
) -> RelayChainCli {
    let base_path = config.base_path.path().join("polkadot");

    RelayChainCli {
        base_path,
        chain_id: Some(config.relay_chain.clone()),
        base: clap::Parser::parse_from(relay_chain_args),
        solochain: true,
    }
}

/// Create a dummy [Configuration] that should only be used as input to polkadot-sdk functions that
/// take this struct as input but only use one field of it.
/// This is needed because [Configuration] does not implement [Default].
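///
/// Usage sketch, mirroring `start_solochain_node` above:
///
/// ```ignore
/// let mut config = dummy_config(tokio_handle, base_path);
/// config.role = Role::Authority;
/// ```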
pub fn dummy_config(tokio_handle: tokio::runtime::Handle, base_path: BasePath) -> Configuration {
    Configuration {
        impl_name: "".to_string(),
        impl_version: "".to_string(),
        role: Role::Full,
        tokio_handle,
        transaction_pool: Default::default(),
        network: NetworkConfiguration {
            net_config_path: None,
            listen_addresses: vec![],
            public_addresses: vec![],
            boot_nodes: vec![],
            node_key: Default::default(),
            default_peers_set: Default::default(),
            default_peers_set_num_full: 0,
            client_version: "".to_string(),
            node_name: "".to_string(),
            transport: TransportConfig::MemoryOnly,
            max_parallel_downloads: 0,
            max_blocks_per_request: 0,
            sync_mode: Default::default(),
            enable_dht_random_walk: false,
            allow_non_globals_in_dht: false,
            kademlia_disjoint_query_paths: false,
            kademlia_replication_factor: NonZeroUsize::new(20).unwrap(),
            ipfs_server: false,
            network_backend: Default::default(),
            min_peers_to_start_warp_sync: None,
            idle_connection_timeout: Default::default(),
        },
        keystore: KeystoreConfig::InMemory,
        database: DatabaseSource::ParityDb {
            path: Default::default(),
        },
        trie_cache_maximum_size: None,
        warm_up_trie_cache: None,
        state_pruning: None,
        blocks_pruning: BlocksPruning::KeepAll,
        chain_spec: Box::new(
            GenericChainSpec::<NoExtension, ()>::builder(Default::default(), NoExtension::None)
                .with_name("test")
                .with_id("test_id")
                .with_chain_type(ChainType::Development)
                .with_genesis_config_patch(Default::default())
                .build(),
        ),
        executor: ExecutorConfiguration {
            wasm_method: Default::default(),
            wasmtime_precompiled: None,
            default_heap_pages: None,
            max_runtime_instances: 0,
            runtime_cache_size: 0,
        },
        wasm_runtime_overrides: None,
        rpc: sc_service::config::RpcConfiguration {
            addr: None,
            max_connections: 0,
            cors: None,
            methods: Default::default(),
            max_request_size: 0,
            max_response_size: 0,
            id_provider: None,
            max_subs_per_conn: 0,
            port: 0,
            message_buffer_capacity: 0,
            batch_config: jsonrpsee::server::BatchRequestConfig::Disabled,
            rate_limit: None,
            rate_limit_whitelisted_ips: vec![],
            rate_limit_trust_proxy_headers: false,
        },
        prometheus_config: None,
        telemetry_endpoints: None,
        offchain_worker: Default::default(),
        force_authoring: false,
        disable_grandpa: false,
        dev_key_seed: None,
        tracing_targets: None,
        tracing_receiver: Default::default(),
        announce_block: false,
        data_path: Default::default(),
        base_path,
    }
}

/// Get the zombienet keystore path from the container base path.
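///
/// A sketch of the intended mapping, matching the comments in the body
/// (paths are illustrative):
///
/// ```ignore
/// let keystore = zombienet_keystore_path(Path::new("Collator-01/data/containers"));
/// // keystore == "Collator-01/data/chains/simple_container_2000/keystore/"
/// ```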
fn zombienet_keystore_path(container_base_path: &Path) -> PathBuf {
    // container base path:
    // Collator-01/data/containers
    let mut zombienet_path = container_base_path.to_owned();
    zombienet_path.pop();
    // Collator-01/data/
    zombienet_path.push("chains/simple_container_2000/keystore/");
    // Collator-01/data/chains/simple_container_2000/keystore/

    zombienet_path
}

/// When running under zombienet, collator keys are injected in a different folder from the one we
/// expect. This function checks if the zombienet folder exists, and if so, copies all the keys
/// from there into the expected folder.
pub fn copy_zombienet_keystore(
    keystore: &KeystoreConfig,
    container_base_path: sc_cli::Result<Option<BasePath>>,
) -> std::io::Result<()> {
    let container_base_path = match container_base_path {
        Ok(Some(base_path)) => base_path,
        _ => {
            // If base_path is not explicitly set, we are not running under zombienet, so there
            // is nothing to do.
            return Ok(());
        }
    };
    let keystore_path = keystore.path();
    let keystore_path = match keystore_path {
        Some(x) => x,
        None => {
            // In-memory keystore: zombienet does not use it by default, so ignore it.
            return Ok(());
        }
    };
    let zombienet_path = zombienet_keystore_path(container_base_path.path());

    if zombienet_path.exists() {
        // Copy to the keystore folder.
        let mut files_copied = 0;
        copy_dir_all(zombienet_path, keystore_path, &mut files_copied)?;
        log::info!("Copied {} keys from zombienet keystore", files_copied);

        Ok(())
    } else {
        // The zombienet folder does not exist; assume we are not running under zombienet.
        Ok(())
    }
}
/// Equivalent to `cp -r src/* dst`
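///
/// Usage sketch (paths are placeholders):
///
/// ```ignore
/// let mut files_copied = 0;
/// copy_dir_all("src_keystore", "dst_keystore", &mut files_copied)?;
/// ```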
// https://stackoverflow.com/a/65192210
fn copy_dir_all(
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    files_copied: &mut u32,
) -> std::io::Result<()> {
    use std::fs;
    fs::create_dir_all(&dst)?;
    // No-op if src and dst are the same dir.
    let src_root = src.as_ref().canonicalize()?;
    let dst_root = dst.as_ref().canonicalize()?;
    if src_root == dst_root {
        return Ok(());
    }
    for entry in fs::read_dir(src)? {
        let entry = entry?;
        let ty = entry.file_type()?;
        if ty.is_dir() {
            copy_dir_all(
                entry.path(),
                dst.as_ref().join(entry.file_name()),
                files_copied,
            )?;
        } else {
            fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
            *files_copied += 1;
        }
    }
    Ok(())
}