From 70c55363a91572790ba5d49b70c58040f112e55c Mon Sep 17 00:00:00 2001
From: notgne2
Date: Fri, 8 Jan 2021 18:24:04 -0700
Subject: Restructure project
---
Cargo.toml | 8 +-
src/activate.rs | 471 -------------------------------------------
src/bin/activate.rs | 471 +++++++++++++++++++++++++++++++++++++++++++
src/bin/deploy.rs | 564 ++++++++++++++++++++++++++++++++++++++++++++++++++++
src/data.rs | 73 +++++++
src/deploy.rs | 296 +++++++++++++++++++++++++++
src/lib.rs | 424 +++++++++++++++++++++++++++++++++++++++
src/main.rs | 564 ----------------------------------------------------
src/push.rs | 174 ++++++++++++++++
src/utils/data.rs | 73 -------
src/utils/deploy.rs | 296 ---------------------------
src/utils/mod.rs | 426 ---------------------------------------
src/utils/push.rs | 174 ----------------
13 files changed, 2004 insertions(+), 2010 deletions(-)
delete mode 100644 src/activate.rs
create mode 100644 src/bin/activate.rs
create mode 100644 src/bin/deploy.rs
create mode 100644 src/data.rs
create mode 100644 src/deploy.rs
create mode 100644 src/lib.rs
delete mode 100644 src/main.rs
create mode 100644 src/push.rs
delete mode 100644 src/utils/data.rs
delete mode 100644 src/utils/deploy.rs
delete mode 100644 src/utils/mod.rs
delete mode 100644 src/utils/push.rs
diff --git a/Cargo.toml b/Cargo.toml
index b48eea3..dc239e5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,10 +35,6 @@ signal-hook = "0.3"
smol_str = "=0.1.16"
-[[bin]]
+[lib]
name = "deploy"
-path = "src/main.rs"
-
-[[bin]]
-name = "activate"
-path = "src/activate.rs"
\ No newline at end of file
+path = "src/lib.rs"
\ No newline at end of file
diff --git a/src/activate.rs b/src/activate.rs
deleted file mode 100644
index 49d16af..0000000
--- a/src/activate.rs
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-// SPDX-FileCopyrightText: 2020 Andreas Fuchs
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use signal_hook::{consts::signal::SIGHUP, iterator::Signals};
-
-use clap::Clap;
-
-use tokio::fs;
-use tokio::process::Command;
-use tokio::sync::mpsc;
-use tokio::time::timeout;
-
-use std::time::Duration;
-
-use std::path::Path;
-
-use notify::{RecommendedWatcher, RecursiveMode, Watcher};
-
-use thiserror::Error;
-
-#[macro_use]
-extern crate log;
-
-#[macro_use]
-extern crate serde_derive;
-
-#[macro_use]
-mod utils;
-
-/// Remote activation utility for deploy-rs
-#[derive(Clap, Debug)]
-#[clap(version = "1.0", author = "Serokell ")]
-struct Opts {
- /// Print debug logs to output
- #[clap(short, long)]
- debug_logs: bool,
- /// Directory to print logs to
- #[clap(long)]
- log_dir: Option,
-
- /// Path for any temporary files that may be needed during activation
- #[clap(long)]
- temp_path: String,
-
- #[clap(subcommand)]
- subcmd: SubCommand,
-}
-
-#[derive(Clap, Debug)]
-enum SubCommand {
- Activate(ActivateOpts),
- Wait(WaitOpts),
-}
-
-/// Activate a profile
-#[derive(Clap, Debug)]
-struct ActivateOpts {
- /// The closure to activate
- closure: String,
- /// The profile path to install into
- profile_path: String,
-
- /// Maximum time to wait for confirmation after activation
- #[clap(long)]
- confirm_timeout: u16,
-
- /// Wait for confirmation after deployment and rollback if not confirmed
- #[clap(long)]
- magic_rollback: bool,
-
- /// Auto rollback if failure
- #[clap(long)]
- auto_rollback: bool,
-}
-
-/// Activate a profile
-#[derive(Clap, Debug)]
-struct WaitOpts {
- /// The closure to wait for
- closure: String,
-}
-
-#[derive(Error, Debug)]
-pub enum DeactivateError {
- #[error("Failed to execute the rollback command: {0}")]
- RollbackError(std::io::Error),
- #[error("The rollback resulted in a bad exit code: {0:?}")]
- RollbackExitError(Option),
- #[error("Failed to run command for listing generations: {0}")]
- ListGenError(std::io::Error),
- #[error("Command for listing generations resulted in a bad exit code: {0:?}")]
- ListGenExitError(Option),
- #[error("Error converting generation list output to utf8: {0}")]
- DecodeListGenUtf8Error(#[from] std::string::FromUtf8Error),
- #[error("Failed to run command for deleting generation: {0}")]
- DeleteGenError(std::io::Error),
- #[error("Command for deleting generations resulted in a bad exit code: {0:?}")]
- DeleteGenExitError(Option),
- #[error("Failed to run command for re-activating the last generation: {0}")]
- ReactivateError(std::io::Error),
- #[error("Command for re-activating the last generation resulted in a bad exit code: {0:?}")]
- ReactivateExitError(Option),
-}
-
-pub async fn deactivate(profile_path: &str) -> Result<(), DeactivateError> {
- warn!("De-activating due to error");
-
- let nix_env_rollback_exit_status = Command::new("nix-env")
- .arg("-p")
- .arg(&profile_path)
- .arg("--rollback")
- .status()
- .await
- .map_err(DeactivateError::RollbackError)?;
-
- match nix_env_rollback_exit_status.code() {
- Some(0) => (),
- a => return Err(DeactivateError::RollbackExitError(a)),
- };
-
- debug!("Listing generations");
-
- let nix_env_list_generations_out = Command::new("nix-env")
- .arg("-p")
- .arg(&profile_path)
- .arg("--list-generations")
- .output()
- .await
- .map_err(DeactivateError::ListGenError)?;
-
- match nix_env_list_generations_out.status.code() {
- Some(0) => (),
- a => return Err(DeactivateError::ListGenExitError(a)),
- };
-
- let generations_list = String::from_utf8(nix_env_list_generations_out.stdout)?;
-
- let last_generation_line = generations_list
- .lines()
- .last()
- .expect("Expected to find a generation in list");
-
- let last_generation_id = last_generation_line
- .split_whitespace()
- .next()
- .expect("Expected to get ID from generation entry");
-
- debug!("Removing generation entry {}", last_generation_line);
- warn!("Removing generation by ID {}", last_generation_id);
-
- let nix_env_delete_generation_exit_status = Command::new("nix-env")
- .arg("-p")
- .arg(&profile_path)
- .arg("--delete-generations")
- .arg(last_generation_id)
- .status()
- .await
- .map_err(DeactivateError::DeleteGenError)?;
-
- match nix_env_delete_generation_exit_status.code() {
- Some(0) => (),
- a => return Err(DeactivateError::DeleteGenExitError(a)),
- };
-
- info!("Attempting to re-activate the last generation");
-
- let re_activate_exit_status = Command::new(format!("{}/deploy-rs-activate", profile_path))
- .env("PROFILE", &profile_path)
- .current_dir(&profile_path)
- .status()
- .await
- .map_err(DeactivateError::ReactivateError)?;
-
- match re_activate_exit_status.code() {
- Some(0) => (),
- a => return Err(DeactivateError::ReactivateExitError(a)),
- };
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-pub enum ActivationConfirmationError {
- #[error("Failed to create activation confirmation directory: {0}")]
- CreateConfirmDirError(std::io::Error),
- #[error("Failed to create activation confirmation file: {0}")]
- CreateConfirmFileError(std::io::Error),
- #[error("Failed to create file system watcher instance: {0}")]
- CreateWatcherError(notify::Error),
- #[error("Error forking process: {0}")]
- ForkError(i32),
- #[error("Could not watch for activation sentinel: {0}")]
- WatcherError(#[from] notify::Error),
-}
-
-#[derive(Error, Debug)]
-pub enum DangerZoneError {
- #[error("Timeout elapsed for confirmation")]
- TimesUp,
- #[error("inotify stream ended without activation confirmation")]
- NoConfirmation,
- #[error("inotify encountered an error: {0}")]
- WatchError(notify::Error),
-}
-
-async fn danger_zone(
- mut events: mpsc::Receiver>,
- confirm_timeout: u16,
-) -> Result<(), DangerZoneError> {
- info!("Waiting for confirmation event...");
-
- match timeout(Duration::from_secs(confirm_timeout as u64), events.recv()).await {
- Ok(Some(Ok(()))) => Ok(()),
- Ok(Some(Err(e))) => Err(DangerZoneError::WatchError(e)),
- Ok(None) => Err(DangerZoneError::NoConfirmation),
- Err(_) => Err(DangerZoneError::TimesUp),
- }
-}
-
-pub async fn activation_confirmation(
- profile_path: String,
- temp_path: String,
- confirm_timeout: u16,
- closure: String,
-) -> Result<(), ActivationConfirmationError> {
- let lock_path = utils::make_lock_path(&temp_path, &closure);
-
- debug!("Ensuring parent directory exists for canary file");
-
- if let Some(parent) = Path::new(&lock_path).parent() {
- fs::create_dir_all(parent)
- .await
- .map_err(ActivationConfirmationError::CreateConfirmDirError)?;
- }
-
- debug!("Creating canary file");
-
- fs::File::create(&lock_path)
- .await
- .map_err(ActivationConfirmationError::CreateConfirmFileError)?;
-
- debug!("Creating notify watcher");
-
- let (deleted, done) = mpsc::channel(1);
-
- let mut watcher: RecommendedWatcher =
- Watcher::new_immediate(move |res: Result| {
- let send_result = match res {
- Ok(e) if e.kind == notify::EventKind::Remove(notify::event::RemoveKind::File) => {
- debug!("Got worthy removal event, sending on channel");
- deleted.try_send(Ok(()))
- }
- Err(e) => {
- debug!("Got error waiting for removal event, sending on channel");
- deleted.try_send(Err(e))
- }
- Ok(_) => Ok(()), // ignore non-removal events
- };
-
- if let Err(e) = send_result {
- error!("Could not send file system event to watcher: {}", e);
- }
- })?;
-
- watcher.watch(&lock_path, RecursiveMode::NonRecursive)?;
-
- if let Err(err) = danger_zone(done, confirm_timeout).await {
- error!("Error waiting for confirmation event: {}", err);
-
- if let Err(err) = deactivate(&profile_path).await {
- error!(
- "Error de-activating due to another error waiting for confirmation, oh no...: {}",
- err
- );
- }
- }
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-pub enum WaitError {
- #[error("Error creating watcher for activation: {0}")]
- Watcher(#[from] notify::Error),
- #[error("Error waiting for activation: {0}")]
- Waiting(#[from] DangerZoneError),
-}
-pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> {
- let lock_path = utils::make_lock_path(&temp_path, &closure);
-
- let (created, done) = mpsc::channel(1);
-
- let mut watcher: RecommendedWatcher = {
- // TODO: fix wasteful clone
- let lock_path = lock_path.clone();
-
- Watcher::new_immediate(move |res: Result| {
- let send_result = match res {
- Ok(e) if e.kind == notify::EventKind::Create(notify::event::CreateKind::File) => {
- match &e.paths[..] {
- [x] if x == Path::new(&lock_path) => created.try_send(Ok(())),
- _ => Ok(()),
- }
- }
- Err(e) => created.try_send(Err(e)),
- Ok(_) => Ok(()), // ignore non-removal events
- };
-
- if let Err(e) = send_result {
- error!("Could not send file system event to watcher: {}", e);
- }
- })?
- };
-
- watcher.watch(&temp_path, RecursiveMode::NonRecursive)?;
-
- // Avoid a potential race condition by checking for existence after watcher creation
- if fs::metadata(&lock_path).await.is_ok() {
- watcher.unwatch(&temp_path)?;
- return Ok(());
- }
-
- danger_zone(done, 60).await?;
-
- info!("Found canary file, done waiting!");
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-pub enum ActivateError {
- #[error("Failed to execute the command for setting profile: {0}")]
- SetProfileError(std::io::Error),
- #[error("The command for setting profile resulted in a bad exit code: {0:?}")]
- SetProfileExitError(Option),
-
- #[error("Failed to execute the activation script: {0}")]
- RunActivateError(std::io::Error),
- #[error("The activation script resulted in a bad exit code: {0:?}")]
- RunActivateExitError(Option),
-
- #[error("There was an error de-activating after an error was encountered: {0}")]
- DeactivateError(#[from] DeactivateError),
-
- #[error("Failed to get activation confirmation: {0}")]
- ActivationConfirmationError(#[from] ActivationConfirmationError),
-}
-
-pub async fn activate(
- profile_path: String,
- closure: String,
- auto_rollback: bool,
- temp_path: String,
- confirm_timeout: u16,
- magic_rollback: bool,
-) -> Result<(), ActivateError> {
- info!("Activating profile");
-
- let nix_env_set_exit_status = Command::new("nix-env")
- .arg("-p")
- .arg(&profile_path)
- .arg("--set")
- .arg(&closure)
- .status()
- .await
- .map_err(ActivateError::SetProfileError)?;
-
- match nix_env_set_exit_status.code() {
- Some(0) => (),
- a => {
- if auto_rollback {
- deactivate(&profile_path).await?;
- }
- return Err(ActivateError::SetProfileExitError(a));
- }
- };
-
- debug!("Running activation script");
-
- let activate_status = match Command::new(format!("{}/deploy-rs-activate", profile_path))
- .env("PROFILE", &profile_path)
- .current_dir(&profile_path)
- .status()
- .await
- .map_err(ActivateError::RunActivateError)
- {
- Ok(x) => x,
- Err(e) => {
- if auto_rollback {
- deactivate(&profile_path).await?;
- }
- return Err(e);
- }
- };
-
- match activate_status.code() {
- Some(0) => (),
- a => {
- if auto_rollback {
- deactivate(&profile_path).await?;
- }
- return Err(ActivateError::RunActivateExitError(a));
- }
- };
-
- info!("Activation succeeded!");
-
- if magic_rollback {
- info!("Magic rollback is enabled, setting up confirmation hook...");
-
- match activation_confirmation(profile_path.clone(), temp_path, confirm_timeout, closure)
- .await
- {
- Ok(()) => {}
- Err(err) => {
- deactivate(&profile_path).await?;
- return Err(ActivateError::ActivationConfirmationError(err));
- }
- };
- }
-
- Ok(())
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Box> {
- // Ensure that this process stays alive after the SSH connection dies
- let mut signals = Signals::new(&[SIGHUP])?;
- std::thread::spawn(move || {
- for sig in signals.forever() {
- println!("Received NOHUP - ignoring...");
- }
- });
-
- let opts: Opts = Opts::parse();
-
- utils::init_logger(
- opts.debug_logs,
- opts.log_dir.as_deref(),
- match opts.subcmd {
- SubCommand::Activate(_) => utils::LoggerType::Activate,
- SubCommand::Wait(_) => utils::LoggerType::Wait,
- },
- )?;
-
- let r = match opts.subcmd {
- SubCommand::Activate(activate_opts) => activate(
- activate_opts.profile_path,
- activate_opts.closure,
- activate_opts.auto_rollback,
- opts.temp_path,
- activate_opts.confirm_timeout,
- activate_opts.magic_rollback,
- )
- .await
- .map_err(|x| Box::new(x) as Box),
-
- SubCommand::Wait(wait_opts) => wait(opts.temp_path, wait_opts.closure)
- .await
- .map_err(|x| Box::new(x) as Box),
- };
-
- match r {
- Ok(()) => (),
- Err(err) => good_panic!("{}", err),
- }
-
- Ok(())
-}
diff --git a/src/bin/activate.rs b/src/bin/activate.rs
new file mode 100644
index 0000000..554702c
--- /dev/null
+++ b/src/bin/activate.rs
@@ -0,0 +1,471 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+// SPDX-FileCopyrightText: 2020 Andreas Fuchs
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use signal_hook::{consts::signal::SIGHUP, iterator::Signals};
+
+use clap::Clap;
+
+use tokio::fs;
+use tokio::process::Command;
+use tokio::sync::mpsc;
+use tokio::time::timeout;
+
+use std::time::Duration;
+
+use std::path::Path;
+
+use notify::{RecommendedWatcher, RecursiveMode, Watcher};
+
+use thiserror::Error;
+
+#[macro_use]
+extern crate log;
+
+#[macro_use]
+extern crate serde_derive;
+
+/// Remote activation utility for deploy-rs
+#[derive(Clap, Debug)]
+#[clap(version = "1.0", author = "Serokell ")]
+struct Opts {
+ /// Print debug logs to output
+ #[clap(short, long)]
+ debug_logs: bool,
+ /// Directory to print logs to
+ #[clap(long)]
+ log_dir: Option,
+
+ /// Path for any temporary files that may be needed during activation
+ #[clap(long)]
+ temp_path: String,
+
+ #[clap(subcommand)]
+ subcmd: SubCommand,
+}
+
+#[derive(Clap, Debug)]
+enum SubCommand {
+ Activate(ActivateOpts),
+ Wait(WaitOpts),
+}
+
+/// Activate a profile
+#[derive(Clap, Debug)]
+struct ActivateOpts {
+ /// The closure to activate
+ closure: String,
+ /// The profile path to install into
+ profile_path: String,
+
+ /// Maximum time to wait for confirmation after activation
+ #[clap(long)]
+ confirm_timeout: u16,
+
+ /// Wait for confirmation after deployment and rollback if not confirmed
+ #[clap(long)]
+ magic_rollback: bool,
+
+ /// Auto rollback if failure
+ #[clap(long)]
+ auto_rollback: bool,
+}
+
+/// Activate a profile
+#[derive(Clap, Debug)]
+struct WaitOpts {
+ /// The closure to wait for
+ closure: String,
+}
+
+#[derive(Error, Debug)]
+pub enum DeactivateError {
+ #[error("Failed to execute the rollback command: {0}")]
+ RollbackError(std::io::Error),
+ #[error("The rollback resulted in a bad exit code: {0:?}")]
+ RollbackExitError(Option),
+ #[error("Failed to run command for listing generations: {0}")]
+ ListGenError(std::io::Error),
+ #[error("Command for listing generations resulted in a bad exit code: {0:?}")]
+ ListGenExitError(Option),
+ #[error("Error converting generation list output to utf8: {0}")]
+ DecodeListGenUtf8Error(#[from] std::string::FromUtf8Error),
+ #[error("Failed to run command for deleting generation: {0}")]
+ DeleteGenError(std::io::Error),
+ #[error("Command for deleting generations resulted in a bad exit code: {0:?}")]
+ DeleteGenExitError(Option),
+ #[error("Failed to run command for re-activating the last generation: {0}")]
+ ReactivateError(std::io::Error),
+ #[error("Command for re-activating the last generation resulted in a bad exit code: {0:?}")]
+ ReactivateExitError(Option),
+}
+
+pub async fn deactivate(profile_path: &str) -> Result<(), DeactivateError> {
+ warn!("De-activating due to error");
+
+ let nix_env_rollback_exit_status = Command::new("nix-env")
+ .arg("-p")
+ .arg(&profile_path)
+ .arg("--rollback")
+ .status()
+ .await
+ .map_err(DeactivateError::RollbackError)?;
+
+ match nix_env_rollback_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeactivateError::RollbackExitError(a)),
+ };
+
+ debug!("Listing generations");
+
+ let nix_env_list_generations_out = Command::new("nix-env")
+ .arg("-p")
+ .arg(&profile_path)
+ .arg("--list-generations")
+ .output()
+ .await
+ .map_err(DeactivateError::ListGenError)?;
+
+ match nix_env_list_generations_out.status.code() {
+ Some(0) => (),
+ a => return Err(DeactivateError::ListGenExitError(a)),
+ };
+
+ let generations_list = String::from_utf8(nix_env_list_generations_out.stdout)?;
+
+ let last_generation_line = generations_list
+ .lines()
+ .last()
+ .expect("Expected to find a generation in list");
+
+ let last_generation_id = last_generation_line
+ .split_whitespace()
+ .next()
+ .expect("Expected to get ID from generation entry");
+
+ debug!("Removing generation entry {}", last_generation_line);
+ warn!("Removing generation by ID {}", last_generation_id);
+
+ let nix_env_delete_generation_exit_status = Command::new("nix-env")
+ .arg("-p")
+ .arg(&profile_path)
+ .arg("--delete-generations")
+ .arg(last_generation_id)
+ .status()
+ .await
+ .map_err(DeactivateError::DeleteGenError)?;
+
+ match nix_env_delete_generation_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeactivateError::DeleteGenExitError(a)),
+ };
+
+ info!("Attempting to re-activate the last generation");
+
+ let re_activate_exit_status = Command::new(format!("{}/deploy-rs-activate", profile_path))
+ .env("PROFILE", &profile_path)
+ .current_dir(&profile_path)
+ .status()
+ .await
+ .map_err(DeactivateError::ReactivateError)?;
+
+ match re_activate_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeactivateError::ReactivateExitError(a)),
+ };
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+pub enum ActivationConfirmationError {
+ #[error("Failed to create activation confirmation directory: {0}")]
+ CreateConfirmDirError(std::io::Error),
+ #[error("Failed to create activation confirmation file: {0}")]
+ CreateConfirmFileError(std::io::Error),
+ #[error("Failed to create file system watcher instance: {0}")]
+ CreateWatcherError(notify::Error),
+ #[error("Error forking process: {0}")]
+ ForkError(i32),
+ #[error("Could not watch for activation sentinel: {0}")]
+ WatcherError(#[from] notify::Error),
+}
+
+#[derive(Error, Debug)]
+pub enum DangerZoneError {
+ #[error("Timeout elapsed for confirmation")]
+ TimesUp,
+ #[error("inotify stream ended without activation confirmation")]
+ NoConfirmation,
+ #[error("inotify encountered an error: {0}")]
+ WatchError(notify::Error),
+}
+
+async fn danger_zone(
+ mut events: mpsc::Receiver>,
+ confirm_timeout: u16,
+) -> Result<(), DangerZoneError> {
+ info!("Waiting for confirmation event...");
+
+ match timeout(Duration::from_secs(confirm_timeout as u64), events.recv()).await {
+ Ok(Some(Ok(()))) => Ok(()),
+ Ok(Some(Err(e))) => Err(DangerZoneError::WatchError(e)),
+ Ok(None) => Err(DangerZoneError::NoConfirmation),
+ Err(_) => Err(DangerZoneError::TimesUp),
+ }
+}
+
+pub async fn activation_confirmation(
+ profile_path: String,
+ temp_path: String,
+ confirm_timeout: u16,
+ closure: String,
+) -> Result<(), ActivationConfirmationError> {
+ let lock_path = deploy::make_lock_path(&temp_path, &closure);
+
+ debug!("Ensuring parent directory exists for canary file");
+
+ if let Some(parent) = Path::new(&lock_path).parent() {
+ fs::create_dir_all(parent)
+ .await
+ .map_err(ActivationConfirmationError::CreateConfirmDirError)?;
+ }
+
+ debug!("Creating canary file");
+
+ fs::File::create(&lock_path)
+ .await
+ .map_err(ActivationConfirmationError::CreateConfirmFileError)?;
+
+ debug!("Creating notify watcher");
+
+ let (deleted, done) = mpsc::channel(1);
+
+ let mut watcher: RecommendedWatcher =
+ Watcher::new_immediate(move |res: Result| {
+ let send_result = match res {
+ Ok(e) if e.kind == notify::EventKind::Remove(notify::event::RemoveKind::File) => {
+ debug!("Got worthy removal event, sending on channel");
+ deleted.try_send(Ok(()))
+ }
+ Err(e) => {
+ debug!("Got error waiting for removal event, sending on channel");
+ deleted.try_send(Err(e))
+ }
+ Ok(_) => Ok(()), // ignore non-removal events
+ };
+
+ if let Err(e) = send_result {
+ error!("Could not send file system event to watcher: {}", e);
+ }
+ })?;
+
+ watcher.watch(&lock_path, RecursiveMode::NonRecursive)?;
+
+ if let Err(err) = danger_zone(done, confirm_timeout).await {
+ error!("Error waiting for confirmation event: {}", err);
+
+ if let Err(err) = deactivate(&profile_path).await {
+ error!(
+ "Error de-activating due to another error waiting for confirmation, oh no...: {}",
+ err
+ );
+ }
+ }
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+pub enum WaitError {
+ #[error("Error creating watcher for activation: {0}")]
+ Watcher(#[from] notify::Error),
+ #[error("Error waiting for activation: {0}")]
+ Waiting(#[from] DangerZoneError),
+}
+pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> {
+ let lock_path = deploy::make_lock_path(&temp_path, &closure);
+
+ let (created, done) = mpsc::channel(1);
+
+ let mut watcher: RecommendedWatcher = {
+ // TODO: fix wasteful clone
+ let lock_path = lock_path.clone();
+
+ Watcher::new_immediate(move |res: Result| {
+ let send_result = match res {
+ Ok(e) if e.kind == notify::EventKind::Create(notify::event::CreateKind::File) => {
+ match &e.paths[..] {
+ [x] if x == Path::new(&lock_path) => created.try_send(Ok(())),
+ _ => Ok(()),
+ }
+ }
+ Err(e) => created.try_send(Err(e)),
+ Ok(_) => Ok(()), // ignore non-removal events
+ };
+
+ if let Err(e) = send_result {
+ error!("Could not send file system event to watcher: {}", e);
+ }
+ })?
+ };
+
+ watcher.watch(&temp_path, RecursiveMode::NonRecursive)?;
+
+ // Avoid a potential race condition by checking for existence after watcher creation
+ if fs::metadata(&lock_path).await.is_ok() {
+ watcher.unwatch(&temp_path)?;
+ return Ok(());
+ }
+
+ danger_zone(done, 60).await?;
+
+ info!("Found canary file, done waiting!");
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+pub enum ActivateError {
+ #[error("Failed to execute the command for setting profile: {0}")]
+ SetProfileError(std::io::Error),
+ #[error("The command for setting profile resulted in a bad exit code: {0:?}")]
+ SetProfileExitError(Option),
+
+ #[error("Failed to execute the activation script: {0}")]
+ RunActivateError(std::io::Error),
+ #[error("The activation script resulted in a bad exit code: {0:?}")]
+ RunActivateExitError(Option),
+
+ #[error("There was an error de-activating after an error was encountered: {0}")]
+ DeactivateError(#[from] DeactivateError),
+
+ #[error("Failed to get activation confirmation: {0}")]
+ ActivationConfirmationError(#[from] ActivationConfirmationError),
+}
+
+pub async fn activate(
+ profile_path: String,
+ closure: String,
+ auto_rollback: bool,
+ temp_path: String,
+ confirm_timeout: u16,
+ magic_rollback: bool,
+) -> Result<(), ActivateError> {
+ info!("Activating profile");
+
+ let nix_env_set_exit_status = Command::new("nix-env")
+ .arg("-p")
+ .arg(&profile_path)
+ .arg("--set")
+ .arg(&closure)
+ .status()
+ .await
+ .map_err(ActivateError::SetProfileError)?;
+
+ match nix_env_set_exit_status.code() {
+ Some(0) => (),
+ a => {
+ if auto_rollback {
+ deactivate(&profile_path).await?;
+ }
+ return Err(ActivateError::SetProfileExitError(a));
+ }
+ };
+
+ debug!("Running activation script");
+
+ let activate_status = match Command::new(format!("{}/deploy-rs-activate", profile_path))
+ .env("PROFILE", &profile_path)
+ .current_dir(&profile_path)
+ .status()
+ .await
+ .map_err(ActivateError::RunActivateError)
+ {
+ Ok(x) => x,
+ Err(e) => {
+ if auto_rollback {
+ deactivate(&profile_path).await?;
+ }
+ return Err(e);
+ }
+ };
+
+ match activate_status.code() {
+ Some(0) => (),
+ a => {
+ if auto_rollback {
+ deactivate(&profile_path).await?;
+ }
+ return Err(ActivateError::RunActivateExitError(a));
+ }
+ };
+
+ info!("Activation succeeded!");
+
+ if magic_rollback {
+ info!("Magic rollback is enabled, setting up confirmation hook...");
+
+ match activation_confirmation(profile_path.clone(), temp_path, confirm_timeout, closure)
+ .await
+ {
+ Ok(()) => {}
+ Err(err) => {
+ deactivate(&profile_path).await?;
+ return Err(ActivateError::ActivationConfirmationError(err));
+ }
+ };
+ }
+
+ Ok(())
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box> {
+ // Ensure that this process stays alive after the SSH connection dies
+ let mut signals = Signals::new(&[SIGHUP])?;
+ std::thread::spawn(move || {
+ for sig in signals.forever() {
+ println!("Received NOHUP - ignoring...");
+ }
+ });
+
+ let opts: Opts = Opts::parse();
+
+ deploy::init_logger(
+ opts.debug_logs,
+ opts.log_dir.as_deref(),
+ match opts.subcmd {
+ SubCommand::Activate(_) => deploy::LoggerType::Activate,
+ SubCommand::Wait(_) => deploy::LoggerType::Wait,
+ },
+ )?;
+
+ let r = match opts.subcmd {
+ SubCommand::Activate(activate_opts) => activate(
+ activate_opts.profile_path,
+ activate_opts.closure,
+ activate_opts.auto_rollback,
+ opts.temp_path,
+ activate_opts.confirm_timeout,
+ activate_opts.magic_rollback,
+ )
+ .await
+ .map_err(|x| Box::new(x) as Box),
+
+ SubCommand::Wait(wait_opts) => wait(opts.temp_path, wait_opts.closure)
+ .await
+ .map_err(|x| Box::new(x) as Box),
+ };
+
+ match r {
+ Ok(()) => (),
+ Err(err) => {
+ error!("{}", err);
+ std::process::exit(1)
+ }
+ }
+
+ Ok(())
+}
diff --git a/src/bin/deploy.rs b/src/bin/deploy.rs
new file mode 100644
index 0000000..0381525
--- /dev/null
+++ b/src/bin/deploy.rs
@@ -0,0 +1,564 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::collections::HashMap;
+use std::io::{stdin, stdout, Write};
+
+use clap::Clap;
+
+use std::process::Stdio;
+use tokio::process::Command;
+
+use thiserror::Error;
+
+#[macro_use]
+extern crate log;
+
+#[macro_use]
+extern crate serde_derive;
+
+/// Simple Rust rewrite of a simple Nix Flake deployment tool
+#[derive(Clap, Debug)]
+#[clap(version = "1.0", author = "Serokell <hi@serokell.io>")]
+struct Opts {
+ /// The flake to deploy
+ #[clap(default_value = ".")]
+ flake: String,
+ /// Check signatures when using `nix copy`
+ #[clap(short, long)]
+ checksigs: bool,
+ /// Use the interactive prompt before deployment
+ #[clap(short, long)]
+ interactive: bool,
+ /// Extra arguments to be passed to nix build
+    extra_build_args: Vec<String>,
+
+    /// Print debug logs to output
+    #[clap(short, long)]
+    debug_logs: bool,
+    /// Directory to print logs to (including the background activation process)
+    #[clap(long)]
+    log_dir: Option<String>,
+
+    /// Keep the build outputs of each built profile
+    #[clap(short, long)]
+    keep_result: bool,
+    /// Location to keep outputs from built profiles in
+    #[clap(short, long)]
+    result_path: Option<String>,
+
+    /// Skip the automatic pre-build checks
+    #[clap(short, long)]
+    skip_checks: bool,
+
+    /// Override the SSH user with the given value
+    #[clap(long)]
+    ssh_user: Option<String>,
+    /// Override the profile user with the given value
+    #[clap(long)]
+    profile_user: Option<String>,
+    /// Override the SSH options used
+    #[clap(long)]
+    ssh_opts: Option<String>,
+    /// Override if the connecting to the target node should be considered fast
+    #[clap(long)]
+    fast_connection: Option<bool>,
+    /// Override if a rollback should be attempted if activation fails
+    #[clap(long)]
+    auto_rollback: Option<bool>,
+    /// Override hostname used for the node
+    #[clap(long)]
+    hostname: Option<String>,
+    /// Make activation wait for confirmation, or roll back after a period of time
+    #[clap(long)]
+    magic_rollback: Option<bool>,
+    /// How long activation should wait for confirmation (if using magic-rollback)
+    #[clap(long)]
+    confirm_timeout: Option<u16>,
+    /// Where to store temporary files (only used by magic-rollback)
+    #[clap(long)]
+    temp_path: Option<String>,
+}
+
+/// Returns if the available Nix installation supports flakes
+async fn test_flake_support() -> Result<bool, std::io::Error> {
+ debug!("Checking for flake support");
+
+ Ok(Command::new("nix")
+ .arg("eval")
+ .arg("--expr")
+ .arg("builtins.getFlake")
+ // This will error on some machines "intentionally", and we don't really need that printing
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .status()
+ .await?
+ .success())
+}
+
+#[derive(Error, Debug)]
+enum CheckDeploymentError {
+ #[error("Failed to execute Nix checking command: {0}")]
+ NixCheckError(#[from] std::io::Error),
+ #[error("Nix checking command resulted in a bad exit code: {0:?}")]
+    NixCheckExitError(Option<i32>),
+}
+
+async fn check_deployment(
+ supports_flakes: bool,
+ repo: &str,
+ extra_build_args: &[String],
+) -> Result<(), CheckDeploymentError> {
+ info!("Running checks for flake in {}", repo);
+
+ let mut c = match supports_flakes {
+ true => Command::new("nix"),
+ false => Command::new("nix-build"),
+ };
+
+ let mut check_command = match supports_flakes {
+ true => {
+ c.arg("flake")
+ .arg("check")
+ .arg(repo)
+ }
+ false => {
+ c.arg("-E")
+ .arg("--no-out-link")
+ .arg(format!("let r = import {}/.; x = (if builtins.isFunction r then (r {{}}) else r); in if x ? checks then x.checks.${{builtins.currentSystem}} else {{}}", repo))
+ }
+ };
+
+ for extra_arg in extra_build_args {
+ check_command = check_command.arg(extra_arg);
+ }
+
+ let check_status = check_command.status().await?;
+
+ match check_status.code() {
+ Some(0) => (),
+ a => return Err(CheckDeploymentError::NixCheckExitError(a)),
+ };
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+enum GetDeploymentDataError {
+ #[error("Failed to execute nix eval command: {0}")]
+ NixEval(std::io::Error),
+ #[error("Failed to read output from evaluation: {0}")]
+ NixEvalOut(std::io::Error),
+ #[error("Evaluation resulted in a bad exit code: {0:?}")]
+    NixEvalExit(Option<i32>),
+ #[error("Error converting evaluation output to utf8: {0}")]
+ DecodeUtf8(#[from] std::string::FromUtf8Error),
+ #[error("Error decoding the JSON from evaluation: {0}")]
+ DecodeJson(#[from] serde_json::error::Error),
+}
+
+/// Evaluates the Nix in the given `repo` and return the processed Data from it
+async fn get_deployment_data(
+ supports_flakes: bool,
+ repo: &str,
+ extra_build_args: &[String],
+) -> Result<deploy::data::Data, GetDeploymentDataError> {
+ info!("Evaluating flake in {}", repo);
+
+ let mut c = match supports_flakes {
+ true => Command::new("nix"),
+ false => Command::new("nix-instantiate"),
+ };
+
+ let mut build_command = match supports_flakes {
+ true => {
+ c.arg("eval")
+ .arg("--json")
+ .arg(format!("{}#deploy", repo))
+ }
+ false => {
+ c
+ .arg("--strict")
+ .arg("--read-write-mode")
+ .arg("--json")
+ .arg("--eval")
+ .arg("-E")
+ .arg(format!("let r = import {}/.; in if builtins.isFunction r then (r {{}}).deploy else r.deploy", repo))
+ }
+ };
+
+ for extra_arg in extra_build_args {
+ build_command = build_command.arg(extra_arg);
+ }
+
+ let build_child = build_command
+ .stdout(Stdio::piped())
+ .spawn()
+ .map_err(GetDeploymentDataError::NixEval)?;
+
+ let build_output = build_child
+ .wait_with_output()
+ .await
+ .map_err(GetDeploymentDataError::NixEvalOut)?;
+
+ match build_output.status.code() {
+ Some(0) => (),
+ a => return Err(GetDeploymentDataError::NixEvalExit(a)),
+ };
+
+ let data_json = String::from_utf8(build_output.stdout)?;
+
+ Ok(serde_json::from_str(&data_json)?)
+}
+
+#[derive(Serialize)]
+struct PromptPart<'a> {
+ user: &'a str,
+ ssh_user: &'a str,
+ path: &'a str,
+ hostname: &'a str,
+ ssh_opts: &'a [String],
+}
+
+fn print_deployment(
+ parts: &[(deploy::DeployData, deploy::DeployDefs)],
+) -> Result<(), toml::ser::Error> {
+    let mut part_map: HashMap<String, HashMap<String, PromptPart>> = HashMap::new();
+
+ for (data, defs) in parts {
+ part_map
+ .entry(data.node_name.to_string())
+ .or_insert(HashMap::new())
+ .insert(
+ data.profile_name.to_string(),
+ PromptPart {
+ user: &defs.profile_user,
+ ssh_user: &defs.ssh_user,
+ path: &data.profile.profile_settings.path,
+ hostname: &data.node.node_settings.hostname,
+ ssh_opts: &data.merged_settings.ssh_opts,
+ },
+ );
+ }
+
+ let toml = toml::to_string(&part_map)?;
+
+ info!("The following profiles are going to be deployed:\n{}", toml);
+
+ Ok(())
+}
+#[derive(Error, Debug)]
+enum PromptDeploymentError {
+ #[error("Failed to make printable TOML of deployment: {0}")]
+ TomlFormat(#[from] toml::ser::Error),
+ #[error("Failed to flush stdout prior to query: {0}")]
+ StdoutFlush(std::io::Error),
+ #[error("Failed to read line from stdin: {0}")]
+ StdinRead(std::io::Error),
+ #[error("User cancelled deployment")]
+ Cancelled,
+}
+
+fn prompt_deployment(
+ parts: &[(deploy::DeployData, deploy::DeployDefs)],
+) -> Result<(), PromptDeploymentError> {
+ print_deployment(parts)?;
+
+ info!("Are you sure you want to deploy these profiles?");
+ print!("> ");
+
+ stdout()
+ .flush()
+ .map_err(PromptDeploymentError::StdoutFlush)?;
+
+ let mut s = String::new();
+ stdin()
+ .read_line(&mut s)
+ .map_err(PromptDeploymentError::StdinRead)?;
+
+ if !yn::yes(&s) {
+ if yn::is_somewhat_yes(&s) {
+ info!("Sounds like you might want to continue, to be more clear please just say \"yes\". Do you want to deploy these profiles?");
+ print!("> ");
+
+ stdout()
+ .flush()
+ .map_err(PromptDeploymentError::StdoutFlush)?;
+
+ let mut s = String::new();
+ stdin()
+ .read_line(&mut s)
+ .map_err(PromptDeploymentError::StdinRead)?;
+
+ if !yn::yes(&s) {
+ return Err(PromptDeploymentError::Cancelled);
+ }
+ } else {
+ if !yn::no(&s) {
+ info!(
+ "That was unclear, but sounded like a no to me. Please say \"yes\" or \"no\" to be more clear."
+ );
+ }
+
+ return Err(PromptDeploymentError::Cancelled);
+ }
+ }
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+enum RunDeployError {
+ #[error("Failed to deploy profile: {0}")]
+ DeployProfileError(#[from] deploy::deploy::DeployProfileError),
+ #[error("Failed to push profile: {0}")]
+ PushProfileError(#[from] deploy::push::PushProfileError),
+ #[error("No profile named `{0}` was found")]
+ ProfileNotFound(String),
+ #[error("No node named `{0}` was found")]
+ NodeNotFound(String),
+ #[error("Profile was provided without a node name")]
+ ProfileWithoutNode,
+ #[error("Error processing deployment definitions: {0}")]
+ DeployDataDefsError(#[from] deploy::DeployDataDefsError),
+ #[error("Failed to make printable TOML of deployment: {0}")]
+ TomlFormat(#[from] toml::ser::Error),
+ #[error("{0}")]
+ PromptDeploymentError(#[from] PromptDeploymentError),
+}
+
+async fn run_deploy(
+ deploy_flake: deploy::DeployFlake<'_>,
+ data: deploy::data::Data,
+ supports_flakes: bool,
+ check_sigs: bool,
+ interactive: bool,
+ cmd_overrides: deploy::CmdOverrides,
+ keep_result: bool,
+ result_path: Option<&str>,
+ extra_build_args: &[String],
+ debug_logs: bool,
+    log_dir: Option<String>,
+) -> Result<(), RunDeployError> {
+ let to_deploy: Vec<((&str, &deploy::data::Node), (&str, &deploy::data::Profile))> =
+ match (&deploy_flake.node, &deploy_flake.profile) {
+ (Some(node_name), Some(profile_name)) => {
+ let node = match data.nodes.get(node_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
+ };
+ let profile = match node.node_settings.profiles.get(profile_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
+ };
+
+ vec![((node_name, node), (profile_name, profile))]
+ }
+ (Some(node_name), None) => {
+ let node = match data.nodes.get(node_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
+ };
+
+ let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
+
+ for profile_name in [
+ node.node_settings.profiles_order.iter().collect(),
+                node.node_settings.profiles.keys().collect::<Vec<&String>>(),
+ ]
+ .concat()
+ {
+ let profile = match node.node_settings.profiles.get(profile_name) {
+ Some(x) => x,
+ None => {
+ return Err(RunDeployError::ProfileNotFound(profile_name.to_owned()))
+ }
+ };
+
+ if !profiles_list.iter().any(|(n, _)| n == profile_name) {
+ profiles_list.push((&profile_name, profile));
+ }
+ }
+
+ profiles_list
+ .into_iter()
+ .map(|x| ((node_name.as_str(), node), x))
+ .collect()
+ }
+ (None, None) => {
+ let mut l = Vec::new();
+
+ for (node_name, node) in &data.nodes {
+ let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
+
+ for profile_name in [
+ node.node_settings.profiles_order.iter().collect(),
+                    node.node_settings.profiles.keys().collect::<Vec<&String>>(),
+ ]
+ .concat()
+ {
+ let profile = match node.node_settings.profiles.get(profile_name) {
+ Some(x) => x,
+ None => {
+ return Err(RunDeployError::ProfileNotFound(
+ profile_name.to_owned(),
+ ))
+ }
+ };
+
+ if !profiles_list.iter().any(|(n, _)| n == profile_name) {
+ profiles_list.push((&profile_name, profile));
+ }
+ }
+
+ let ll: Vec<((&str, &deploy::data::Node), (&str, &deploy::data::Profile))> =
+ profiles_list
+ .into_iter()
+ .map(|x| ((node_name.as_str(), node), x))
+ .collect();
+
+ l.extend(ll);
+ }
+
+ l
+ }
+ (None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
+ };
+
+ let mut parts: Vec<(deploy::DeployData, deploy::DeployDefs)> = Vec::new();
+
+ for ((node_name, node), (profile_name, profile)) in to_deploy {
+ let deploy_data = deploy::make_deploy_data(
+ &data.generic_settings,
+ node,
+ node_name,
+ profile,
+ profile_name,
+ &cmd_overrides,
+ debug_logs,
+ log_dir.as_deref(),
+ );
+
+ let deploy_defs = deploy_data.defs()?;
+
+ parts.push((deploy_data, deploy_defs));
+ }
+
+ if interactive {
+ prompt_deployment(&parts[..])?;
+ } else {
+ print_deployment(&parts[..])?;
+ }
+
+ for (deploy_data, deploy_defs) in &parts {
+ deploy::push::push_profile(
+ supports_flakes,
+ check_sigs,
+ deploy_flake.repo,
+ &deploy_data,
+ &deploy_defs,
+ keep_result,
+ result_path,
+ extra_build_args,
+ )
+ .await?;
+ }
+
+ for (deploy_data, deploy_defs) in &parts {
+ deploy::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
+ }
+
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+enum RunError {
+ #[error("Failed to deploy profile: {0}")]
+ DeployProfileError(#[from] deploy::deploy::DeployProfileError),
+ #[error("Failed to push profile: {0}")]
+ PushProfileError(#[from] deploy::push::PushProfileError),
+ #[error("Failed to test for flake support: {0}")]
+ FlakeTestError(std::io::Error),
+ #[error("Failed to check deployment: {0}")]
+ CheckDeploymentError(#[from] CheckDeploymentError),
+ #[error("Failed to evaluate deployment data: {0}")]
+ GetDeploymentDataError(#[from] GetDeploymentDataError),
+ #[error("Error parsing flake: {0}")]
+ ParseFlakeError(#[from] deploy::ParseFlakeError),
+ #[error("Error initiating logger: {0}")]
+ LoggerError(#[from] flexi_logger::FlexiLoggerError),
+ #[error("{0}")]
+ RunDeployError(#[from] RunDeployError),
+}
+
+async fn run() -> Result<(), RunError> {
+ let opts: Opts = Opts::parse();
+
+ deploy::init_logger(
+ opts.debug_logs,
+ opts.log_dir.as_deref(),
+ deploy::LoggerType::Deploy,
+ )?;
+
+ let deploy_flake = deploy::parse_flake(opts.flake.as_str())?;
+
+ let cmd_overrides = deploy::CmdOverrides {
+ ssh_user: opts.ssh_user,
+ profile_user: opts.profile_user,
+ ssh_opts: opts.ssh_opts,
+ fast_connection: opts.fast_connection,
+ auto_rollback: opts.auto_rollback,
+ hostname: opts.hostname,
+ magic_rollback: opts.magic_rollback,
+ temp_path: opts.temp_path,
+ confirm_timeout: opts.confirm_timeout,
+ };
+
+ let supports_flakes = test_flake_support()
+ .await
+ .map_err(RunError::FlakeTestError)?;
+
+ if !supports_flakes {
+ warn!("A Nix version without flakes support was detected, support for this is work in progress");
+ }
+
+ if !opts.skip_checks {
+ check_deployment(supports_flakes, deploy_flake.repo, &opts.extra_build_args).await?;
+ }
+
+ let data =
+ get_deployment_data(supports_flakes, deploy_flake.repo, &opts.extra_build_args).await?;
+
+ let result_path = opts.result_path.as_deref();
+
+ run_deploy(
+ deploy_flake,
+ data,
+ supports_flakes,
+ opts.checksigs,
+ opts.interactive,
+ cmd_overrides,
+ opts.keep_result,
+ result_path,
+ &opts.extra_build_args,
+ opts.debug_logs,
+ opts.log_dir,
+ )
+ .await?;
+
+ Ok(())
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+ match run().await {
+ Ok(()) => (),
+ Err(err) => {
+ error!("{}", err);
+ std::process::exit(1);
+ }
+ }
+
+ Ok(())
+}
diff --git a/src/data.rs b/src/data.rs
new file mode 100644
index 0000000..f557e41
--- /dev/null
+++ b/src/data.rs
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use merge::Merge;
+
+use std::collections::HashMap;
+
+#[derive(Deserialize, Debug, Clone, Merge)]
+pub struct GenericSettings {
+ #[serde(rename(deserialize = "sshUser"))]
+    pub ssh_user: Option<String>,
+    pub user: Option<String>,
+    #[serde(
+        skip_serializing_if = "Vec::is_empty",
+        default,
+        rename(deserialize = "sshOpts")
+    )]
+    #[merge(strategy = merge::vec::append)]
+    pub ssh_opts: Vec<String>,
+    #[serde(rename(deserialize = "fastConnection"))]
+    pub fast_connection: Option<bool>,
+    #[serde(rename(deserialize = "autoRollback"))]
+    pub auto_rollback: Option<bool>,
+    #[serde(rename(deserialize = "confirmTimeout"))]
+    pub confirm_timeout: Option<u16>,
+    #[serde(rename(deserialize = "tempPath"))]
+    pub temp_path: Option<String>,
+    #[serde(rename(deserialize = "magicRollback"))]
+    pub magic_rollback: Option<bool>,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct NodeSettings {
+ pub hostname: String,
+    pub profiles: HashMap<String, Profile>,
+ #[serde(
+ skip_serializing_if = "Vec::is_empty",
+ default,
+ rename(deserialize = "profilesOrder")
+ )]
+    pub profiles_order: Vec<String>,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct ProfileSettings {
+ pub path: String,
+ #[serde(rename(deserialize = "profilePath"))]
+    pub profile_path: Option<String>,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct Profile {
+ #[serde(flatten)]
+ pub profile_settings: ProfileSettings,
+ #[serde(flatten)]
+ pub generic_settings: GenericSettings,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct Node {
+ #[serde(flatten)]
+ pub generic_settings: GenericSettings,
+ #[serde(flatten)]
+ pub node_settings: NodeSettings,
+}
+
+#[derive(Deserialize, Debug, Clone)]
+pub struct Data {
+ #[serde(flatten)]
+ pub generic_settings: GenericSettings,
+    pub nodes: HashMap<String, Node>,
+}
diff --git a/src/deploy.rs b/src/deploy.rs
new file mode 100644
index 0000000..3371160
--- /dev/null
+++ b/src/deploy.rs
@@ -0,0 +1,296 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+// SPDX-FileCopyrightText: 2020 Andreas Fuchs
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::borrow::Cow;
+use tokio::process::Command;
+
+use thiserror::Error;
+
+fn build_activate_command(
+    sudo: &Option<String>,
+    profile_path: &str,
+    closure: &str,
+    auto_rollback: bool,
+    temp_path: &Cow<str>,
+ confirm_timeout: u16,
+ magic_rollback: bool,
+ debug_logs: bool,
+ log_dir: Option<&str>,
+) -> String {
+ let mut self_activate_command = format!("{}/activate-rs", closure);
+
+ if debug_logs {
+ self_activate_command = format!("{} --debug-logs", self_activate_command);
+ }
+
+ if let Some(log_dir) = log_dir {
+ self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir);
+ }
+
+ self_activate_command = format!(
+ "{} --temp-path '{}' activate '{}' '{}'",
+ self_activate_command, temp_path, closure, profile_path
+ );
+
+ self_activate_command = format!(
+ "{} --confirm-timeout {}",
+ self_activate_command, confirm_timeout
+ );
+
+ if magic_rollback {
+ self_activate_command = format!("{} --magic-rollback", self_activate_command);
+ }
+
+ if auto_rollback {
+ self_activate_command = format!("{} --auto-rollback", self_activate_command);
+ }
+
+ if let Some(sudo_cmd) = &sudo {
+ self_activate_command = format!("{} {}", sudo_cmd, self_activate_command);
+ }
+
+ self_activate_command
+}
+
+#[test]
+fn test_activation_command_builder() {
+ let sudo = Some("sudo -u test".to_string());
+ let profile_path = "/blah/profiles/test";
+ let closure = "/nix/store/blah/etc";
+ let auto_rollback = true;
+ let temp_path = &"/tmp".into();
+ let confirm_timeout = 30;
+ let magic_rollback = true;
+ let debug_logs = true;
+ let log_dir = Some("/tmp/something.txt");
+
+ assert_eq!(
+ build_activate_command(
+ &sudo,
+ profile_path,
+ closure,
+ auto_rollback,
+ temp_path,
+ confirm_timeout,
+ magic_rollback,
+ debug_logs,
+ log_dir
+ ),
+ "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' activate '/nix/store/blah/etc' '/blah/profiles/test' --confirm-timeout 30 --magic-rollback --auto-rollback"
+ .to_string(),
+ );
+}
+
+fn build_wait_command(
+    sudo: &Option<String>,
+    closure: &str,
+    temp_path: &Cow<str>,
+ debug_logs: bool,
+ log_dir: Option<&str>,
+) -> String {
+ let mut self_activate_command = format!("{}/activate-rs", closure);
+
+ if debug_logs {
+ self_activate_command = format!("{} --debug-logs", self_activate_command);
+ }
+
+ if let Some(log_dir) = log_dir {
+ self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir);
+ }
+
+ self_activate_command = format!(
+ "{} --temp-path '{}' wait '{}'",
+ self_activate_command, temp_path, closure
+ );
+
+ if let Some(sudo_cmd) = &sudo {
+ self_activate_command = format!("{} {}", sudo_cmd, self_activate_command);
+ }
+
+ self_activate_command
+}
+
+#[test]
+fn test_wait_command_builder() {
+ let sudo = Some("sudo -u test".to_string());
+ let closure = "/nix/store/blah/etc";
+ let temp_path = &"/tmp".into();
+ let debug_logs = true;
+ let log_dir = Some("/tmp/something.txt");
+
+ assert_eq!(
+ build_wait_command(
+ &sudo,
+ closure,
+ temp_path,
+ debug_logs,
+ log_dir
+ ),
+ "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' wait '/nix/store/blah/etc'"
+ .to_string(),
+ );
+}
+
+#[derive(Error, Debug)]
+pub enum DeployProfileError {
+ #[error("Failed to calculate activate bin path from deploy bin path: {0}")]
+ DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
+
+ #[error("Failed to spawn activation command over SSH: {0}")]
+ SSHSpawnActivateError(std::io::Error),
+
+ #[error("Failed to run activation command over SSH: {0}")]
+ SSHActivateError(std::io::Error),
+ #[error("Activating over SSH resulted in a bad exit code: {0:?}")]
+    SSHActivateExitError(Option<i32>),
+
+ #[error("Failed to run wait command over SSH: {0}")]
+ SSHWaitError(std::io::Error),
+ #[error("Waiting over SSH resulted in a bad exit code: {0:?}")]
+    SSHWaitExitError(Option<i32>),
+
+ #[error("Failed to run confirmation command over SSH (the server should roll back): {0}")]
+ SSHConfirmError(std::io::Error),
+ #[error(
+ "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}"
+ )]
+    SSHConfirmExitError(Option<i32>),
+}
+
+pub async fn deploy_profile(
+ deploy_data: &super::DeployData<'_>,
+ deploy_defs: &super::DeployDefs,
+) -> Result<(), DeployProfileError> {
+ info!(
+ "Activating profile `{}` for node `{}`",
+ deploy_data.profile_name, deploy_data.node_name
+ );
+
+    let temp_path: Cow<str> = match &deploy_data.merged_settings.temp_path {
+ Some(x) => x.into(),
+ None => "/tmp".into(),
+ };
+
+ let confirm_timeout = deploy_data.merged_settings.confirm_timeout.unwrap_or(30);
+
+ let magic_rollback = deploy_data.merged_settings.magic_rollback.unwrap_or(true);
+
+ let auto_rollback = deploy_data.merged_settings.auto_rollback.unwrap_or(true);
+
+ let self_activate_command = build_activate_command(
+ &deploy_defs.sudo,
+ &deploy_defs.profile_path,
+ &deploy_data.profile.profile_settings.path,
+ auto_rollback,
+ &temp_path,
+ confirm_timeout,
+ magic_rollback,
+ deploy_data.debug_logs,
+ deploy_data.log_dir,
+ );
+
+ debug!("Constructed activation command: {}", self_activate_command);
+
+ let self_wait_command = build_wait_command(
+ &deploy_defs.sudo,
+ &deploy_data.profile.profile_settings.path,
+ &temp_path,
+ deploy_data.debug_logs,
+ deploy_data.log_dir,
+ );
+
+ debug!("Constructed wait command: {}", self_wait_command);
+
+ let hostname = match deploy_data.cmd_overrides.hostname {
+ Some(ref x) => x,
+ None => &deploy_data.node.node_settings.hostname,
+ };
+
+ let ssh_addr = format!("ssh://{}@{}", deploy_defs.ssh_user, hostname);
+
+ let mut ssh_activate_command_ = Command::new("ssh");
+ let ssh_activate_command = ssh_activate_command_.arg(&ssh_addr);
+
+ for ssh_opt in &deploy_data.merged_settings.ssh_opts {
+ ssh_activate_command.arg(&ssh_opt);
+ }
+
+ if !magic_rollback {
+ let ssh_activate_exit_status = ssh_activate_command
+ .arg(self_activate_command)
+ .status()
+ .await
+ .map_err(DeployProfileError::SSHActivateError)?;
+
+ match ssh_activate_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeployProfileError::SSHActivateExitError(a)),
+ };
+
+ info!("Success activating, done!");
+ } else {
+ let ssh_activate = ssh_activate_command
+ .arg(self_activate_command)
+ .spawn()
+ .map_err(DeployProfileError::SSHSpawnActivateError)?;
+
+ info!("Creating activation waiter");
+
+ let mut ssh_wait_command_ = Command::new("ssh");
+ let ssh_wait_command = ssh_wait_command_.arg(&ssh_addr);
+
+ for ssh_opt in &deploy_data.merged_settings.ssh_opts {
+ ssh_wait_command.arg(ssh_opt);
+ }
+
+ let ssh_wait_exit_status = ssh_wait_command
+ .arg(self_wait_command)
+ .status()
+ .await
+ .map_err(DeployProfileError::SSHWaitError)?;
+
+ match ssh_wait_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeployProfileError::SSHWaitExitError(a)),
+ };
+
+ info!("Success activating, attempting to confirm activation");
+
+ let mut c = Command::new("ssh");
+ let mut ssh_confirm_command = c.arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname));
+
+ for ssh_opt in &deploy_data.merged_settings.ssh_opts {
+ ssh_confirm_command = ssh_confirm_command.arg(ssh_opt);
+ }
+
+ let lock_path =
+ super::make_lock_path(&temp_path, &deploy_data.profile.profile_settings.path);
+
+ let mut confirm_command = format!("rm {}", lock_path);
+ if let Some(sudo_cmd) = &deploy_defs.sudo {
+ confirm_command = format!("{} {}", sudo_cmd, confirm_command);
+ }
+
+ debug!(
+ "Attempting to run command to confirm deployment: {}",
+ confirm_command
+ );
+
+ let ssh_exit_status = ssh_confirm_command
+ .arg(confirm_command)
+ .status()
+ .await
+ .map_err(DeployProfileError::SSHConfirmError)?;
+
+ match ssh_exit_status.code() {
+ Some(0) => (),
+ a => return Err(DeployProfileError::SSHConfirmExitError(a)),
+ };
+
+ info!("Deployment confirmed.");
+ }
+
+ Ok(())
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..21bfb8c
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,424 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+// SPDX-FileCopyrightText: 2020 Andreas Fuchs
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use rnix::{types::*, SyntaxKind::*};
+
+use merge::Merge;
+
+use thiserror::Error;
+
+use flexi_logger::*;
+
+#[macro_use]
+extern crate log;
+
+#[macro_use]
+extern crate serde_derive;
+
+pub fn make_lock_path(temp_path: &str, closure: &str) -> String {
+ let lock_hash =
+ &closure["/nix/store/".len()..closure.find("-").unwrap_or_else(|| closure.len())];
+ format!("{}/deploy-rs-canary-{}", temp_path, lock_hash)
+}
+
+fn make_emoji(level: log::Level) -> &'static str {
+ match level {
+ log::Level::Error => "❌",
+ log::Level::Warn => "⚠️",
+ log::Level::Info => "ℹ️",
+ log::Level::Debug => "❓",
+ log::Level::Trace => "🖊️",
+ }
+}
+
+pub fn logger_formatter_activate(
+ w: &mut dyn std::io::Write,
+ _now: &mut DeferredNow,
+ record: &Record,
+) -> Result<(), std::io::Error> {
+ let level = record.level();
+
+ write!(
+ w,
+ "⭐ {} [activate] [{}] {}",
+ make_emoji(level),
+ style(level, level.to_string()),
+ record.args()
+ )
+}
+
+pub fn logger_formatter_wait(
+ w: &mut dyn std::io::Write,
+ _now: &mut DeferredNow,
+ record: &Record,
+) -> Result<(), std::io::Error> {
+ let level = record.level();
+
+ write!(
+ w,
+ "👀 {} [wait] [{}] {}",
+ make_emoji(level),
+ style(level, level.to_string()),
+ record.args()
+ )
+}
+
+pub fn logger_formatter_deploy(
+ w: &mut dyn std::io::Write,
+ _now: &mut DeferredNow,
+ record: &Record,
+) -> Result<(), std::io::Error> {
+ let level = record.level();
+
+ write!(
+ w,
+ "🚀 {} [deploy] [{}] {}",
+ make_emoji(level),
+ style(level, level.to_string()),
+ record.args()
+ )
+}
+
+pub enum LoggerType {
+ Deploy,
+ Activate,
+ Wait,
+}
+
+pub fn init_logger(
+ debug_logs: bool,
+ log_dir: Option<&str>,
+ logger_type: LoggerType,
+) -> Result<(), FlexiLoggerError> {
+ let logger_formatter = match logger_type {
+ LoggerType::Deploy => logger_formatter_deploy,
+ LoggerType::Activate => logger_formatter_activate,
+ LoggerType::Wait => logger_formatter_wait,
+ };
+
+ if let Some(log_dir) = log_dir {
+ let mut logger = Logger::with_env_or_str("debug")
+ .log_to_file()
+ .format_for_stderr(logger_formatter)
+ .set_palette("196;208;51;7;8".to_string())
+ .directory(log_dir)
+ .duplicate_to_stderr(match debug_logs {
+ true => Duplicate::Debug,
+ false => Duplicate::Info,
+ })
+ .print_message();
+
+ match logger_type {
+ LoggerType::Activate => logger = logger.discriminant("activate"),
+ LoggerType::Wait => logger = logger.discriminant("wait"),
+ LoggerType::Deploy => (),
+ }
+
+ logger.start()?;
+ } else {
+ Logger::with_env_or_str(match debug_logs {
+ true => "debug",
+ false => "info",
+ })
+ .log_target(LogTarget::StdErr)
+ .format(logger_formatter)
+ .set_palette("196;208;51;7;8".to_string())
+ .start()?;
+ }
+
+ Ok(())
+}
+
+pub mod data;
+pub mod deploy;
+pub mod push;
+
+#[derive(Debug)]
+pub struct CmdOverrides {
+    pub ssh_user: Option<String>,
+    pub profile_user: Option<String>,
+    pub ssh_opts: Option<String>,
+    pub fast_connection: Option<bool>,
+    pub auto_rollback: Option<bool>,
+    pub hostname: Option<String>,
+    pub magic_rollback: Option<bool>,
+    pub temp_path: Option<String>,
+    pub confirm_timeout: Option<u16>,
+}
+
+#[derive(PartialEq, Debug)]
+pub struct DeployFlake<'a> {
+ pub repo: &'a str,
+    pub node: Option<String>,
+    pub profile: Option<String>,
+}
+
+#[derive(Error, Debug)]
+pub enum ParseFlakeError {
+ #[error("The given path was too long, did you mean to put something in quotes?")]
+ PathTooLong,
+ #[error("Unrecognized node or token encountered")]
+ Unrecognized,
+}
+pub fn parse_flake(flake: &str) -> Result<DeployFlake, ParseFlakeError> {
+ let flake_fragment_start = flake.find('#');
+ let (repo, maybe_fragment) = match flake_fragment_start {
+ Some(s) => (&flake[..s], Some(&flake[s + 1..])),
+ None => (flake, None),
+ };
+
+    let mut node: Option<String> = None;
+    let mut profile: Option<String> = None;
+
+ if let Some(fragment) = maybe_fragment {
+ let ast = rnix::parse(fragment);
+
+ let first_child = match ast.root().node().first_child() {
+ Some(x) => x,
+ None => {
+ return Ok(DeployFlake {
+ repo,
+ node: None,
+ profile: None,
+ })
+ }
+ };
+
+ let mut node_over = false;
+
+ for entry in first_child.children_with_tokens() {
+            let x: Option<String> = match (entry.kind(), node_over) {
+ (TOKEN_DOT, false) => {
+ node_over = true;
+ None
+ }
+ (TOKEN_DOT, true) => {
+ return Err(ParseFlakeError::PathTooLong);
+ }
+ (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()),
+ (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()),
+ (NODE_STRING, _) => {
+ let c = entry
+ .into_node()
+ .unwrap()
+ .children_with_tokens()
+ .nth(1)
+ .unwrap();
+
+ Some(c.into_token().unwrap().text().to_string())
+ }
+ _ => return Err(ParseFlakeError::Unrecognized),
+ };
+
+ if !node_over {
+ node = x;
+ } else {
+ profile = x;
+ }
+ }
+ }
+
+ Ok(DeployFlake {
+ repo,
+ node,
+ profile,
+ })
+}
+
+#[test]
+fn test_parse_flake() {
+ assert_eq!(
+ parse_flake("../deploy/examples/system").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: None,
+ profile: None,
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system#").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: None,
+ profile: None,
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: Some("computer".to_string()),
+ profile: Some("something.nix".to_string()),
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: Some("example.com".to_string()),
+ profile: Some("system".to_string()),
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system#example").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: Some("example".to_string()),
+ profile: None
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system#example.system").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: Some("example".to_string()),
+ profile: Some("system".to_string())
+ }
+ );
+
+ assert_eq!(
+ parse_flake("../deploy/examples/system").unwrap(),
+ DeployFlake {
+ repo: "../deploy/examples/system",
+ node: None,
+ profile: None,
+ }
+ );
+}
+
+#[derive(Debug, Clone)]
+pub struct DeployData<'a> {
+ pub node_name: &'a str,
+ pub node: &'a data::Node,
+ pub profile_name: &'a str,
+ pub profile: &'a data::Profile,
+
+ pub cmd_overrides: &'a CmdOverrides,
+
+ pub merged_settings: data::GenericSettings,
+
+ pub debug_logs: bool,
+ pub log_dir: Option<&'a str>,
+}
+
+#[derive(Debug)]
+pub struct DeployDefs {
+ pub ssh_user: String,
+ pub profile_user: String,
+ pub profile_path: String,
+    pub sudo: Option<String>,
+}
+
+#[derive(Error, Debug)]
+pub enum DeployDataDefsError {
+ #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")]
+ NoProfileUser(String, String),
+}
+
+impl<'a> DeployData<'a> {
+    pub fn defs(&'a self) -> Result<DeployDefs, DeployDataDefsError> {
+ let ssh_user = match self.merged_settings.ssh_user {
+ Some(ref u) => u.clone(),
+ None => whoami::username(),
+ };
+
+ let profile_user = match self.merged_settings.user {
+ Some(ref x) => x.clone(),
+ None => match self.merged_settings.ssh_user {
+ Some(ref x) => x.clone(),
+ None => {
+ return Err(DeployDataDefsError::NoProfileUser(
+ self.profile_name.to_owned(),
+ self.node_name.to_owned(),
+ ))
+ }
+ },
+ };
+
+ let profile_path = match self.profile.profile_settings.profile_path {
+ None => match &profile_user[..] {
+ "root" => format!("/nix/var/nix/profiles/{}", self.profile_name),
+ _ => format!(
+ "/nix/var/nix/profiles/per-user/{}/{}",
+ profile_user, self.profile_name
+ ),
+ },
+ Some(ref x) => x.clone(),
+ };
+
+        let sudo: Option<String> = match self.merged_settings.user {
+ Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)),
+ _ => None,
+ };
+
+ Ok(DeployDefs {
+ ssh_user,
+ profile_user,
+ profile_path,
+ sudo,
+ })
+ }
+}
+
+pub fn make_deploy_data<'a, 's>(
+ top_settings: &'s data::GenericSettings,
+ node: &'a data::Node,
+ node_name: &'a str,
+ profile: &'a data::Profile,
+ profile_name: &'a str,
+ cmd_overrides: &'a CmdOverrides,
+ debug_logs: bool,
+ log_dir: Option<&'a str>,
+) -> DeployData<'a> {
+ let mut merged_settings = profile.generic_settings.clone();
+ merged_settings.merge(node.generic_settings.clone());
+ merged_settings.merge(top_settings.clone());
+
+ if cmd_overrides.ssh_user.is_some() {
+ merged_settings.ssh_user = cmd_overrides.ssh_user.clone();
+ }
+ if cmd_overrides.profile_user.is_some() {
+ merged_settings.user = cmd_overrides.profile_user.clone();
+ }
+ if let Some(ref ssh_opts) = cmd_overrides.ssh_opts {
+ merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect();
+ }
+ if let Some(fast_connection) = cmd_overrides.fast_connection {
+ merged_settings.fast_connection = Some(fast_connection);
+ }
+ if let Some(auto_rollback) = cmd_overrides.auto_rollback {
+ merged_settings.auto_rollback = Some(auto_rollback);
+ }
+ if let Some(magic_rollback) = cmd_overrides.magic_rollback {
+ merged_settings.magic_rollback = Some(magic_rollback);
+ }
+
+ DeployData {
+ profile,
+ profile_name,
+ node,
+ node_name,
+
+ cmd_overrides,
+
+ merged_settings,
+
+ debug_logs,
+ log_dir,
+ }
+}
+
+#[derive(Error, Debug)]
+pub enum DeployPathToActivatePathError {
+ #[error("Deploy path did not have a parent directory")]
+ PathTooShort,
+ #[error("Deploy path was not valid utf8")]
+ InvalidUtf8,
+}
diff --git a/src/main.rs b/src/main.rs
deleted file mode 100644
index 1544fed..0000000
--- a/src/main.rs
+++ /dev/null
@@ -1,564 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use std::collections::HashMap;
-use std::io::{stdin, stdout, Write};
-
-use clap::Clap;
-
-use std::process::Stdio;
-use tokio::process::Command;
-
-use thiserror::Error;
-
-#[macro_use]
-extern crate log;
-
-#[macro_use]
-extern crate serde_derive;
-
-#[macro_use]
-mod utils;
-
-/// Simple Rust rewrite of a simple Nix Flake deployment tool
-#[derive(Clap, Debug)]
-#[clap(version = "1.0", author = "Serokell <https://serokell.io/>")]
-struct Opts {
- /// The flake to deploy
- #[clap(default_value = ".")]
- flake: String,
- /// Check signatures when using `nix copy`
- #[clap(short, long)]
- checksigs: bool,
- /// Use the interactive prompt before deployment
- #[clap(short, long)]
- interactive: bool,
- /// Extra arguments to be passed to nix build
- extra_build_args: Vec<String>,
-
- /// Print debug logs to output
- #[clap(short, long)]
- debug_logs: bool,
- /// Directory to print logs to (including the background activation process)
- #[clap(long)]
- log_dir: Option<String>,
-
- /// Keep the build outputs of each built profile
- #[clap(short, long)]
- keep_result: bool,
- /// Location to keep outputs from built profiles in
- #[clap(short, long)]
- result_path: Option<String>,
-
- /// Skip the automatic pre-build checks
- #[clap(short, long)]
- skip_checks: bool,
-
- /// Override the SSH user with the given value
- #[clap(long)]
- ssh_user: Option<String>,
- /// Override the profile user with the given value
- #[clap(long)]
- profile_user: Option<String>,
- /// Override the SSH options used
- #[clap(long)]
- ssh_opts: Option<String>,
- /// Override if the connecting to the target node should be considered fast
- #[clap(long)]
- fast_connection: Option<bool>,
- /// Override if a rollback should be attempted if activation fails
- #[clap(long)]
- auto_rollback: Option<bool>,
- /// Override hostname used for the node
- #[clap(long)]
- hostname: Option<String>,
- /// Make activation wait for confirmation, or roll back after a period of time
- #[clap(long)]
- magic_rollback: Option<bool>,
- /// How long activation should wait for confirmation (if using magic-rollback)
- #[clap(long)]
- confirm_timeout: Option<u16>,
- /// Where to store temporary files (only used by magic-rollback)
- #[clap(long)]
- temp_path: Option<String>,
-}
-
-/// Returns if the available Nix installation supports flakes
-async fn test_flake_support() -> Result<bool, std::io::Error> {
- debug!("Checking for flake support");
-
- Ok(Command::new("nix")
- .arg("eval")
- .arg("--expr")
- .arg("builtins.getFlake")
- // This will error on some machines "intentionally", and we don't really need that printing
- .stdout(Stdio::null())
- .stderr(Stdio::null())
- .status()
- .await?
- .success())
-}
-
-#[derive(Error, Debug)]
-enum CheckDeploymentError {
- #[error("Failed to execute Nix checking command: {0}")]
- NixCheckError(#[from] std::io::Error),
- #[error("Nix checking command resulted in a bad exit code: {0:?}")]
- NixCheckExitError(Option<i32>),
-}
-
-async fn check_deployment(
- supports_flakes: bool,
- repo: &str,
- extra_build_args: &[String],
-) -> Result<(), CheckDeploymentError> {
- info!("Running checks for flake in {}", repo);
-
- let mut c = match supports_flakes {
- true => Command::new("nix"),
- false => Command::new("nix-build"),
- };
-
- let mut check_command = match supports_flakes {
- true => {
- c.arg("flake")
- .arg("check")
- .arg(repo)
- }
- false => {
- c.arg("-E")
- .arg("--no-out-link")
- .arg(format!("let r = import {}/.; x = (if builtins.isFunction r then (r {{}}) else r); in if x ? checks then x.checks.${{builtins.currentSystem}} else {{}}", repo))
- }
- };
-
- for extra_arg in extra_build_args {
- check_command = check_command.arg(extra_arg);
- }
-
- let check_status = check_command.status().await?;
-
- match check_status.code() {
- Some(0) => (),
- a => return Err(CheckDeploymentError::NixCheckExitError(a)),
- };
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-enum GetDeploymentDataError {
- #[error("Failed to execute nix eval command: {0}")]
- NixEvalError(std::io::Error),
- #[error("Failed to read output from evaluation: {0}")]
- NixEvalOutError(std::io::Error),
- #[error("Evaluation resulted in a bad exit code: {0:?}")]
- NixEvalExitError(Option<i32>),
- #[error("Error converting evaluation output to utf8: {0}")]
- DecodeUtf8Error(#[from] std::string::FromUtf8Error),
- #[error("Error decoding the JSON from evaluation: {0}")]
- DecodeJsonError(#[from] serde_json::error::Error),
-}
-
-/// Evaluates the Nix in the given `repo` and return the processed Data from it
-async fn get_deployment_data(
- supports_flakes: bool,
- repo: &str,
- extra_build_args: &[String],
-) -> Result<utils::data::Data, GetDeploymentDataError> {
- info!("Evaluating flake in {}", repo);
-
- let mut c = match supports_flakes {
- true => Command::new("nix"),
- false => Command::new("nix-instantiate"),
- };
-
- let mut build_command = match supports_flakes {
- true => {
- c.arg("eval")
- .arg("--json")
- .arg(format!("{}#deploy", repo))
- }
- false => {
- c
- .arg("--strict")
- .arg("--read-write-mode")
- .arg("--json")
- .arg("--eval")
- .arg("-E")
- .arg(format!("let r = import {}/.; in if builtins.isFunction r then (r {{}}).deploy else r.deploy", repo))
- }
- };
-
- for extra_arg in extra_build_args {
- build_command = build_command.arg(extra_arg);
- }
-
- let build_child = build_command
- .stdout(Stdio::piped())
- .spawn()
- .map_err(GetDeploymentDataError::NixEvalError)?;
-
- let build_output = build_child
- .wait_with_output()
- .await
- .map_err(GetDeploymentDataError::NixEvalOutError)?;
-
- match build_output.status.code() {
- Some(0) => (),
- a => return Err(GetDeploymentDataError::NixEvalExitError(a)),
- };
-
- let data_json = String::from_utf8(build_output.stdout)?;
-
- Ok(serde_json::from_str(&data_json)?)
-}
-
-#[derive(Serialize)]
-struct PromptPart<'a> {
- user: &'a str,
- ssh_user: &'a str,
- path: &'a str,
- hostname: &'a str,
- ssh_opts: &'a [String],
-}
-
-fn print_deployment(
- parts: &[(utils::DeployData, utils::DeployDefs)],
-) -> Result<(), toml::ser::Error> {
- let mut part_map: HashMap<String, HashMap<String, PromptPart>> = HashMap::new();
-
- for (data, defs) in parts {
- part_map
- .entry(data.node_name.to_string())
- .or_insert(HashMap::new())
- .insert(
- data.profile_name.to_string(),
- PromptPart {
- user: &defs.profile_user,
- ssh_user: &defs.ssh_user,
- path: &data.profile.profile_settings.path,
- hostname: &data.node.node_settings.hostname,
- ssh_opts: &data.merged_settings.ssh_opts,
- },
- );
- }
-
- let toml = toml::to_string(&part_map)?;
-
- info!("The following profiles are going to be deployed:\n{}", toml);
-
- Ok(())
-}
-#[derive(Error, Debug)]
-enum PromptDeploymentError {
- #[error("Failed to make printable TOML of deployment: {0}")]
- TomlFormat(#[from] toml::ser::Error),
- #[error("Failed to flush stdout prior to query: {0}")]
- StdoutFlush(std::io::Error),
- #[error("Failed to read line from stdin: {0}")]
- StdinRead(std::io::Error),
- #[error("User cancelled deployment")]
- Cancelled,
-}
-
-fn prompt_deployment(
- parts: &[(utils::DeployData, utils::DeployDefs)],
-) -> Result<(), PromptDeploymentError> {
- print_deployment(parts)?;
-
- info!("Are you sure you want to deploy these profiles?");
- print!("> ");
-
- stdout()
- .flush()
- .map_err(PromptDeploymentError::StdoutFlush)?;
-
- let mut s = String::new();
- stdin()
- .read_line(&mut s)
- .map_err(PromptDeploymentError::StdinRead)?;
-
- if !yn::yes(&s) {
- if yn::is_somewhat_yes(&s) {
- info!("Sounds like you might want to continue, to be more clear please just say \"yes\". Do you want to deploy these profiles?");
- print!("> ");
-
- stdout()
- .flush()
- .map_err(PromptDeploymentError::StdoutFlush)?;
-
- let mut s = String::new();
- stdin()
- .read_line(&mut s)
- .map_err(PromptDeploymentError::StdinRead)?;
-
- if !yn::yes(&s) {
- return Err(PromptDeploymentError::Cancelled);
- }
- } else {
- if !yn::no(&s) {
- info!(
- "That was unclear, but sounded like a no to me. Please say \"yes\" or \"no\" to be more clear."
- );
- }
-
- return Err(PromptDeploymentError::Cancelled);
- }
- }
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-enum RunDeployError {
- #[error("Failed to deploy profile: {0}")]
- DeployProfileError(#[from] utils::deploy::DeployProfileError),
- #[error("Failed to push profile: {0}")]
- PushProfileError(#[from] utils::push::PushProfileError),
- #[error("No profile named `{0}` was found")]
- ProfileNotFound(String),
- #[error("No node named `{0}` was found")]
- NodeNotFound(String),
- #[error("Profile was provided without a node name")]
- ProfileWithoutNode,
- #[error("Error processing deployment definitions: {0}")]
- DeployDataDefsError(#[from] utils::DeployDataDefsError),
- #[error("Failed to make printable TOML of deployment: {0}")]
- TomlFormat(#[from] toml::ser::Error),
- #[error("{0}")]
- PromptDeploymentError(#[from] PromptDeploymentError),
-}
-
-async fn run_deploy(
- deploy_flake: utils::DeployFlake<'_>,
- data: utils::data::Data,
- supports_flakes: bool,
- check_sigs: bool,
- interactive: bool,
- cmd_overrides: utils::CmdOverrides,
- keep_result: bool,
- result_path: Option<&str>,
- extra_build_args: &[String],
- debug_logs: bool,
- log_dir: Option<String>,
-) -> Result<(), RunDeployError> {
- let to_deploy: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
- match (&deploy_flake.node, &deploy_flake.profile) {
- (Some(node_name), Some(profile_name)) => {
- let node = match data.nodes.get(node_name) {
- Some(x) => x,
- None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
- };
- let profile = match node.node_settings.profiles.get(profile_name) {
- Some(x) => x,
- None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
- };
-
- vec![((node_name, node), (profile_name, profile))]
- }
- (Some(node_name), None) => {
- let node = match data.nodes.get(node_name) {
- Some(x) => x,
- None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
- };
-
- let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
-
- for profile_name in [
- node.node_settings.profiles_order.iter().collect(),
- node.node_settings.profiles.keys().collect::<Vec<&String>>(),
- ]
- .concat()
- {
- let profile = match node.node_settings.profiles.get(profile_name) {
- Some(x) => x,
- None => {
- return Err(RunDeployError::ProfileNotFound(profile_name.to_owned()))
- }
- };
-
- if !profiles_list.iter().any(|(n, _)| n == profile_name) {
- profiles_list.push((&profile_name, profile));
- }
- }
-
- profiles_list
- .into_iter()
- .map(|x| ((node_name.as_str(), node), x))
- .collect()
- }
- (None, None) => {
- let mut l = Vec::new();
-
- for (node_name, node) in &data.nodes {
- let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
-
- for profile_name in [
- node.node_settings.profiles_order.iter().collect(),
- node.node_settings.profiles.keys().collect::<Vec<&String>>(),
- ]
- .concat()
- {
- let profile = match node.node_settings.profiles.get(profile_name) {
- Some(x) => x,
- None => {
- return Err(RunDeployError::ProfileNotFound(
- profile_name.to_owned(),
- ))
- }
- };
-
- if !profiles_list.iter().any(|(n, _)| n == profile_name) {
- profiles_list.push((&profile_name, profile));
- }
- }
-
- let ll: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
- profiles_list
- .into_iter()
- .map(|x| ((node_name.as_str(), node), x))
- .collect();
-
- l.extend(ll);
- }
-
- l
- }
- (None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
- };
-
- let mut parts: Vec<(utils::DeployData, utils::DeployDefs)> = Vec::new();
-
- for ((node_name, node), (profile_name, profile)) in to_deploy {
- let deploy_data = utils::make_deploy_data(
- &data.generic_settings,
- node,
- node_name,
- profile,
- profile_name,
- &cmd_overrides,
- debug_logs,
- log_dir.as_deref(),
- );
-
- let deploy_defs = deploy_data.defs()?;
-
- parts.push((deploy_data, deploy_defs));
- }
-
- if interactive {
- prompt_deployment(&parts[..])?;
- } else {
- print_deployment(&parts[..])?;
- }
-
- for (deploy_data, deploy_defs) in &parts {
- utils::push::push_profile(
- supports_flakes,
- check_sigs,
- deploy_flake.repo,
- &deploy_data,
- &deploy_defs,
- keep_result,
- result_path,
- extra_build_args,
- )
- .await?;
- }
-
- for (deploy_data, deploy_defs) in &parts {
- utils::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
- }
-
- Ok(())
-}
-
-#[derive(Error, Debug)]
-enum RunError {
- #[error("Failed to deploy profile: {0}")]
- DeployProfileError(#[from] utils::deploy::DeployProfileError),
- #[error("Failed to push profile: {0}")]
- PushProfileError(#[from] utils::push::PushProfileError),
- #[error("Failed to test for flake support: {0}")]
- FlakeTestError(std::io::Error),
- #[error("Failed to check deployment: {0}")]
- CheckDeploymentError(#[from] CheckDeploymentError),
- #[error("Failed to evaluate deployment data: {0}")]
- GetDeploymentDataError(#[from] GetDeploymentDataError),
- #[error("Error parsing flake: {0}")]
- ParseFlakeError(#[from] utils::ParseFlakeError),
- #[error("Error initiating logger: {0}")]
- LoggerError(#[from] flexi_logger::FlexiLoggerError),
- #[error("{0}")]
- RunDeployError(#[from] RunDeployError),
-}
-
-async fn run() -> Result<(), RunError> {
- let opts: Opts = Opts::parse();
-
- utils::init_logger(
- opts.debug_logs,
- opts.log_dir.as_deref(),
- utils::LoggerType::Deploy,
- )?;
-
- let deploy_flake = utils::parse_flake(opts.flake.as_str())?;
-
- let cmd_overrides = utils::CmdOverrides {
- ssh_user: opts.ssh_user,
- profile_user: opts.profile_user,
- ssh_opts: opts.ssh_opts,
- fast_connection: opts.fast_connection,
- auto_rollback: opts.auto_rollback,
- hostname: opts.hostname,
- magic_rollback: opts.magic_rollback,
- temp_path: opts.temp_path,
- confirm_timeout: opts.confirm_timeout,
- };
-
- let supports_flakes = test_flake_support()
- .await
- .map_err(RunError::FlakeTestError)?;
-
- if !supports_flakes {
- warn!("A Nix version without flakes support was detected, support for this is work in progress");
- }
-
- if !opts.skip_checks {
- check_deployment(supports_flakes, deploy_flake.repo, &opts.extra_build_args).await?;
- }
-
- let data =
- get_deployment_data(supports_flakes, deploy_flake.repo, &opts.extra_build_args).await?;
-
- let result_path = opts.result_path.as_deref();
-
- run_deploy(
- deploy_flake,
- data,
- supports_flakes,
- opts.checksigs,
- opts.interactive,
- cmd_overrides,
- opts.keep_result,
- result_path,
- &opts.extra_build_args,
- opts.debug_logs,
- opts.log_dir,
- )
- .await?;
-
- Ok(())
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
- match run().await {
- Ok(()) => (),
- Err(err) => good_panic!("{}", err),
- }
-
- Ok(())
-}
diff --git a/src/push.rs b/src/push.rs
new file mode 100644
index 0000000..503e062
--- /dev/null
+++ b/src/push.rs
@@ -0,0 +1,174 @@
+// SPDX-FileCopyrightText: 2020 Serokell
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::process::Stdio;
+use tokio::process::Command;
+use std::path::Path;
+
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum PushProfileError {
+ #[error("Failed to calculate activate bin path from deploy bin path: {0}")]
+ DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
+ #[error("Failed to run Nix build command: {0}")]
+ BuildError(std::io::Error),
+ #[error("Nix build command resulted in a bad exit code: {0:?}")]
+ BuildExitError(Option<i32>),
+ #[error("Activation script deploy-rs-activate does not exist in profile.\n\
+ Did you forget to use deploy-rs#lib.<...>.activate.<...> on your profile path?")]
+ DeployRsActivateDoesntExist,
+ #[error("Activation script activate-rs does not exist in profile.\n\
+ Is there a mismatch in deploy-rs used in the flake you're deploying and deploy-rs command you're running?")]
+ ActivateRsDoesntExist,
+ #[error("Failed to run Nix sign command: {0}")]
+ SignError(std::io::Error),
+ #[error("Nix sign command resulted in a bad exit code: {0:?}")]
+ SignExitError(Option<i32>),
+ #[error("Failed to run Nix copy command: {0}")]
+ CopyError(std::io::Error),
+ #[error("Nix copy command resulted in a bad exit code: {0:?}")]
+ CopyExitError(Option<i32>),
+}
+
+pub async fn push_profile(
+ supports_flakes: bool,
+ check_sigs: bool,
+ repo: &str,
+ deploy_data: &super::DeployData<'_>,
+ deploy_defs: &super::DeployDefs,
+ keep_result: bool,
+ result_path: Option<&str>,
+ extra_build_args: &[String],
+) -> Result<(), PushProfileError> {
+ info!(
+ "Building profile `{}` for node `{}`",
+ deploy_data.profile_name, deploy_data.node_name
+ );
+
+ let mut build_c = if supports_flakes {
+ Command::new("nix")
+ } else {
+ Command::new("nix-build")
+ };
+
+ let mut build_command = if supports_flakes {
+ build_c.arg("build").arg(format!(
+ "{}#deploy.nodes.\"{}\".profiles.\"{}\".path",
+ repo, deploy_data.node_name, deploy_data.profile_name
+ ))
+ } else {
+ build_c.arg(&repo).arg("-A").arg(format!(
+ "deploy.nodes.\"{}\".profiles.\"{}\".path",
+ deploy_data.node_name, deploy_data.profile_name
+ ))
+ };
+
+ build_command = match (keep_result, supports_flakes) {
+ (true, _) => {
+ let result_path = result_path.unwrap_or("./.deploy-gc");
+
+ build_command.arg("--out-link").arg(format!(
+ "{}/{}/{}",
+ result_path, deploy_data.node_name, deploy_data.profile_name
+ ))
+ }
+ (false, false) => build_command.arg("--no-out-link"),
+ (false, true) => build_command.arg("--no-link"),
+ };
+
+ for extra_arg in extra_build_args {
+ build_command = build_command.arg(extra_arg);
+ }
+
+ let build_exit_status = build_command
+ // Logging should be in stderr, this just stops the store path from printing for no reason
+ .stdout(Stdio::null())
+ .status()
+ .await
+ .map_err(PushProfileError::BuildError)?;
+
+ match build_exit_status.code() {
+ Some(0) => (),
+ a => return Err(PushProfileError::BuildExitError(a)),
+ };
+
+ if ! Path::new(format!("{}/deploy-rs-activate", deploy_data.profile.profile_settings.path).as_str()).exists() {
+ return Err(PushProfileError::DeployRsActivateDoesntExist);
+ }
+
+ if ! Path::new(format!("{}/activate-rs", deploy_data.profile.profile_settings.path).as_str()).exists() {
+ return Err(PushProfileError::ActivateRsDoesntExist);
+ }
+
+
+
+ if let Ok(local_key) = std::env::var("LOCAL_KEY") {
+ info!(
+ "Signing key present! Signing profile `{}` for node `{}`",
+ deploy_data.profile_name, deploy_data.node_name
+ );
+
+ let sign_exit_status = Command::new("nix")
+ .arg("sign-paths")
+ .arg("-r")
+ .arg("-k")
+ .arg(local_key)
+ .arg(&deploy_data.profile.profile_settings.path)
+ .status()
+ .await
+ .map_err(PushProfileError::SignError)?;
+
+ match sign_exit_status.code() {
+ Some(0) => (),
+ a => return Err(PushProfileError::SignExitError(a)),
+ };
+ }
+
+ debug!(
+ "Copying profile `{}` to node `{}`",
+ deploy_data.profile_name, deploy_data.node_name
+ );
+
+ let mut copy_command_ = Command::new("nix");
+ let mut copy_command = copy_command_.arg("copy");
+
+ if deploy_data.merged_settings.fast_connection != Some(true) {
+ copy_command = copy_command.arg("--substitute-on-destination");
+ }
+
+ if !check_sigs {
+ copy_command = copy_command.arg("--no-check-sigs");
+ }
+
+ let ssh_opts_str = deploy_data
+ .merged_settings
+ .ssh_opts
+ // This should provide some extra safety, but it also breaks for some reason, oh well
+ // .iter()
+ // .map(|x| format!("'{}'", x))
+ // .collect::>()
+ .join(" ");
+
+ let hostname = match deploy_data.cmd_overrides.hostname {
+ Some(ref x) => x,
+ None => &deploy_data.node.node_settings.hostname,
+ };
+
+ let copy_exit_status = copy_command
+ .arg("--to")
+ .arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname))
+ .arg(&deploy_data.profile.profile_settings.path)
+ .env("NIX_SSHOPTS", ssh_opts_str)
+ .status()
+ .await
+ .map_err(PushProfileError::CopyError)?;
+
+ match copy_exit_status.code() {
+ Some(0) => (),
+ a => return Err(PushProfileError::CopyExitError(a)),
+ };
+
+ Ok(())
+}
diff --git a/src/utils/data.rs b/src/utils/data.rs
deleted file mode 100644
index f557e41..0000000
--- a/src/utils/data.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use merge::Merge;
-
-use std::collections::HashMap;
-
-#[derive(Deserialize, Debug, Clone, Merge)]
-pub struct GenericSettings {
- #[serde(rename(deserialize = "sshUser"))]
- pub ssh_user: Option<String>,
- pub user: Option<String>,
- #[serde(
- skip_serializing_if = "Vec::is_empty",
- default,
- rename(deserialize = "sshOpts")
- )]
- #[merge(strategy = merge::vec::append)]
- pub ssh_opts: Vec<String>,
- #[serde(rename(deserialize = "fastConnection"))]
- pub fast_connection: Option<bool>,
- #[serde(rename(deserialize = "autoRollback"))]
- pub auto_rollback: Option<bool>,
- #[serde(rename(deserialize = "confirmTimeout"))]
- pub confirm_timeout: Option<u16>,
- #[serde(rename(deserialize = "tempPath"))]
- pub temp_path: Option<String>,
- #[serde(rename(deserialize = "magicRollback"))]
- pub magic_rollback: Option<bool>,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct NodeSettings {
- pub hostname: String,
- pub profiles: HashMap<String, Profile>,
- #[serde(
- skip_serializing_if = "Vec::is_empty",
- default,
- rename(deserialize = "profilesOrder")
- )]
- pub profiles_order: Vec<String>,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct ProfileSettings {
- pub path: String,
- #[serde(rename(deserialize = "profilePath"))]
- pub profile_path: Option<String>,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct Profile {
- #[serde(flatten)]
- pub profile_settings: ProfileSettings,
- #[serde(flatten)]
- pub generic_settings: GenericSettings,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct Node {
- #[serde(flatten)]
- pub generic_settings: GenericSettings,
- #[serde(flatten)]
- pub node_settings: NodeSettings,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct Data {
- #[serde(flatten)]
- pub generic_settings: GenericSettings,
- pub nodes: HashMap<String, Node>,
-}
diff --git a/src/utils/deploy.rs b/src/utils/deploy.rs
deleted file mode 100644
index 3371160..0000000
--- a/src/utils/deploy.rs
+++ /dev/null
@@ -1,296 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-// SPDX-FileCopyrightText: 2020 Andreas Fuchs
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use std::borrow::Cow;
-use tokio::process::Command;
-
-use thiserror::Error;
-
-fn build_activate_command(
- sudo: &Option<String>,
- profile_path: &str,
- closure: &str,
- auto_rollback: bool,
- temp_path: &Cow<str>,
- confirm_timeout: u16,
- magic_rollback: bool,
- debug_logs: bool,
- log_dir: Option<&str>,
-) -> String {
- let mut self_activate_command = format!("{}/activate-rs", closure);
-
- if debug_logs {
- self_activate_command = format!("{} --debug-logs", self_activate_command);
- }
-
- if let Some(log_dir) = log_dir {
- self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir);
- }
-
- self_activate_command = format!(
- "{} --temp-path '{}' activate '{}' '{}'",
- self_activate_command, temp_path, closure, profile_path
- );
-
- self_activate_command = format!(
- "{} --confirm-timeout {}",
- self_activate_command, confirm_timeout
- );
-
- if magic_rollback {
- self_activate_command = format!("{} --magic-rollback", self_activate_command);
- }
-
- if auto_rollback {
- self_activate_command = format!("{} --auto-rollback", self_activate_command);
- }
-
- if let Some(sudo_cmd) = &sudo {
- self_activate_command = format!("{} {}", sudo_cmd, self_activate_command);
- }
-
- self_activate_command
-}
-
-#[test]
-fn test_activation_command_builder() {
- let sudo = Some("sudo -u test".to_string());
- let profile_path = "/blah/profiles/test";
- let closure = "/nix/store/blah/etc";
- let auto_rollback = true;
- let temp_path = &"/tmp".into();
- let confirm_timeout = 30;
- let magic_rollback = true;
- let debug_logs = true;
- let log_dir = Some("/tmp/something.txt");
-
- assert_eq!(
- build_activate_command(
- &sudo,
- profile_path,
- closure,
- auto_rollback,
- temp_path,
- confirm_timeout,
- magic_rollback,
- debug_logs,
- log_dir
- ),
- "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' activate '/nix/store/blah/etc' '/blah/profiles/test' --confirm-timeout 30 --magic-rollback --auto-rollback"
- .to_string(),
- );
-}
-
-fn build_wait_command(
- sudo: &Option<String>,
- closure: &str,
- temp_path: &Cow<str>,
- debug_logs: bool,
- log_dir: Option<&str>,
-) -> String {
- let mut self_activate_command = format!("{}/activate-rs", closure);
-
- if debug_logs {
- self_activate_command = format!("{} --debug-logs", self_activate_command);
- }
-
- if let Some(log_dir) = log_dir {
- self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir);
- }
-
- self_activate_command = format!(
- "{} --temp-path '{}' wait '{}'",
- self_activate_command, temp_path, closure
- );
-
- if let Some(sudo_cmd) = &sudo {
- self_activate_command = format!("{} {}", sudo_cmd, self_activate_command);
- }
-
- self_activate_command
-}
-
-#[test]
-fn test_wait_command_builder() {
- let sudo = Some("sudo -u test".to_string());
- let closure = "/nix/store/blah/etc";
- let temp_path = &"/tmp".into();
- let debug_logs = true;
- let log_dir = Some("/tmp/something.txt");
-
- assert_eq!(
- build_wait_command(
- &sudo,
- closure,
- temp_path,
- debug_logs,
- log_dir
- ),
- "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' wait '/nix/store/blah/etc'"
- .to_string(),
- );
-}
-
-#[derive(Error, Debug)]
-pub enum DeployProfileError {
- #[error("Failed to calculate activate bin path from deploy bin path: {0}")]
- DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
-
- #[error("Failed to spawn activation command over SSH: {0}")]
- SSHSpawnActivateError(std::io::Error),
-
- #[error("Failed to run activation command over SSH: {0}")]
- SSHActivateError(std::io::Error),
- #[error("Activating over SSH resulted in a bad exit code: {0:?}")]
- SSHActivateExitError(Option<i32>),
-
- #[error("Failed to run wait command over SSH: {0}")]
- SSHWaitError(std::io::Error),
- #[error("Waiting over SSH resulted in a bad exit code: {0:?}")]
- SSHWaitExitError(Option<i32>),
-
- #[error("Failed to run confirmation command over SSH (the server should roll back): {0}")]
- SSHConfirmError(std::io::Error),
- #[error(
- "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}"
- )]
- SSHConfirmExitError(Option<i32>),
-}
-
-pub async fn deploy_profile(
- deploy_data: &super::DeployData<'_>,
- deploy_defs: &super::DeployDefs,
-) -> Result<(), DeployProfileError> {
- info!(
- "Activating profile `{}` for node `{}`",
- deploy_data.profile_name, deploy_data.node_name
- );
-
- let temp_path: Cow<str> = match &deploy_data.merged_settings.temp_path {
- Some(x) => x.into(),
- None => "/tmp".into(),
- };
-
- let confirm_timeout = deploy_data.merged_settings.confirm_timeout.unwrap_or(30);
-
- let magic_rollback = deploy_data.merged_settings.magic_rollback.unwrap_or(true);
-
- let auto_rollback = deploy_data.merged_settings.auto_rollback.unwrap_or(true);
-
- let self_activate_command = build_activate_command(
- &deploy_defs.sudo,
- &deploy_defs.profile_path,
- &deploy_data.profile.profile_settings.path,
- auto_rollback,
- &temp_path,
- confirm_timeout,
- magic_rollback,
- deploy_data.debug_logs,
- deploy_data.log_dir,
- );
-
- debug!("Constructed activation command: {}", self_activate_command);
-
- let self_wait_command = build_wait_command(
- &deploy_defs.sudo,
- &deploy_data.profile.profile_settings.path,
- &temp_path,
- deploy_data.debug_logs,
- deploy_data.log_dir,
- );
-
- debug!("Constructed wait command: {}", self_wait_command);
-
- let hostname = match deploy_data.cmd_overrides.hostname {
- Some(ref x) => x,
- None => &deploy_data.node.node_settings.hostname,
- };
-
- let ssh_addr = format!("ssh://{}@{}", deploy_defs.ssh_user, hostname);
-
- let mut ssh_activate_command_ = Command::new("ssh");
- let ssh_activate_command = ssh_activate_command_.arg(&ssh_addr);
-
- for ssh_opt in &deploy_data.merged_settings.ssh_opts {
- ssh_activate_command.arg(&ssh_opt);
- }
-
- if !magic_rollback {
- let ssh_activate_exit_status = ssh_activate_command
- .arg(self_activate_command)
- .status()
- .await
- .map_err(DeployProfileError::SSHActivateError)?;
-
- match ssh_activate_exit_status.code() {
- Some(0) => (),
- a => return Err(DeployProfileError::SSHActivateExitError(a)),
- };
-
- info!("Success activating, done!");
- } else {
- let ssh_activate = ssh_activate_command
- .arg(self_activate_command)
- .spawn()
- .map_err(DeployProfileError::SSHSpawnActivateError)?;
-
- info!("Creating activation waiter");
-
- let mut ssh_wait_command_ = Command::new("ssh");
- let ssh_wait_command = ssh_wait_command_.arg(&ssh_addr);
-
- for ssh_opt in &deploy_data.merged_settings.ssh_opts {
- ssh_wait_command.arg(ssh_opt);
- }
-
- let ssh_wait_exit_status = ssh_wait_command
- .arg(self_wait_command)
- .status()
- .await
- .map_err(DeployProfileError::SSHWaitError)?;
-
- match ssh_wait_exit_status.code() {
- Some(0) => (),
- a => return Err(DeployProfileError::SSHWaitExitError(a)),
- };
-
- info!("Success activating, attempting to confirm activation");
-
- let mut c = Command::new("ssh");
- let mut ssh_confirm_command = c.arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname));
-
- for ssh_opt in &deploy_data.merged_settings.ssh_opts {
- ssh_confirm_command = ssh_confirm_command.arg(ssh_opt);
- }
-
- let lock_path =
- super::make_lock_path(&temp_path, &deploy_data.profile.profile_settings.path);
-
- let mut confirm_command = format!("rm {}", lock_path);
- if let Some(sudo_cmd) = &deploy_defs.sudo {
- confirm_command = format!("{} {}", sudo_cmd, confirm_command);
- }
-
- debug!(
- "Attempting to run command to confirm deployment: {}",
- confirm_command
- );
-
- let ssh_exit_status = ssh_confirm_command
- .arg(confirm_command)
- .status()
- .await
- .map_err(DeployProfileError::SSHConfirmError)?;
-
- match ssh_exit_status.code() {
- Some(0) => (),
- a => return Err(DeployProfileError::SSHConfirmExitError(a)),
- };
-
- info!("Deployment confirmed.");
- }
-
- Ok(())
-}
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
deleted file mode 100644
index bc46f4c..0000000
--- a/src/utils/mod.rs
+++ /dev/null
@@ -1,426 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-// SPDX-FileCopyrightText: 2020 Andreas Fuchs
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use rnix::{types::*, SyntaxKind::*};
-
-use merge::Merge;
-
-use thiserror::Error;
-
-use flexi_logger::*;
-
-#[macro_export]
-macro_rules! good_panic {
- ($($tts:tt)*) => {{
- error!($($tts)*);
- std::process::exit(1);
- }}
-}
-
-pub fn make_lock_path(temp_path: &str, closure: &str) -> String {
- let lock_hash =
- &closure["/nix/store/".len()..closure.find("-").unwrap_or_else(|| closure.len())];
- format!("{}/deploy-rs-canary-{}", temp_path, lock_hash)
-}
-
-fn make_emoji(level: log::Level) -> &'static str {
- match level {
- log::Level::Error => "❌",
- log::Level::Warn => "⚠️",
- log::Level::Info => "ℹ️",
- log::Level::Debug => "❓",
- log::Level::Trace => "🖊️",
- }
-}
-
-pub fn logger_formatter_activate(
- w: &mut dyn std::io::Write,
- _now: &mut DeferredNow,
- record: &Record,
-) -> Result<(), std::io::Error> {
- let level = record.level();
-
- write!(
- w,
- "⭐ {} [activate] [{}] {}",
- make_emoji(level),
- style(level, level.to_string()),
- record.args()
- )
-}
-
-pub fn logger_formatter_wait(
- w: &mut dyn std::io::Write,
- _now: &mut DeferredNow,
- record: &Record,
-) -> Result<(), std::io::Error> {
- let level = record.level();
-
- write!(
- w,
- "👀 {} [wait] [{}] {}",
- make_emoji(level),
- style(level, level.to_string()),
- record.args()
- )
-}
-
-pub fn logger_formatter_deploy(
- w: &mut dyn std::io::Write,
- _now: &mut DeferredNow,
- record: &Record,
-) -> Result<(), std::io::Error> {
- let level = record.level();
-
- write!(
- w,
- "🚀 {} [deploy] [{}] {}",
- make_emoji(level),
- style(level, level.to_string()),
- record.args()
- )
-}
-
-pub enum LoggerType {
- Deploy,
- Activate,
- Wait,
-}
-
-pub fn init_logger(
- debug_logs: bool,
- log_dir: Option<&str>,
- logger_type: LoggerType,
-) -> Result<(), FlexiLoggerError> {
- let logger_formatter = match logger_type {
- LoggerType::Deploy => logger_formatter_deploy,
- LoggerType::Activate => logger_formatter_activate,
- LoggerType::Wait => logger_formatter_wait,
- };
-
- if let Some(log_dir) = log_dir {
- let mut logger = Logger::with_env_or_str("debug")
- .log_to_file()
- .format_for_stderr(logger_formatter)
- .set_palette("196;208;51;7;8".to_string())
- .directory(log_dir)
- .duplicate_to_stderr(match debug_logs {
- true => Duplicate::Debug,
- false => Duplicate::Info,
- })
- .print_message();
-
- match logger_type {
- LoggerType::Activate => logger = logger.discriminant("activate"),
- LoggerType::Wait => logger = logger.discriminant("wait"),
- LoggerType::Deploy => (),
- }
-
- logger.start()?;
- } else {
- Logger::with_env_or_str(match debug_logs {
- true => "debug",
- false => "info",
- })
- .log_target(LogTarget::StdErr)
- .format(logger_formatter)
- .set_palette("196;208;51;7;8".to_string())
- .start()?;
- }
-
- Ok(())
-}
-
-pub mod data;
-pub mod deploy;
-pub mod push;
-
-#[derive(Debug)]
-pub struct CmdOverrides {
- pub ssh_user: Option,
- pub profile_user: Option,
- pub ssh_opts: Option,
- pub fast_connection: Option,
- pub auto_rollback: Option,
- pub hostname: Option,
- pub magic_rollback: Option,
- pub temp_path: Option,
- pub confirm_timeout: Option,
-}
-
-#[derive(PartialEq, Debug)]
-pub struct DeployFlake<'a> {
- pub repo: &'a str,
- pub node: Option,
- pub profile: Option,
-}
-
-#[derive(Error, Debug)]
-pub enum ParseFlakeError {
- #[error("The given path was too long, did you mean to put something in quotes?")]
- PathTooLong,
- #[error("Unrecognized node or token encountered")]
- Unrecognized,
-}
-pub fn parse_flake(flake: &str) -> Result {
- let flake_fragment_start = flake.find('#');
- let (repo, maybe_fragment) = match flake_fragment_start {
- Some(s) => (&flake[..s], Some(&flake[s + 1..])),
- None => (flake, None),
- };
-
- let mut node: Option = None;
- let mut profile: Option = None;
-
- if let Some(fragment) = maybe_fragment {
- let ast = rnix::parse(fragment);
-
- let first_child = match ast.root().node().first_child() {
- Some(x) => x,
- None => {
- return Ok(DeployFlake {
- repo,
- node: None,
- profile: None,
- })
- }
- };
-
- let mut node_over = false;
-
- for entry in first_child.children_with_tokens() {
- let x: Option = match (entry.kind(), node_over) {
- (TOKEN_DOT, false) => {
- node_over = true;
- None
- }
- (TOKEN_DOT, true) => {
- return Err(ParseFlakeError::PathTooLong);
- }
- (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()),
- (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()),
- (NODE_STRING, _) => {
- let c = entry
- .into_node()
- .unwrap()
- .children_with_tokens()
- .nth(1)
- .unwrap();
-
- Some(c.into_token().unwrap().text().to_string())
- }
- _ => return Err(ParseFlakeError::Unrecognized),
- };
-
- if !node_over {
- node = x;
- } else {
- profile = x;
- }
- }
- }
-
- Ok(DeployFlake {
- repo,
- node,
- profile,
- })
-}
-
-#[test]
-fn test_parse_flake() {
- assert_eq!(
- parse_flake("../deploy/examples/system").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: None,
- profile: None,
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system#").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: None,
- profile: None,
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: Some("computer".to_string()),
- profile: Some("something.nix".to_string()),
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: Some("example.com".to_string()),
- profile: Some("system".to_string()),
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system#example").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: Some("example".to_string()),
- profile: None
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system#example.system").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: Some("example".to_string()),
- profile: Some("system".to_string())
- }
- );
-
- assert_eq!(
- parse_flake("../deploy/examples/system").unwrap(),
- DeployFlake {
- repo: "../deploy/examples/system",
- node: None,
- profile: None,
- }
- );
-}
-
-#[derive(Debug, Clone)]
-pub struct DeployData<'a> {
- pub node_name: &'a str,
- pub node: &'a data::Node,
- pub profile_name: &'a str,
- pub profile: &'a data::Profile,
-
- pub cmd_overrides: &'a CmdOverrides,
-
- pub merged_settings: data::GenericSettings,
-
- pub debug_logs: bool,
- pub log_dir: Option<&'a str>,
-}
-
-#[derive(Debug)]
-pub struct DeployDefs {
- pub ssh_user: String,
- pub profile_user: String,
- pub profile_path: String,
- pub sudo: Option,
-}
-
-#[derive(Error, Debug)]
-pub enum DeployDataDefsError {
- #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")]
- NoProfileUser(String, String),
-}
-
-impl<'a> DeployData<'a> {
- pub fn defs(&'a self) -> Result {
- let ssh_user = match self.merged_settings.ssh_user {
- Some(ref u) => u.clone(),
- None => whoami::username(),
- };
-
- let profile_user = match self.merged_settings.user {
- Some(ref x) => x.clone(),
- None => match self.merged_settings.ssh_user {
- Some(ref x) => x.clone(),
- None => {
- return Err(DeployDataDefsError::NoProfileUser(
- self.profile_name.to_owned(),
- self.node_name.to_owned(),
- ))
- }
- },
- };
-
- let profile_path = match self.profile.profile_settings.profile_path {
- None => match &profile_user[..] {
- "root" => format!("/nix/var/nix/profiles/{}", self.profile_name),
- _ => format!(
- "/nix/var/nix/profiles/per-user/{}/{}",
- profile_user, self.profile_name
- ),
- },
- Some(ref x) => x.clone(),
- };
-
- let sudo: Option = match self.merged_settings.user {
- Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)),
- _ => None,
- };
-
- Ok(DeployDefs {
- ssh_user,
- profile_user,
- profile_path,
- sudo,
- })
- }
-}
-
-pub fn make_deploy_data<'a, 's>(
- top_settings: &'s data::GenericSettings,
- node: &'a data::Node,
- node_name: &'a str,
- profile: &'a data::Profile,
- profile_name: &'a str,
- cmd_overrides: &'a CmdOverrides,
- debug_logs: bool,
- log_dir: Option<&'a str>,
-) -> DeployData<'a> {
- let mut merged_settings = profile.generic_settings.clone();
- merged_settings.merge(node.generic_settings.clone());
- merged_settings.merge(top_settings.clone());
-
- if cmd_overrides.ssh_user.is_some() {
- merged_settings.ssh_user = cmd_overrides.ssh_user.clone();
- }
- if cmd_overrides.profile_user.is_some() {
- merged_settings.user = cmd_overrides.profile_user.clone();
- }
- if let Some(ref ssh_opts) = cmd_overrides.ssh_opts {
- merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect();
- }
- if let Some(fast_connection) = cmd_overrides.fast_connection {
- merged_settings.fast_connection = Some(fast_connection);
- }
- if let Some(auto_rollback) = cmd_overrides.auto_rollback {
- merged_settings.auto_rollback = Some(auto_rollback);
- }
- if let Some(magic_rollback) = cmd_overrides.magic_rollback {
- merged_settings.magic_rollback = Some(magic_rollback);
- }
-
- DeployData {
- profile,
- profile_name,
- node,
- node_name,
-
- cmd_overrides,
-
- merged_settings,
-
- debug_logs,
- log_dir,
- }
-}
-
-#[derive(Error, Debug)]
-pub enum DeployPathToActivatePathError {
- #[error("Deploy path did not have a parent directory")]
- PathTooShort,
- #[error("Deploy path was not valid utf8")]
- InvalidUtf8,
-}
diff --git a/src/utils/push.rs b/src/utils/push.rs
deleted file mode 100644
index 503e062..0000000
--- a/src/utils/push.rs
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-FileCopyrightText: 2020 Serokell
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use std::process::Stdio;
-use tokio::process::Command;
-use std::path::Path;
-
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum PushProfileError {
- #[error("Failed to calculate activate bin path from deploy bin path: {0}")]
- DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
- #[error("Failed to run Nix build command: {0}")]
- BuildError(std::io::Error),
- #[error("Nix build command resulted in a bad exit code: {0:?}")]
- BuildExitError(Option),
- #[error("Activation script deploy-rs-activate does not exist in profile.\n\
- Did you forget to use deploy-rs#lib.<...>.activate.<...> on your profile path?")]
- DeployRsActivateDoesntExist,
- #[error("Activation script activate-rs does not exist in profile.\n\
- Is there a mismatch in deploy-rs used in the flake you're deploying and deploy-rs command you're running?")]
- ActivateRsDoesntExist,
- #[error("Failed to run Nix sign command: {0}")]
- SignError(std::io::Error),
- #[error("Nix sign command resulted in a bad exit code: {0:?}")]
- SignExitError(Option),
- #[error("Failed to run Nix copy command: {0}")]
- CopyError(std::io::Error),
- #[error("Nix copy command resulted in a bad exit code: {0:?}")]
- CopyExitError(Option),
-}
-
-pub async fn push_profile(
- supports_flakes: bool,
- check_sigs: bool,
- repo: &str,
- deploy_data: &super::DeployData<'_>,
- deploy_defs: &super::DeployDefs,
- keep_result: bool,
- result_path: Option<&str>,
- extra_build_args: &[String],
-) -> Result<(), PushProfileError> {
- info!(
- "Building profile `{}` for node `{}`",
- deploy_data.profile_name, deploy_data.node_name
- );
-
- let mut build_c = if supports_flakes {
- Command::new("nix")
- } else {
- Command::new("nix-build")
- };
-
- let mut build_command = if supports_flakes {
- build_c.arg("build").arg(format!(
- "{}#deploy.nodes.\"{}\".profiles.\"{}\".path",
- repo, deploy_data.node_name, deploy_data.profile_name
- ))
- } else {
- build_c.arg(&repo).arg("-A").arg(format!(
- "deploy.nodes.\"{}\".profiles.\"{}\".path",
- deploy_data.node_name, deploy_data.profile_name
- ))
- };
-
- build_command = match (keep_result, supports_flakes) {
- (true, _) => {
- let result_path = result_path.unwrap_or("./.deploy-gc");
-
- build_command.arg("--out-link").arg(format!(
- "{}/{}/{}",
- result_path, deploy_data.node_name, deploy_data.profile_name
- ))
- }
- (false, false) => build_command.arg("--no-out-link"),
- (false, true) => build_command.arg("--no-link"),
- };
-
- for extra_arg in extra_build_args {
- build_command = build_command.arg(extra_arg);
- }
-
- let build_exit_status = build_command
- // Logging should be in stderr, this just stops the store path from printing for no reason
- .stdout(Stdio::null())
- .status()
- .await
- .map_err(PushProfileError::BuildError)?;
-
- match build_exit_status.code() {
- Some(0) => (),
- a => return Err(PushProfileError::BuildExitError(a)),
- };
-
- if ! Path::new(format!("{}/deploy-rs-activate", deploy_data.profile.profile_settings.path).as_str()).exists() {
- return Err(PushProfileError::DeployRsActivateDoesntExist);
- }
-
- if ! Path::new(format!("{}/activate-rs", deploy_data.profile.profile_settings.path).as_str()).exists() {
- return Err(PushProfileError::ActivateRsDoesntExist);
- }
-
-
-
- if let Ok(local_key) = std::env::var("LOCAL_KEY") {
- info!(
- "Signing key present! Signing profile `{}` for node `{}`",
- deploy_data.profile_name, deploy_data.node_name
- );
-
- let sign_exit_status = Command::new("nix")
- .arg("sign-paths")
- .arg("-r")
- .arg("-k")
- .arg(local_key)
- .arg(&deploy_data.profile.profile_settings.path)
- .status()
- .await
- .map_err(PushProfileError::SignError)?;
-
- match sign_exit_status.code() {
- Some(0) => (),
- a => return Err(PushProfileError::SignExitError(a)),
- };
- }
-
- debug!(
- "Copying profile `{}` to node `{}`",
- deploy_data.profile_name, deploy_data.node_name
- );
-
- let mut copy_command_ = Command::new("nix");
- let mut copy_command = copy_command_.arg("copy");
-
- if deploy_data.merged_settings.fast_connection != Some(true) {
- copy_command = copy_command.arg("--substitute-on-destination");
- }
-
- if !check_sigs {
- copy_command = copy_command.arg("--no-check-sigs");
- }
-
- let ssh_opts_str = deploy_data
- .merged_settings
- .ssh_opts
- // This should provide some extra safety, but it also breaks for some reason, oh well
- // .iter()
- // .map(|x| format!("'{}'", x))
- // .collect::>()
- .join(" ");
-
- let hostname = match deploy_data.cmd_overrides.hostname {
- Some(ref x) => x,
- None => &deploy_data.node.node_settings.hostname,
- };
-
- let copy_exit_status = copy_command
- .arg("--to")
- .arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname))
- .arg(&deploy_data.profile.profile_settings.path)
- .env("NIX_SSHOPTS", ssh_opts_str)
- .status()
- .await
- .map_err(PushProfileError::CopyError)?;
-
- match copy_exit_status.code() {
- Some(0) => (),
- a => return Err(PushProfileError::CopyExitError(a)),
- };
-
- Ok(())
-}
--
cgit v1.2.3