author    notgne2   2021-01-24 18:53:42 -0700
committer notgne2   2021-01-24 18:53:42 -0700
commit    b35fccfd67945d029906c217a6302928e849a3eb (patch)
tree      bbc503f06540f8ee953cd0a1f0b44c11f3722a68 /src/bin
parent    fc77473568cfcb86245c4cf45b59d7b86e049a5c (diff)
parent    a33127ad4144282696b061af61c188e75ee49452 (diff)
Merge branch 'master' into notgne2/document-hostname-dot
Diffstat (limited to 'src/bin')
-rw-r--r--  src/bin/activate.rs  (renamed from src/activate.rs)  203
-rw-r--r--  src/bin/deploy.rs    (renamed from src/main.rs)      228
2 files changed, 269 insertions(+), 162 deletions(-)
diff --git a/src/activate.rs b/src/bin/activate.rs
index 84d4b12..2f13b44 100644
--- a/src/activate.rs
+++ b/src/bin/activate.rs
@@ -3,6 +3,8 @@
//
// SPDX-License-Identifier: MPL-2.0
+use signal_hook::{consts::signal::SIGHUP, iterator::Signals};
+
use clap::Clap;
use tokio::fs;
@@ -18,27 +20,44 @@ use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use thiserror::Error;
-extern crate pretty_env_logger;
#[macro_use]
extern crate log;
-#[macro_use]
extern crate serde_derive;
-#[macro_use]
-mod utils;
-
-/// Activation portion of the simple Rust Nix deploy tool
+/// Remote activation utility for deploy-rs
#[derive(Clap, Debug)]
#[clap(version = "1.0", author = "Serokell <https://serokell.io/>")]
struct Opts {
- profile_path: String,
- closure: String,
+ /// Print debug logs to output
+ #[clap(short, long)]
+ debug_logs: bool,
+ /// Directory to write logs to
+ #[clap(long)]
+ log_dir: Option<String>,
- /// Temp path for any temporary files that may be needed during activation
+ /// Path for any temporary files that may be needed during activation
#[clap(long)]
temp_path: String,
+ #[clap(subcommand)]
+ subcmd: SubCommand,
+}
+
+#[derive(Clap, Debug)]
+enum SubCommand {
+ Activate(ActivateOpts),
+ Wait(WaitOpts),
+}
+
+/// Activate a profile
+#[derive(Clap, Debug)]
+struct ActivateOpts {
+ /// The closure to activate
+ closure: String,
+ /// The profile path to install into
+ profile_path: String,
+
/// Maximum time to wait for confirmation after activation
#[clap(long)]
confirm_timeout: u16,
@@ -52,6 +71,13 @@ struct Opts {
auto_rollback: bool,
}
+/// Wait for profile activation
+#[derive(Clap, Debug)]
+struct WaitOpts {
+ /// The closure to wait for
+ closure: String,
+}
+
#[derive(Error, Debug)]
pub enum DeactivateError {
#[error("Failed to execute the rollback command: {0}")]
@@ -195,8 +221,9 @@ pub async fn activation_confirmation(
confirm_timeout: u16,
closure: String,
) -> Result<(), ActivationConfirmationError> {
- let lock_hash = &closure["/nix/store/".len()..];
- let lock_path = format!("{}/deploy-rs-canary-{}", temp_path, lock_hash);
+ let lock_path = deploy::make_lock_path(&temp_path, &closure);
+
+ debug!("Ensuring parent directory exists for canary file");
if let Some(parent) = Path::new(&lock_path).parent() {
fs::create_dir_all(parent)
@@ -204,53 +231,98 @@ pub async fn activation_confirmation(
.map_err(ActivationConfirmationError::CreateConfirmDirError)?;
}
+ debug!("Creating canary file");
+
fs::File::create(&lock_path)
.await
- .map_err(ActivationConfirmationError::CreateConfirmDirError)?;
+ .map_err(ActivationConfirmationError::CreateConfirmFileError)?;
+
+ debug!("Creating notify watcher");
let (deleted, done) = mpsc::channel(1);
+
let mut watcher: RecommendedWatcher =
Watcher::new_immediate(move |res: Result<notify::event::Event, notify::Error>| {
let send_result = match res {
Ok(e) if e.kind == notify::EventKind::Remove(notify::event::RemoveKind::File) => {
- deleted.blocking_send(Ok(()))
+ debug!("Got worthy removal event, sending on channel");
+ deleted.try_send(Ok(()))
+ }
+ Err(e) => {
+ debug!("Got error waiting for removal event, sending on channel");
+ deleted.try_send(Err(e))
}
Ok(_) => Ok(()), // ignore non-removal events
- Err(e) => deleted.blocking_send(Err(e)),
};
+
if let Err(e) = send_result {
- // We can't communicate our error, but panic-ing would
- // be bad; let's write an error and trust that the
- // activate function will realize we aren't sending
- // data.
- eprintln!("Could not send file system event to watcher: {}", e);
+ error!("Could not send file system event to watcher: {}", e);
}
})?;
- watcher.watch(lock_path, RecursiveMode::Recursive)?;
- if let fork::Fork::Child =
- fork::daemon(false, false).map_err(ActivationConfirmationError::ForkError)?
- {
- std::thread::spawn(move || {
- let rt = tokio::runtime::Runtime::new().unwrap();
+ watcher.watch(&lock_path, RecursiveMode::NonRecursive)?;
+
+ if let Err(err) = danger_zone(done, confirm_timeout).await {
+ error!("Error waiting for confirmation event: {}", err);
- rt.block_on(async move {
- if let Err(err) = danger_zone(done, confirm_timeout).await {
- if let Err(err) = deactivate(&profile_path).await {
- good_panic!("Error de-activating due to another error in confirmation thread, oh no...: {}", err);
- }
+ if let Err(err) = deactivate(&profile_path).await {
+ error!(
+ "Error de-activating due to another error waiting for confirmation, oh no...: {}",
+ err
+ );
+ }
+ }
- good_panic!("Error in confirmation thread: {}", err);
+ Ok(())
+}
+
+#[derive(Error, Debug)]
+pub enum WaitError {
+ #[error("Error creating watcher for activation: {0}")]
+ Watcher(#[from] notify::Error),
+ #[error("Error waiting for activation: {0}")]
+ Waiting(#[from] DangerZoneError),
+}
+pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> {
+ let lock_path = deploy::make_lock_path(&temp_path, &closure);
+
+ let (created, done) = mpsc::channel(1);
+
+ let mut watcher: RecommendedWatcher = {
+ // TODO: fix wasteful clone
+ let lock_path = lock_path.clone();
+
+ Watcher::new_immediate(move |res: Result<notify::event::Event, notify::Error>| {
+ let send_result = match res {
+ Ok(e) if e.kind == notify::EventKind::Create(notify::event::CreateKind::File) => {
+ match &e.paths[..] {
+ [x] if x == Path::new(&lock_path) => created.try_send(Ok(())),
+ _ => Ok(()),
}
- });
- })
- .join()
- .unwrap();
+ }
+ Err(e) => created.try_send(Err(e)),
+ Ok(_) => Ok(()), // ignore non-creation events
+ };
+
+ if let Err(e) = send_result {
+ error!("Could not send file system event to watcher: {}", e);
+ }
+ })?
+ };
+
+ watcher.watch(&temp_path, RecursiveMode::NonRecursive)?;
- info!("Confirmation successful!");
+ // Avoid a potential race condition by checking for existence after watcher creation
+ if fs::metadata(&lock_path).await.is_ok() {
+ watcher.unwatch(&temp_path)?;
+ return Ok(());
}
- std::process::exit(0);
+ danger_zone(done, 240).await?;
+
+ info!("Found canary file, done waiting!");
+
+ Ok(())
}
#[derive(Error, Debug)]
@@ -301,6 +373,8 @@ pub async fn activate(
}
};
+ debug!("Running activation script");
+
let activate_status = match Command::new(format!("{}/deploy-rs-activate", profile_path))
.env("PROFILE", &profile_path)
.current_dir(&profile_path)
@@ -331,6 +405,7 @@ pub async fn activate(
if magic_rollback {
info!("Magic rollback is enabled, setting up confirmation hook...");
+
match activation_confirmation(profile_path.clone(), temp_path, confirm_timeout, closure)
.await
{
@@ -347,26 +422,48 @@ pub async fn activate(
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
- if std::env::var("DEPLOY_LOG").is_err() {
- std::env::set_var("DEPLOY_LOG", "info");
- }
-
- pretty_env_logger::init_custom_env("DEPLOY_LOG");
+ // Ensure that this process stays alive after the SSH connection dies
+ let mut signals = Signals::new(&[SIGHUP])?;
+ std::thread::spawn(move || {
+ for _ in signals.forever() {
+ println!("Received NOHUP - ignoring...");
+ }
+ });
let opts: Opts = Opts::parse();
- match activate(
- opts.profile_path,
- opts.closure,
- opts.auto_rollback,
- opts.temp_path,
- opts.confirm_timeout,
- opts.magic_rollback,
- )
- .await
- {
+ deploy::init_logger(
+ opts.debug_logs,
+ opts.log_dir.as_deref(),
+ match opts.subcmd {
+ SubCommand::Activate(_) => deploy::LoggerType::Activate,
+ SubCommand::Wait(_) => deploy::LoggerType::Wait,
+ },
+ )?;
+
+ let r = match opts.subcmd {
+ SubCommand::Activate(activate_opts) => activate(
+ activate_opts.profile_path,
+ activate_opts.closure,
+ activate_opts.auto_rollback,
+ opts.temp_path,
+ activate_opts.confirm_timeout,
+ activate_opts.magic_rollback,
+ )
+ .await
+ .map_err(|x| Box::new(x) as Box<dyn std::error::Error>),
+
+ SubCommand::Wait(wait_opts) => wait(opts.temp_path, wait_opts.closure)
+ .await
+ .map_err(|x| Box::new(x) as Box<dyn std::error::Error>),
+ };
+
+ match r {
Ok(()) => (),
- Err(err) => good_panic!("{}", err),
+ Err(err) => {
+ error!("{}", err);
+ std::process::exit(1)
+ }
}
Ok(())
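
The canary-path computation that used to live inline (see the removed lines in activation_confirmation) is now a shared helper in the deploy library, and both activation_confirmation and wait funnel their timeout handling through danger_zone. A minimal sketch of the two helpers, reconstructed from the removed inline logic and the call sites above; the DangerZoneError variant names are assumptions and the real implementations live in the deploy crate:

    use std::time::Duration;
    use thiserror::Error;
    use tokio::{sync::mpsc, time::timeout};

    // Reconstructed from the inline logic removed above; sharing it means the
    // activate and wait sides always agree on the canary file location.
    pub fn make_lock_path(temp_path: &str, closure: &str) -> String {
        let lock_hash = &closure["/nix/store/".len()..];
        format!("{}/deploy-rs-canary-{}", temp_path, lock_hash)
    }

    // Hypothetical error type; variant names are guesses based on usage.
    #[derive(Error, Debug)]
    pub enum DangerZoneError {
        #[error("Timeout elapsed waiting for confirmation")]
        TimesUp,
        #[error("Watcher channel closed without a confirmation event")]
        NoConfirmation,
        #[error("Error from the file system watcher: {0}")]
        Watch(notify::Error),
    }

    // Sketch of danger_zone, inferred from its call sites: wait up to
    // confirm_timeout seconds for a single event on the notify channel.
    pub async fn danger_zone(
        mut done: mpsc::Receiver<Result<(), notify::Error>>,
        confirm_timeout: u16,
    ) -> Result<(), DangerZoneError> {
        match timeout(Duration::from_secs(confirm_timeout as u64), done.recv()).await {
            Ok(Some(Ok(()))) => Ok(()),                         // event arrived in time
            Ok(Some(Err(e))) => Err(DangerZoneError::Watch(e)), // watcher reported an error
            Ok(None) => Err(DangerZoneError::NoConfirmation),   // all senders dropped
            Err(_) => Err(DangerZoneError::TimesUp),            // timeout elapsed
        }
    }
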
diff --git a/src/main.rs b/src/bin/deploy.rs
index be7ad40..caf3d4e 100644
--- a/src/main.rs
+++ b/src/bin/deploy.rs
@@ -12,17 +12,12 @@ use tokio::process::Command;
use thiserror::Error;
-extern crate pretty_env_logger;
-
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
-#[macro_use]
-mod utils;
-
/// Simple Rust rewrite of a simple Nix Flake deployment tool
#[derive(Clap, Debug)]
#[clap(version = "1.0", author = "Serokell <https://serokell.io/>")]
@@ -39,6 +34,13 @@ struct Opts {
/// Extra arguments to be passed to nix build
extra_build_args: Vec<String>,
+ /// Print debug logs to output
+ #[clap(short, long)]
+ debug_logs: bool,
+ /// Directory to write logs to (also used by the background activation process)
+ #[clap(long)]
+ log_dir: Option<String>,
+
/// Keep the build outputs of each built profile
#[clap(short, long)]
keep_result: bool,
@@ -98,9 +100,9 @@ async fn test_flake_support() -> Result<bool, std::io::Error> {
#[derive(Error, Debug)]
enum CheckDeploymentError {
#[error("Failed to execute Nix checking command: {0}")]
- NixCheckError(#[from] std::io::Error),
+ NixCheck(#[from] std::io::Error),
#[error("Nix checking command resulted in a bad exit code: {0:?}")]
- NixCheckExitError(Option<i32>),
+ NixCheckExit(Option<i32>),
}
async fn check_deployment(
@@ -136,7 +138,7 @@ async fn check_deployment(
match check_status.code() {
Some(0) => (),
- a => return Err(CheckDeploymentError::NixCheckExitError(a)),
+ a => return Err(CheckDeploymentError::NixCheckExit(a)),
};
Ok(())
@@ -145,15 +147,15 @@ async fn check_deployment(
#[derive(Error, Debug)]
enum GetDeploymentDataError {
#[error("Failed to execute nix eval command: {0}")]
- NixEvalError(std::io::Error),
+ NixEval(std::io::Error),
#[error("Failed to read output from evaluation: {0}")]
- NixEvalOutError(std::io::Error),
+ NixEvalOut(std::io::Error),
#[error("Evaluation resulted in a bad exit code: {0:?}")]
- NixEvalExitError(Option<i32>),
+ NixEvalExit(Option<i32>),
#[error("Error converting evaluation output to utf8: {0}")]
- DecodeUtf8Error(#[from] std::string::FromUtf8Error),
+ DecodeUtf8(#[from] std::string::FromUtf8Error),
#[error("Error decoding the JSON from evaluation: {0}")]
- DecodeJsonError(#[from] serde_json::error::Error),
+ DecodeJson(#[from] serde_json::error::Error),
}
/// Evaluates the Nix in the given `repo` and return the processed Data from it
@@ -161,7 +163,7 @@ async fn get_deployment_data(
supports_flakes: bool,
repo: &str,
extra_build_args: &[String],
-) -> Result<utils::data::Data, GetDeploymentDataError> {
+) -> Result<deploy::data::Data, GetDeploymentDataError> {
info!("Evaluating flake in {}", repo);
let mut c = match supports_flakes {
@@ -193,16 +195,16 @@ async fn get_deployment_data(
let build_child = build_command
.stdout(Stdio::piped())
.spawn()
- .map_err(GetDeploymentDataError::NixEvalError)?;
+ .map_err(GetDeploymentDataError::NixEval)?;
let build_output = build_child
.wait_with_output()
.await
- .map_err(GetDeploymentDataError::NixEvalOutError)?;
+ .map_err(GetDeploymentDataError::NixEvalOut)?;
match build_output.status.code() {
Some(0) => (),
- a => return Err(GetDeploymentDataError::NixEvalExitError(a)),
+ a => return Err(GetDeploymentDataError::NixEvalExit(a)),
};
let data_json = String::from_utf8(build_output.stdout)?;
@@ -220,14 +222,14 @@ struct PromptPart<'a> {
}
fn print_deployment(
- parts: &[(utils::DeployData, utils::DeployDefs)],
+ parts: &[(deploy::DeployData, deploy::DeployDefs)],
) -> Result<(), toml::ser::Error> {
let mut part_map: HashMap<String, HashMap<String, PromptPart>> = HashMap::new();
for (data, defs) in parts {
part_map
.entry(data.node_name.to_string())
- .or_insert(HashMap::new())
+ .or_insert_with(HashMap::new)
.insert(
data.profile_name.to_string(),
PromptPart {
@@ -242,7 +244,7 @@ fn print_deployment(
let toml = toml::to_string(&part_map)?;
- warn!("The following profiles are going to be deployed:\n{}", toml);
+ info!("The following profiles are going to be deployed:\n{}", toml);
Ok(())
}
@@ -259,7 +261,7 @@ enum PromptDeploymentError {
}
fn prompt_deployment(
- parts: &[(utils::DeployData, utils::DeployDefs)],
+ parts: &[(deploy::DeployData, deploy::DeployDefs)],
) -> Result<(), PromptDeploymentError> {
print_deployment(parts)?;
@@ -309,9 +311,9 @@ fn prompt_deployment(
#[derive(Error, Debug)]
enum RunDeployError {
#[error("Failed to deploy profile: {0}")]
- DeployProfileError(#[from] utils::deploy::DeployProfileError),
+ DeployProfile(#[from] deploy::deploy::DeployProfileError),
#[error("Failed to push profile: {0}")]
- PushProfileError(#[from] utils::push::PushProfileError),
+ PushProfile(#[from] deploy::push::PushProfileError),
#[error("No profile named `{0}` was found")]
ProfileNotFound(String),
#[error("No node named `{0}` was found")]
@@ -319,45 +321,78 @@ enum RunDeployError {
#[error("Profile was provided without a node name")]
ProfileWithoutNode,
#[error("Error processing deployment definitions: {0}")]
- DeployDataDefsError(#[from] utils::DeployDataDefsError),
+ DeployDataDefs(#[from] deploy::DeployDataDefsError),
#[error("Failed to make printable TOML of deployment: {0}")]
TomlFormat(#[from] toml::ser::Error),
#[error("{0}")]
- PromptDeploymentError(#[from] PromptDeploymentError),
+ PromptDeployment(#[from] PromptDeploymentError),
}
+type ToDeploy<'a> = Vec<(
+ (&'a str, &'a deploy::data::Node),
+ (&'a str, &'a deploy::data::Profile),
+)>;
+
async fn run_deploy(
- deploy_flake: utils::DeployFlake<'_>,
- data: utils::data::Data,
+ deploy_flake: deploy::DeployFlake<'_>,
+ data: deploy::data::Data,
supports_flakes: bool,
check_sigs: bool,
interactive: bool,
- cmd_overrides: utils::CmdOverrides,
+ cmd_overrides: deploy::CmdOverrides,
keep_result: bool,
result_path: Option<&str>,
extra_build_args: &[String],
+ debug_logs: bool,
+ log_dir: Option<String>,
) -> Result<(), RunDeployError> {
- let to_deploy: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
- match (&deploy_flake.node, &deploy_flake.profile) {
- (Some(node_name), Some(profile_name)) => {
- let node = match data.nodes.get(node_name) {
- Some(x) => x,
- None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
- };
+ let to_deploy: ToDeploy = match (&deploy_flake.node, &deploy_flake.profile) {
+ (Some(node_name), Some(profile_name)) => {
+ let node = match data.nodes.get(node_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
+ };
+ let profile = match node.node_settings.profiles.get(profile_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
+ };
+
+ vec![((node_name, node), (profile_name, profile))]
+ }
+ (Some(node_name), None) => {
+ let node = match data.nodes.get(node_name) {
+ Some(x) => x,
+ None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
+ };
+
+ let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
+
+ for profile_name in [
+ node.node_settings.profiles_order.iter().collect(),
+ node.node_settings.profiles.keys().collect::<Vec<&String>>(),
+ ]
+ .concat()
+ {
let profile = match node.node_settings.profiles.get(profile_name) {
Some(x) => x,
None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
};
- vec![((node_name, node), (profile_name, profile))]
+ if !profiles_list.iter().any(|(n, _)| n == profile_name) {
+ profiles_list.push((&profile_name, profile));
+ }
}
- (Some(node_name), None) => {
- let node = match data.nodes.get(node_name) {
- Some(x) => x,
- None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
- };
- let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
+ profiles_list
+ .into_iter()
+ .map(|x| ((node_name.as_str(), node), x))
+ .collect()
+ }
+ (None, None) => {
+ let mut l = Vec::new();
+
+ for (node_name, node) in &data.nodes {
+ let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
for profile_name in [
node.node_settings.profiles_order.iter().collect(),
@@ -377,61 +412,31 @@ async fn run_deploy(
}
}
- profiles_list
+ let ll: ToDeploy = profiles_list
.into_iter()
.map(|x| ((node_name.as_str(), node), x))
- .collect()
- }
- (None, None) => {
- let mut l = Vec::new();
-
- for (node_name, node) in &data.nodes {
- let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
-
- for profile_name in [
- node.node_settings.profiles_order.iter().collect(),
- node.node_settings.profiles.keys().collect::<Vec<&String>>(),
- ]
- .concat()
- {
- let profile = match node.node_settings.profiles.get(profile_name) {
- Some(x) => x,
- None => {
- return Err(RunDeployError::ProfileNotFound(
- profile_name.to_owned(),
- ))
- }
- };
-
- if !profiles_list.iter().any(|(n, _)| n == profile_name) {
- profiles_list.push((&profile_name, profile));
- }
- }
-
- let ll: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
- profiles_list
- .into_iter()
- .map(|x| ((node_name.as_str(), node), x))
- .collect();
+ .collect();
- l.extend(ll);
- }
-
- l
+ l.extend(ll);
}
- (None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
- };
- let mut parts: Vec<(utils::DeployData, utils::DeployDefs)> = Vec::new();
+ l
+ }
+ (None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
+ };
+
+ let mut parts: Vec<(deploy::DeployData, deploy::DeployDefs)> = Vec::new();
for ((node_name, node), (profile_name, profile)) in to_deploy {
- let deploy_data = utils::make_deploy_data(
+ let deploy_data = deploy::make_deploy_data(
&data.generic_settings,
node,
node_name,
profile,
profile_name,
&cmd_overrides,
+ debug_logs,
+ log_dir.as_deref(),
);
let deploy_defs = deploy_data.defs()?;
@@ -446,21 +451,21 @@ async fn run_deploy(
}
for (deploy_data, deploy_defs) in &parts {
- utils::push::push_profile(
+ deploy::push::push_profile(deploy::push::PushProfileData {
supports_flakes,
check_sigs,
- deploy_flake.repo,
- &deploy_data,
- &deploy_defs,
+ repo: deploy_flake.repo,
+ deploy_data: &deploy_data,
+ deploy_defs: &deploy_defs,
keep_result,
result_path,
extra_build_args,
- )
+ })
.await?;
}
for (deploy_data, deploy_defs) in &parts {
- utils::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
+ deploy::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
}
Ok(())
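
push_profile now takes a single struct with named fields instead of eight positional arguments, which keeps the call site self-documenting and lets fields be added without touching every caller. A plausible definition of that struct, inferred purely from the call site above; the lifetimes and exact field types are assumptions, and the real definition lives in the deploy library's push module:

    // Assumed shape of deploy::push::PushProfileData; field names come from
    // the call site, types are educated guesses.
    pub struct PushProfileData<'a> {
        pub supports_flakes: bool,
        pub check_sigs: bool,
        pub repo: &'a str,
        pub deploy_data: &'a crate::DeployData<'a>,
        pub deploy_defs: &'a crate::DeployDefs,
        pub keep_result: bool,
        pub result_path: Option<&'a str>,
        pub extra_build_args: &'a [String],
    }
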
@@ -469,33 +474,35 @@ async fn run_deploy(
#[derive(Error, Debug)]
enum RunError {
#[error("Failed to deploy profile: {0}")]
- DeployProfileError(#[from] utils::deploy::DeployProfileError),
+ DeployProfile(#[from] deploy::deploy::DeployProfileError),
#[error("Failed to push profile: {0}")]
- PushProfileError(#[from] utils::push::PushProfileError),
+ PushProfile(#[from] deploy::push::PushProfileError),
#[error("Failed to test for flake support: {0}")]
- FlakeTestError(std::io::Error),
+ FlakeTest(std::io::Error),
#[error("Failed to check deployment: {0}")]
- CheckDeploymentError(#[from] CheckDeploymentError),
+ CheckDeployment(#[from] CheckDeploymentError),
#[error("Failed to evaluate deployment data: {0}")]
- GetDeploymentDataError(#[from] GetDeploymentDataError),
+ GetDeploymentData(#[from] GetDeploymentDataError),
#[error("Error parsing flake: {0}")]
- ParseFlakeError(#[from] utils::ParseFlakeError),
+ ParseFlake(#[from] deploy::ParseFlakeError),
+ #[error("Error initiating logger: {0}")]
+ Logger(#[from] flexi_logger::FlexiLoggerError),
#[error("{0}")]
- RunDeployError(#[from] RunDeployError),
+ RunDeploy(#[from] RunDeployError),
}
async fn run() -> Result<(), RunError> {
- if std::env::var("DEPLOY_LOG").is_err() {
- std::env::set_var("DEPLOY_LOG", "info");
- }
-
- pretty_env_logger::init_custom_env("DEPLOY_LOG");
-
let opts: Opts = Opts::parse();
- let deploy_flake = utils::parse_flake(opts.flake.as_str())?;
+ deploy::init_logger(
+ opts.debug_logs,
+ opts.log_dir.as_deref(),
+ deploy::LoggerType::Deploy,
+ )?;
- let cmd_overrides = utils::CmdOverrides {
+ let deploy_flake = deploy::parse_flake(opts.flake.as_str())?;
+
+ let cmd_overrides = deploy::CmdOverrides {
ssh_user: opts.ssh_user,
profile_user: opts.profile_user,
ssh_opts: opts.ssh_opts,
@@ -507,9 +514,7 @@ async fn run() -> Result<(), RunError> {
confirm_timeout: opts.confirm_timeout,
};
- let supports_flakes = test_flake_support()
- .await
- .map_err(RunError::FlakeTestError)?;
+ let supports_flakes = test_flake_support().await.map_err(RunError::FlakeTest)?;
if !supports_flakes {
warn!("A Nix version without flakes support was detected, support for this is work in progress");
@@ -534,6 +539,8 @@ async fn run() -> Result<(), RunError> {
opts.keep_result,
result_path,
&opts.extra_build_args,
+ opts.debug_logs,
+ opts.log_dir,
)
.await?;
@@ -544,7 +551,10 @@ async fn run() -> Result<(), RunError> {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
match run().await {
Ok(()) => (),
- Err(err) => good_panic!("{}", err),
+ Err(err) => {
+ error!("{}", err);
+ std::process::exit(1);
+ }
}
Ok(())
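
With the new subcommand split, the activate binary covers both halves of the magic-rollback handshake. Roughly how it would be invoked, assuming Clap's default kebab-case flag names and hypothetical placeholder paths (in practice the deploy binary generates these invocations over SSH):

    # On the target host: activate a closure with magic rollback, waiting up
    # to 30 seconds for the deployer to confirm before rolling back.
    activate --temp-path /tmp \
        activate --confirm-timeout 30 --magic-rollback \
        /nix/store/<hash>-activatable /nix/var/nix/profiles/system

    # On the deployer side: block until the canary file for that closure
    # appears, signalling that activation has begun.
    activate --temp-path /tmp \
        wait /nix/store/<hash>-activatable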