summaryrefslogtreecommitdiff
path: root/init/src/services/units.rs
diff options
context:
space:
mode:
Diffstat (limited to 'init/src/services/units.rs')
-rw-r--r--init/src/services/units.rs293
1 files changed, 293 insertions, 0 deletions
diff --git a/init/src/services/units.rs b/init/src/services/units.rs
new file mode 100644
index 0000000..01c4183
--- /dev/null
+++ b/init/src/services/units.rs
@@ -0,0 +1,293 @@
// This module defines unit settings parsing logic
//
// NOTE: ON UNIT PARSING LOGIC
// when parsing a unit in /etc/vigil/units/ we should ignore
// ANY broken/badly constructed toml unit configuration file
// without "crashing" logic.
+//
// NOTE: ON GENERAL SERVICE LOGGING
// when vigil starts a service, should it send its logs to vigil?
// Or do such init systems simply take the stdout+stderr of the
// service and show its output? Unclear for now; look into how it is supposed to work.
+
+use crate::{RUNLEVEL_STATE, log_warning};
+use serde::Deserialize;
+use std::{
+ fs::{read_dir, read_to_string},
+ process::Child,
+ time::Duration,
+};
+
+/*
+ [info]
+ name = "name"
+ description = "description"
+ version = "version"
+
+ [config]
+ exec = "path to exec"
+ runlevel =
+ restart=always|never
+
+*/
+
/// Restart policy for a service, parsed from the `restart` key of a
/// unit file's `[config]` table (`always` | `never`).
#[derive(Deserialize, PartialEq, Clone)]
enum Restart {
    /// Restart the service whenever it exits (the service loop caps
    /// this at 3 restart attempts).
    #[serde(alias = "always")]
    Always,
    /// Never restart the service once it exits.
    #[serde(alias = "never")]
    Never,
}
+
/// System runlevel, parsed from the `runlevel` key of a unit file's
/// `[config]` table. The integer noted on each variant is the classic
/// SysV-style runlevel number it corresponds to.
#[derive(Deserialize, PartialEq, Eq)]
pub enum Runlevel {
    /// The system is shutting down, runlevel int: 0
    Shutdown,
    /// One-user system debug-mode, runlevel int: 1
    OneUser,
    /// Multi-user CLI (TTY) with no network, runlevel int: 2
    MultiNoNetwork,
    /// Multi-user CLI with network, runlevel int: 3
    MultiNetwork,
    /// Multi-user mode with GUI, runlevel int: 5
    MultiGUINetwork,
    /// Runlevel is not critical for running the service, runlevel int: 4
    Undefined,
    /// System going to reboot, runlevel int: 6
    Reboot,
}
+
/// Metadata from the `[info]` table of a unit file.
#[allow(dead_code)]
#[derive(Deserialize)]
pub struct ServiceInfo {
    /// Service name
    name: String,
    /// Service description
    description: String,
    /// Service version
    version: String,
}
+
/// Runtime settings from the `[config]` table of a unit file.
#[allow(dead_code)]
#[derive(Deserialize)]
pub struct ServiceConfig {
    /// Execution command, like ExecStart in sysd
    exec: String,
    /// Runlevel the service belongs to, like after=*.target
    runlevel: Runlevel,
    /// Restart policy: Always|Never
    restart: Restart,
}
+
/// Main Unit-file struct.
///
/// Mirrors the two TOML tables of a unit file in `/etc/vigil/units/`:
/// `[info]` (metadata) and `[config]` (how to run the service).
#[allow(dead_code)]
#[derive(Deserialize)]
pub struct Unit {
    /// The `[info]` table: name, description, version.
    info: ServiceInfo,
    /// The `[config]` table: exec command, runlevel, restart policy.
    config: ServiceConfig,
}
+
+#[allow(dead_code)]
+impl Restart {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Restart::Always => "always",
+ Restart::Never => "never",
+ }
+ }
+}
+
+/// Function:
+/// 1. starting services declared in `/etc/vigil/units/`
+/// 2. checks services for dropping and if ts true to restarting it
+/// max restart count: 3
+///
+/// Based on the global runlevel variable, should be runned as second thread
+// TODO: More logs && better errors processing && fix clippy warnings
+pub fn services_mainloop() -> Result<(), Box<dyn std::error::Error + Send>> {
+ let unit_list = parse_all_units()?;
+ let mut pids: Vec<(Child, String, Restart, u8)> = vec![];
+
+ loop {
+ match *RUNLEVEL_STATE.lock().unwrap() {
+ Runlevel::Undefined => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::Undefined {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::Reboot => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::Reboot {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::Shutdown => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::Shutdown {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::OneUser => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::OneUser {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::MultiNoNetwork => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::MultiNoNetwork {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::MultiNetwork => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::MultiNetwork {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+
+ Runlevel::MultiGUINetwork => {
+ for unit in &unit_list {
+ if unit.config.runlevel == Runlevel::MultiGUINetwork {
+ let child = std::process::Command::new(unit.config.exec.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ pids.push((
+ child,
+ unit.config.exec.clone(),
+ unit.config.restart.clone(),
+ 0,
+ ));
+ }
+ }
+ }
+ }
+
+ for i in 0..pids.len() {
+ match pids[i].0.try_wait() {
+ Ok(Some(status)) => {
+ if pids[i].2 == Restart::Always && pids[i].3 < 3 {
+ let new_child = std::process::Command::new(pids[i].1.clone())
+ .spawn()
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?;
+ log_warning(&format!(
+ "One of units dropped with Error {};\n Restarting it. Attempt: {}",
+ status,
+ pids[i].3 + 1
+ ));
+
+ pids.push((
+ new_child,
+ pids[i].1.clone(),
+ pids[i].2.clone(),
+ pids[i].3 + 1,
+ ));
+ pids.remove(i);
+ } else {
+ log_warning(&format!(
+ "Unit {} has reached maximum restart attempts or restart policy is not Always, not restarting.",
+ pids[i].1
+ ));
+ continue;
+ }
+ }
+ Ok(None) => continue,
+ Err(e) => log_warning(&format!("Error occurred while checking services: {}", e)),
+ }
+ }
+
+ std::thread::sleep(Duration::from_secs(15));
+ }
+}
+
+fn parse_all_units() -> Result<Vec<Unit>, Box<dyn std::error::Error + Send>> {
+ let mut units: Vec<Unit> = Vec::new();
+
+ for unit in read_dir("/etc/vigil/units")
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?
+ {
+ let unit_path = unit
+ .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send>)?
+ .path();
+ let unit_str: String = match read_to_string(unit_path) {
+ Ok(content) => content,
+ Err(e) => {
+ log_warning(&format!("Error while reading unit: {}", e));
+ continue;
+ }
+ };
+
+ let deserialized: Result<Unit, _> = toml::from_str(&unit_str);
+ match deserialized {
+ Ok(unit) => {
+ units.push(unit);
+ }
+ Err(e) => {
+ log_warning(&format!("Error while parsing unit: {}", e));
+ continue;
+ }
+ };
+ }
+
+ Ok(units)
+}