Commit

fix: add more logging

stringhandler committed Aug 1, 2024
1 parent 97c28c9 commit ed8f8e8
Showing 12 changed files with 517 additions and 38 deletions.
367 changes: 362 additions & 5 deletions src-tauri/Cargo.lock

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions src-tauri/Cargo.toml
@@ -11,6 +11,7 @@ edition = "2021"
tauri-build = { version = "1", features = ["isolation"] }

[dependencies]
tari_common = { git = "https://github.com/tari-project/tari.git", branch = "development" }
tari_shutdown = { git = "https://github.com/tari-project/tari.git", branch = "development" }

anyhow = "1"
@@ -44,9 +45,12 @@ tokio-util = { version = "0.7.11", features = ["compat"] }
sanitize-filename = "0.5"
async-trait = "0.1.81"
sysinfo = "0.30.13"
log4rs = "1.3.0"


# static bind lzma
lzma-sys = { version = "0.1.0", features = ["static"] }
log = "0.4.22"


[features]
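For context on the two new logging dependencies: log is the facade the application code emits records through, and log4rs is the backend that decides where those records go. A minimal sketch of pairing them, using a purely programmatic config for illustration (this commit instead loads the YAML sample added below; the function name init_console_logging is hypothetical):

```rust
use log::{info, LevelFilter};
use log4rs::append::console::ConsoleAppender;
use log4rs::config::{Appender, Config, Root};

fn init_console_logging() {
    // Simplest possible backend: one console appender at INFO and above.
    let stdout = ConsoleAppender::builder().build();
    let config = Config::builder()
        .appender(Appender::builder().build("stdout", Box::new(stdout)))
        .build(Root::builder().appender("stdout").build(LevelFilter::Info))
        .expect("valid log4rs config");
    log4rs::init_config(config).expect("logger may only be initialised once");
    info!("logging initialised");
}
```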
71 changes: 71 additions & 0 deletions src-tauri/log4rs_sample.yml
@@ -0,0 +1,71 @@
# A sample log configuration file for running in release mode. By default, this configuration splits log messages
# across three destinations:
# * Console: Log messages with level WARN and higher
# * log/universe/universe.log: DEBUG-level and higher messages from the application itself (targets under tari::universe)
# * log/universe/other.log: Messages from third-party crates (the "other" appender)
#
# See https://docs.rs/log4rs/1.3.0/log4rs/encode/pattern/index.html for deciphering the log pattern. The log format
# used in this sample configuration prints messages as:
# timestamp LEVEL message // file:line
refresh_rate: 30 seconds
appenders:
  # An appender named "stdout" that writes to stdout
  stdout:
    kind: console
    encoder:
      pattern: "{d(%H:%M)} {h({l}):5} {m}{n}"
    filters:
      - kind: threshold
        level: warn

  # An appender named "default" that writes to a rolling file with a custom pattern encoder
  default:
    kind: rolling_file
    path: "{{log_dir}}/universe/universe.log"
    policy:
      kind: compound
      trigger:
        kind: size
        limit: 10mb
      roller:
        kind: fixed_window
        base: 1
        count: 5
        pattern: "{{log_dir}}/universe/universe.{}.log"
    encoder:
      pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {l:5} {m} // {f}:{L}{n}"

  # An appender named "other" that writes to a rolling file with a custom pattern encoder
  other:
    kind: rolling_file
    path: "{{log_dir}}/universe/other.log"
    policy:
      kind: compound
      trigger:
        kind: size
        limit: 10mb
      roller:
        kind: fixed_window
        base: 1
        count: 5
        pattern: "{{log_dir}}/universe/other.{}.log"
    encoder:
      pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {l:5} {m}{n} // {f}:{L} "

# Set the default logging level to "info"
root:
  level: info
  appenders:
    - stdout

loggers:
  # Route the application's own log events (targets under tari::universe) to the default appender
  tari::universe:
    level: debug
    appenders:
      - default
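One note on the placeholders: {{log_dir}} is not log4rs syntax, so this file cannot be passed to log4rs as-is; the loader substitutes a real directory first. In this commit that loader is tari_common::initialize_logging (see the main.rs hunk below). A standalone sketch of the same idea, assuming hypothetical names install_and_load and log4rs_config.yml:

```rust
use std::path::Path;

// Render the sample config to a concrete file, then hand it to log4rs.
// tari_common::initialize_logging performs the equivalent steps, with
// extra handling this sketch omits.
fn install_and_load(log_dir: &Path) -> anyhow::Result<()> {
    let template = include_str!("../log4rs_sample.yml");
    let rendered = template.replace("{{log_dir}}", &log_dir.display().to_string());
    let config_path = log_dir.join("log4rs_config.yml");
    std::fs::write(&config_path, rendered)?;
    log4rs::init_file(&config_path, Default::default())?;
    Ok(())
}
```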

3 changes: 0 additions & 3 deletions src-tauri/src/binary_resolver.rs
@@ -176,9 +176,6 @@ impl BinaryResolver {
        binary: Binaries,
        force_download: bool,
    ) -> Result<Version, Error> {
        let cache_dir = tauri::api::path::cache_dir()
            .ok_or(anyhow::anyhow!("Failed to get cache dir"))?
            .join("tari-universe");
        let adapter = self
            .adapters
            .get(&binary)
12 changes: 9 additions & 3 deletions src-tauri/src/cpu_miner.rs
@@ -1,13 +1,18 @@
use std::path::PathBuf;
use log::warn;
use sysinfo::{CpuRefreshKind, RefreshKind, System};
use crate::mm_proxy_manager::MmProxyManager;
use crate::xmrig::http_api::XmrigHttpApiClient;
use crate::xmrig_adapter::{XmrigAdapter, XmrigNodeConnection};
use crate::{CpuMinerConfig, CpuMinerConnection, CpuMinerConnectionStatus, CpuMinerStatus};
use tari_shutdown::{Shutdown, ShutdownSignal};
use tauri::async_runtime::JoinHandle;
use tauri::Manager;
use tokio::select;
use tokio::time::MissedTickBehavior;


const LOG_TARGET: &str = "tari::universe::cpu_miner";
pub enum CpuMinerEvent {
    Stdout(String),
    Stderr(String),
@@ -34,16 +39,17 @@ impl CpuMiner {
        mut app_shutdown: ShutdownSignal,
        cpu_miner_config: &CpuMinerConfig,
        local_mm_proxy: &MmProxyManager,
        base_path: PathBuf
    ) -> Result<(), anyhow::Error> {
        if self.watcher_task.is_some() {
            println!("Tried to start mining twice");
            warn!(target: LOG_TARGET, "Tried to start mining twice");
            return Ok(());
        }
        let mut inner_shutdown = self.miner_shutdown.to_signal();

        let xmrig_node_connection = match cpu_miner_config.node_connection {
            CpuMinerConnection::BuiltInProxy => {
                local_mm_proxy.start(app_shutdown.clone()).await?;
                local_mm_proxy.start(app_shutdown.clone(),base_path).await?;
                local_mm_proxy.wait_ready().await?;
                XmrigNodeConnection::LocalMmproxy {
                    host_name: "127.0.0.1".to_string(),
@@ -135,7 +141,7 @@ impl CpuMiner {
            // Refresh CPUs again.
            s.refresh_cpu();

            let mut cpu_usage = s.global_cpu_info().cpu_usage();
            let cpu_usage = s.global_cpu_info().cpu_usage();

            match &self.api_client {
                Some(client) => {
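On the cpu_usage change above: sysinfo computes CPU usage as a delta between two refreshes, which is why the surrounding code refreshes, waits, and refreshes again before reading, and why the binding no longer needs to be mut. A standalone sketch against the sysinfo 0.30 API (sample_cpu_usage is a hypothetical helper):

```rust
use std::thread::sleep;
use sysinfo::{CpuRefreshKind, RefreshKind, System, MINIMUM_CPU_UPDATE_INTERVAL};

fn sample_cpu_usage() -> f32 {
    // The refresh inside new_with_specifics only establishes a baseline.
    let mut s = System::new_with_specifics(
        RefreshKind::new().with_cpu(CpuRefreshKind::everything()),
    );
    // Let the counters move, then refresh again so the delta is meaningful.
    sleep(MINIMUM_CPU_UPDATE_INTERVAL);
    s.refresh_cpu();
    // Aggregate usage across all cores, as a percentage.
    s.global_cpu_info().cpu_usage()
}
```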
46 changes: 39 additions & 7 deletions src-tauri/src/main.rs
@@ -24,24 +24,27 @@ use std::thread::sleep;
use std::{panic, process};
use tari_shutdown::Shutdown;
use tokio::sync::RwLock;
use log::{debug, error, info};
use tauri::{api, RunEvent, UpdaterEvent};

#[tauri::command]
async fn start_mining<'r>(
    window: tauri::Window,
    state: tauri::State<'r, UniverseAppState>,
    app: tauri::AppHandle,
) -> Result<(), String> {
    let config = state.cpu_miner_config.read().await;
    state
        .node_manager
        .ensure_started(state.shutdown.to_signal())
        .ensure_started(state.shutdown.to_signal(), app.path_resolver().app_local_data_dir().unwrap())
        .await
        .map_err(|e| e.to_string())?;
    let mm_proxy_manager = state.mm_proxy_manager.read().await;
    state
        .cpu_miner
        .write()
        .await
        .start(state.shutdown.to_signal(), &config, &mm_proxy_manager).await
        .start(state.shutdown.to_signal(), &config, &mm_proxy_manager, app.path_resolver().app_local_data_dir().unwrap()).await
        .map_err(|e| {
            dbg!(e.to_string());
            e.to_string()
@@ -124,13 +127,14 @@ struct UniverseAppState {
    node_manager: NodeManager,
}

pub const LOG_TARGET : &str = "tari::universe::main";

fn main() {
    let default_hook = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        default_hook(info);
        process::exit(1);
    }));

    let mut shutdown = Shutdown::new();
    let mm_proxy_manager = Arc::new(RwLock::new(MmProxyManager::new()));
    let node_manager = NodeManager::new();
@@ -150,17 +154,45 @@ fn main() {
        .build(tauri::generate_context!())
        .expect("error while running tauri application");

    tari_common::initialize_logging(
        &app.path_resolver().app_config_dir().unwrap().join("log4rs_config.yml"),
        &app.path_resolver().app_log_dir().unwrap(),
        include_str!("../log4rs_sample.yml"),
    ).expect("Could not set up logging");
    info!(
        target: LOG_TARGET,
        "Starting Tari Universe version: {}",
        app.package_info().version
    );

    println!("Logs stored at {:?}", app.path_resolver().app_log_dir().unwrap());

    app.run(move |_app_handle, event| match event {
        tauri::RunEvent::Updater(updater_event) => {
            dbg!(updater_event);
            match updater_event {
                UpdaterEvent::Error(e) => {
                    error!(target: LOG_TARGET, "Updater error: {:?}", e);
                },
                _ => {
                    info!(target: LOG_TARGET, "Updater event: {:?}", updater_event);
                }
            }
        },
        tauri::RunEvent::ExitRequested { api, .. } => {
        tauri::RunEvent::ExitRequested { api: _, .. } => {
            // api.prevent_exit();
            println!("App shutdown caught");
            info!(target: LOG_TARGET, "App shutdown caught");
            shutdown.trigger();
            // TODO: Find a better way of knowing that all miners have stopped
            sleep(std::time::Duration::from_secs(3));
            info!(target: LOG_TARGET, "App shutdown complete");
        }
        RunEvent::MainEventsCleared=> {
            // no need to handle
        }
        _ => {
            debug!(target: LOG_TARGET, "Unhandled event: {:?}", event);
        }
        _ => {}
    });
}
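Worth noting how the per-module LOG_TARGET constants cooperate with log4rs_sample.yml: log4rs logger names form a ::-separated hierarchy, so the single tari::universe logger catches tari::universe::main, tari::universe::cpu_miner, and the other targets declared in this commit, routing them all to the default appender (universe.log). A small sketch (demo_target_routing is hypothetical; the targets are from this commit):

```rust
use log::info;

fn demo_target_routing() {
    // Both targets fall under the `tari::universe` prefix, so the
    // `tari::universe` logger in log4rs_sample.yml handles both.
    info!(target: "tari::universe::main", "routed to universe.log");
    info!(target: "tari::universe::cpu_miner", "also routed to universe.log");
}
```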
3 changes: 2 additions & 1 deletion src-tauri/src/merge_mining_adapter.rs
@@ -1,3 +1,4 @@
use std::path::PathBuf;
use crate::binary_resolver::{Binaries, BinaryResolver};
use crate::process_adapter::{ProcessAdapter, ProcessInstance, StatusMonitor};
use crate::xmrig_adapter::XmrigInstance;
@@ -24,7 +25,7 @@ impl ProcessAdapter for MergeMiningProxyAdapter {
    type Instance = MergeMiningProxyInstance;
    type StatusMonitor = MergeMiningProxyStatusMonitor;

    fn spawn_inner(&self) -> Result<(Self::Instance, Self::StatusMonitor), Error> {
    fn spawn_inner(&self, log_folder: PathBuf) -> Result<(Self::Instance, Self::StatusMonitor), Error> {
        let inner_shutdown = Shutdown::new();
        let mut shutdown_signal = inner_shutdown.to_signal();

10 changes: 7 additions & 3 deletions src-tauri/src/minotari_node_adapter.rs
@@ -1,13 +1,17 @@
use std::path::PathBuf;
use crate::binary_resolver::{Binaries, BinaryResolver};
use crate::process_adapter::{ProcessAdapter, ProcessInstance, StatusMonitor};
use crate::xmrig_adapter::XmrigInstance;
use anyhow::Error;
use async_trait::async_trait;
use dirs_next::{cache_dir, data_dir, data_local_dir};
use log::info;
use tari_shutdown::Shutdown;
use tokio::select;
use tokio::task::JoinHandle;

const LOG_TARGET: &str = "tari::universe::minotari_node_adapter";

pub struct MinotariNodeAdapter {
    force_download: bool,
    use_tor: bool
@@ -26,11 +30,11 @@ impl ProcessAdapter for MinotariNodeAdapter {
    type Instance = MinotariNodeInstance;
    type StatusMonitor = MinotariNodeStatusMonitor;

    fn spawn_inner(&self) -> Result<(Self::Instance, Self::StatusMonitor), Error> {
    fn spawn_inner(&self, log_path: PathBuf) -> Result<(Self::Instance, Self::StatusMonitor), Error> {
        let inner_shutdown = Shutdown::new();
        let mut shutdown_signal = inner_shutdown.to_signal();
        let shutdown_signal = inner_shutdown.to_signal();

        dbg!("STarting node");
        info!(target: LOG_TARGET, "Starting minotari node");
        let working_dir = data_local_dir().unwrap().join("tari-universe").join("node");
        std::fs::create_dir_all(&working_dir)?;

13 changes: 8 additions & 5 deletions src-tauri/src/mm_proxy_manager.rs
@@ -1,10 +1,15 @@
use std::path::PathBuf;
use crate::merge_mining_adapter::MergeMiningProxyAdapter;
use crate::process_watcher::ProcessWatcher;
use std::sync::Arc;
use log::info;
use tari_shutdown::ShutdownSignal;
use tauri::AppHandle;
use tokio::sync::RwLock;
use tokio::time::sleep;

const LOG_TARGET: &str = "tari::universe::mm_proxy_manager";

pub struct MmProxyManager {
    watcher: Arc<RwLock<ProcessWatcher<MergeMiningProxyAdapter>>>,
}
@@ -19,12 +24,10 @@ impl MmProxyManager {
        }
    }

    pub async fn start(&self, app_shutdown: ShutdownSignal) -> Result<(), anyhow::Error> {
        dbg!("Starting merge mining proxy");
        // let (mut rx, mut child) = Command::new_sidecar("minotari_merge_mining_proxy")?.spawn()?;
        // let sidecar_adapter = SidecarAdapter::<MergeMiningProxyInstance>::new("minotari_merge_mining_proxy".to_string());
    pub async fn start(&self, app_shutdown: ShutdownSignal, base_path: PathBuf) -> Result<(), anyhow::Error> {
        let mut process_watcher = self.watcher.write().await;
        process_watcher.start(app_shutdown).await?;
        info!(target: LOG_TARGET, "Starting mmproxy");
        process_watcher.start(app_shutdown, base_path).await?;

        Ok(())
    }
13 changes: 8 additions & 5 deletions src-tauri/src/node_manager.rs
@@ -1,10 +1,14 @@
use std::path::PathBuf;
use crate::merge_mining_adapter::MergeMiningProxyAdapter;
use crate::minotari_node_adapter::MinotariNodeAdapter;
use crate::process_watcher::ProcessWatcher;
use std::sync::Arc;
use tari_shutdown::ShutdownSignal;
use tokio::sync::RwLock;


const LOG_TARGET: &str = "tari::universe::node_manager";

pub struct NodeManager {
    watcher: Arc<RwLock<ProcessWatcher<MinotariNodeAdapter>>>,
}
@@ -30,17 +34,16 @@ impl NodeManager {
        }
    }

    pub async fn ensure_started(&self, app_shutdown: ShutdownSignal) -> Result<(), anyhow::Error> {
    pub async fn ensure_started(&self, app_shutdown: ShutdownSignal, base_path: PathBuf) -> Result<(), anyhow::Error> {
        let mut process_watcher = self.watcher.write().await;
        process_watcher.start(app_shutdown).await?;
        process_watcher.start(app_shutdown, base_path).await?;
        process_watcher.wait_ready().await?;
        Ok(())
    }

    pub async fn start(&self, app_shutdown: ShutdownSignal) -> Result<(), anyhow::Error> {
        dbg!("Starting node");
    pub async fn start(&self, app_shutdown: ShutdownSignal, base_path: PathBuf) -> Result<(), anyhow::Error> {
        let mut process_watcher = self.watcher.write().await;
        process_watcher.start(app_shutdown).await?;
        process_watcher.start(app_shutdown, base_path).await?;

        Ok(())
    }
7 changes: 4 additions & 3 deletions src-tauri/src/process_adapter.rs
@@ -1,14 +1,15 @@
use std::path::PathBuf;
use async_trait::async_trait;

pub trait ProcessAdapter {
    type Instance: ProcessInstance;
    type StatusMonitor: StatusMonitor;
    // fn spawn(&self) -> Result<(Receiver<()>, TInstance), anyhow::Error>;
    fn spawn_inner(&self) -> Result<(Self::Instance, Self::StatusMonitor), anyhow::Error>;
    fn spawn_inner(&self, base_folder: PathBuf) -> Result<(Self::Instance, Self::StatusMonitor), anyhow::Error>;
    fn name(&self) -> &str;

    fn spawn(&self) -> Result<Self::Instance, anyhow::Error> {
        let (instance, _) = self.spawn_inner()?;
    fn spawn(&self, base_folder:PathBuf) -> Result<Self::Instance, anyhow::Error> {
        let (instance, _) = self.spawn_inner(base_folder)?;
        Ok(instance)
    }
}
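This trait change is the hub of the commit: every adapter now receives its base folder from the caller (ultimately the Tauri app's local data dir in main.rs) instead of resolving paths itself. A sketch of a toy implementation under the new signature; EchoAdapter, EchoInstance, and EchoMonitor are hypothetical, and ProcessInstance/StatusMonitor are stubbed as empty traits because this diff does not show their definitions:

```rust
use std::path::PathBuf;

// Empty stand-ins so the sketch compiles; the real traits live elsewhere.
trait ProcessInstance {}
trait StatusMonitor {}

trait ProcessAdapter {
    type Instance: ProcessInstance;
    type StatusMonitor: StatusMonitor;
    fn spawn_inner(&self, base_folder: PathBuf) -> Result<(Self::Instance, Self::StatusMonitor), anyhow::Error>;
    fn name(&self) -> &str;

    fn spawn(&self, base_folder: PathBuf) -> Result<Self::Instance, anyhow::Error> {
        let (instance, _) = self.spawn_inner(base_folder)?;
        Ok(instance)
    }
}

struct EchoAdapter;
struct EchoInstance;
struct EchoMonitor;
impl ProcessInstance for EchoInstance {}
impl StatusMonitor for EchoMonitor {}

impl ProcessAdapter for EchoAdapter {
    type Instance = EchoInstance;
    type StatusMonitor = EchoMonitor;

    fn spawn_inner(&self, base_folder: PathBuf) -> Result<(EchoInstance, EchoMonitor), anyhow::Error> {
        // Real adapters use the folder to derive working and log directories
        // before launching their child process.
        std::fs::create_dir_all(base_folder.join("echo"))?;
        Ok((EchoInstance, EchoMonitor))
    }

    fn name(&self) -> &str {
        "echo"
    }
}
```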