chore: move tasks to job schedule, move main to async, result for tasks, clean-up
parent 1456961851
commit 26b2f77300

src/main.rs  224
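The change below replaces the hand-rolled polling loop in main() with tokio_cron_scheduler jobs, turns main() into a #[tokio::main] async entry point, and makes the cloud tasks return a Result instead of swallowing errors. A minimal sketch of the scheduling pattern the commit adopts, assuming the same pre-async JobScheduler constructor and Job::new signature used further down in this diff (the cron expression and the commented task call are placeholders, not part of the commit):

    use tokio_cron_scheduler::{Job, JobScheduler};

    pub type BxDynResult<T> = std::result::Result<T, Box<dyn std::error::Error>>;

    #[tokio::main]
    async fn main() -> BxDynResult<()> {
        let mut sched = JobScheduler::new();
        // Placeholder cron expression ("every 30 seconds"); the commit reads it from config.
        let job = Job::new("0/30 * * * * *", |uuid, _lock| {
            println!("job {} fired", uuid);
            // The commit spawns the real async task here (monitor / check / cache).
            tokio::spawn(async { /* run_cache_clouds().await */ });
        })?;
        if let Err(e) = sched.add(job) {
            eprintln!("Error scheduling task {}", e);
        }
        // start() drives the registered jobs on the tokio runtime.
        let _ = sched.start().await;
        Ok(())
    }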
@@ -1,48 +1,32 @@
 // use std::sync::atomic::{AtomicUsize, Ordering};
+use tokio_cron_scheduler::{JobScheduler, Job};
+pub type BxDynResult<T> = std::result::Result<T, Box<dyn std::error::Error>>;
 use app_env::{
   AppStore,
   appenv::AppEnv,
   appinfo::AppInfo,
   appdata::AppData,
-  config::{Config,WebServer}
+  config::{Config}
 };
 use clds::clouds::{
   monitor_rules::{MonitorRules},
 };
 use clds::monitor::run_monitor;

 use app_auth::AuthStore;
 use reject_filters::{handle_rejection};
-// use zterton::models::{Terton};
-//use std::fs; //, io};
-// use std::fs::OpenOptions;
-// use std::io::Write;
-//use std::path::Path;
-// use serde_yaml::Value;
 use anyhow::{Result};
-// use std::env;
-//use warp::{http::Response as HttpResponse, Filter, filters::BoxedFilter};
 use warp::{
   // http::{StatusCode},
   http::{method::Method, HeaderMap},
   Filter,
 };
-// use warp::filters::header::headers_cloned;
-// use warp::path::FullPath;
-// use warp::http::{Uri, HeaderMap, HeaderValue};

-//use crate::utils::set_ta_data;
 use crate::defs::{DataDBs,CollsData,load_cloud_env};
 use clds::clouds::defs::{
   Cloud,
 };
-// use clds::app_env::config::Config;
 use clds::clouds::on_clouds::{make_cloud_cache,run_clouds_check};
 use reqenv::ReqEnv;

-// #[macro_use]
-// extern crate kloud_entries_macro_derive;

 // static WEBSERVER: AtomicUsize = AtomicUsize::new(0);
 const PKG_VERSION: &'static str = env!("CARGO_PKG_VERSION");
 // const PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
@@ -55,9 +39,6 @@ pub mod graphql;
 pub mod filters;
 pub mod handlers;

-// pub const MODEL_PATH: &String = String::from("./auth/auth_model.conf");
-// pub const POLICY_PATH: &String = String::from("./auth/policy.csv");
-
 async fn create_auth_store(app_env: &AppEnv,verbose: &str) -> AuthStore {
   let config = app_env.get_curr_websrvr_config();
   let model_path = config.st_auth_model_path();
@@ -87,31 +68,10 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   if verbose != "quiet" {
     println!("ZTerton web services at {}",&zterton_env);
   }
-  // envmnt::set("ZTERTON", "WEBSERVER");
   return Ok(());
-  /*
-  let app = Zterton::new(
-    app_env.config.srv_protocol.to_owned(),
-    app_env.config.srv_host.to_owned(),
-    app_env.config.srv_port,
-  );
-  let serverstring = format!("{}:{}",&srv_host,&srv_port);
-  match std::net::TcpStream::connect(&serverstring) {
-    Ok(_serverstream) => {
-      Ok(())
-      Err(anyhow!("Source {}: Connection to '{}' for tsksrvc '{}' failed: {}",&source,&serverstring,&tsk_name,&e))
-    },
-    Err(e) => {
-      // handle_input(serverstream);
-    }
-  }
-  */
 }
 // WEBSERVER.store(1,Ordering::Relaxed);

 // TODO pass root file-name frmt from AppEnv Config
-// if init at load
-// set_ta_data(&app_env).await?;
 println!("Loading webserver: {} ({})",&config.name,&app_env.curr_web);

 let (app, socket) = zterton::start_web(&mut app_env).await;
@@ -122,8 +82,6 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   // As static casbin
   println!("Load auth store ...");
   let auth_store = create_auth_store(&app_env,"").await;
-  // dbg!(&auth_store.users.read().await);
-  // dbg!(&auth_store.shadows.read().await);

   println!("Load data store ...");

@@ -133,8 +91,6 @@ async fn up_web_server(webpos: usize) -> Result<()> {
     auth: auth_store.to_owned(),
   };
   println!("Load web filters ...");
-  // let store = warp::any().map(move || ta_store.clone());
-  //let routes = warp::any().map(|| "Hello, World!");

   // let us get some static boxes from config values:
   let log_name = app_env.config.st_log_name();
@@ -160,7 +116,7 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   app_auth_filters::auth(app_store.clone(),auth_store.clone(),cors.clone()); // .with(cors.clone());

   let gqli_api =
-  // // If not graphiQL comment/remove next line Interface GiQL MUST BEFORE graphql post with schema
+  // If not graphiQL comment/remove next line Interface GiQL MUST BEFORE graphql post with schema
   // app_api.to_owned()
   graphql::graphiql(gql_path, giql_path, data_dbs.clone()).await;
   if giql_path.len() > 0 {
@@ -179,14 +135,6 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   let kloud_api = filters::CollFilters::new("kloud")
     .filters_config(data_dbs.clone(),cloud.clone(),cors.clone());

-  // let ta_api =
-  //   filters::CollFilters::new("ta").filters(&config, data_dbs.clone(),cors.clone());
-
-  // let tp_api = filters::CollFilters::new("tp").filters(&config, data_dbs.clone(),cors.clone());
-  // .or(tracking_point::load_tp_filters().filters(&config, data_dbs.clone()))
-  // .or(topographic_anatomy::filters::ta(&config, data_dbs.clone()))
-  // .or(tracking_point::filters::tp(&config, data_dbs.clone()))
-
   let file_api = app_file_filters::files(app_store.clone(),auth_store.clone()).with(cors.clone());
   // Path for static files, better to be LAST
   let fs_api = warp::fs::dir(html_path).with(warp::compression::gzip());
@@ -194,16 +142,13 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   let app_api = auth_api
     .or(gqli_api).or(gql_api)
     .or(kloud_api)
-    // .or(ta_api)
-    // .or(tp_api)
     .or(file_api)
     .or(fs_api)
     .recover(move | error: warp::Rejection| handle_rejection(error, app_store.clone()))
     .boxed();
   // Wrap routes with log to get info
   let routes = app_api.with(warp::log(log_name));
-  // let routes = app_api.with(cors).with(warp::log(log_name));
-
+  //let routes = app_api.with(cors).with(warp::log(log_name));
   println!(
     "Starting http server: {}://{}:{}",
     &app.protocol, &app.host, &app.port
@@ -226,7 +171,7 @@ async fn up_web_server(webpos: usize) -> Result<()> {
   }
   Ok(())
 }
-pub async fn run_cache_clouds() {
+pub async fn run_cache_clouds() -> Result<()> {
   let args: Vec<String> = std::env::args().collect();
   let mut arg_cfg_path = String::from("");
   let mut arg_env_path = String::from("");
@@ -243,7 +188,7 @@ pub async fn run_cache_clouds() {
   let mut app_env = AppEnv::default();
   let config_content = Config::load_file_content("quiet", &arg_cfg_path);
   if ! config_content.contains("run_mode") {
-    return;
+    return Ok(());
   }
   app_env.config = Config::new(config_content,"quiet");
   let app_store = AppStore::new(AppData::new(app_env.to_owned()));
@@ -257,11 +202,12 @@ pub async fn run_cache_clouds() {
     Method::GET,
     "/config", "config", "kloud"
   );
-  let _ = make_cloud_cache(&reqenv,&cloud).await;
+  let res = make_cloud_cache(&reqenv,&cloud).await;
   println!("Cache service on Clouds: done {} __________ ",chrono::Utc::now().timestamp());
+  res
 }

-pub async fn run_check_clouds() {
+pub async fn run_check_clouds() -> Result<()> {
   let args: Vec<String> = std::env::args().collect();
   let mut arg_cfg_path = String::from("");
   let mut arg_env_path = String::from("");
@@ -278,7 +224,7 @@ pub async fn run_check_clouds() {
   let mut app_env = AppEnv::default();
   let config_content = Config::load_file_content("quiet",&arg_cfg_path);
   if ! config_content.contains("run_mode") {
-    return;
+    return Ok(());
   }
   app_env.config = Config::new(config_content,"quiet");
   let app_store = AppStore::new(AppData::new(app_env.to_owned()));
@@ -292,10 +238,11 @@ pub async fn run_check_clouds() {
     Method::GET,
     "/config", "config", "kloud"
   );
-  let _ = run_clouds_check(&reqenv,&cloud).await;
+  let res = run_clouds_check(&reqenv,&cloud).await;
   println!("Check Cloud service: done {} __________ ",chrono::Utc::now().timestamp());
+  res
 }
-pub async fn run_clouds_monitor() {
+pub async fn run_clouds_monitor() -> Result<()> {
   let args: Vec<String> = std::env::args().collect();
   let mut arg_cfg_path = String::from("");
   let mut arg_env_path = String::from("");
@@ -312,24 +259,19 @@ pub async fn run_clouds_monitor() {
   let mut app_env = AppEnv::default();
   let config_content = Config::load_file_content("quiet",&arg_cfg_path);
   if ! config_content.contains("run_mode") {
-    return;
+    return Ok(());
   }
   app_env.config = Config::new(config_content,"quiet");
+  //let monitor_sched_task = app_env.config.get_schedtask("monitor");
   let monitor_rules = MonitorRules::load(&app_env.config.monitor_rules_path,&app_env.config.monitor_rules_file,&app_env.config.monitor_rules_format);
-  // monitor_rules.rules[0].context = RuleContext::Service(String::from("kubernetes"));
   if monitor_rules.rules.len() > 0 {
-    let _ = run_monitor(monitor_rules,cloud,app_env).await;
+    run_monitor(monitor_rules,cloud,app_env).await?
   }
-  println!("Monitor Cloud: done {} __________ ",chrono::Utc::now().timestamp());
+  Ok(())
 }
-// for standalone server & async use
-// #[tokio::main]
-// pub async fn main() -> Result<()> {
-
-pub fn main() -> Result<()> {
+#[tokio::main]
+pub async fn main() -> BxDynResult<()> { //std::io::Result<()> {
   let args: Vec<String> = std::env::args().collect();
-  // println!("I got {:?} arguments: {:?}.", args.len() - 1, &args[1..]);
   if args.len() > 1 {
     match args[1].as_str() {
       "-h" | "--help" =>
@@ -341,6 +283,7 @@ pub fn main() -> Result<()> {
       _ => println!("{}",PKG_NAME),
     }
   }
+  let mut sched = JobScheduler::new();
   let mut arg_cfg_path = String::from("");
   let mut arg_env_path = String::from("");
   args.iter().enumerate().for_each(|(idx,arg)| {
@@ -355,73 +298,70 @@ pub fn main() -> Result<()> {
     dotenv::from_path(env_path)?;
   }
   pretty_env_logger::init();
-  // assert!(output.is_ok());
-  let loop_duration: u64;
-  let run_websrvrs: bool;
-  let run_cache: bool;
-  let run_check: bool;
-  let run_monitor: bool;
-  let websrvrs: Vec<WebServer>;
-  {
-    let config_content = Config::load_file_content("quiet", &arg_cfg_path);
-    if config_content.contains("run_mode") {
-      let config = Config::new(config_content,"quiet");
-      loop_duration = config.loop_duration;
-      // loop_duration = 10;
-      run_websrvrs = config.run_websrvrs;
-      run_cache = config.run_cache;
-      run_check = config.run_check;
-      run_monitor = config.run_monitor;
-      websrvrs = config.websrvrs;
-      if run_cache {
-        println!("Running 'cloud_cache' every {} seconds in LOOP",&loop_duration);
-      }
-      if run_check {
-        println!("Running 'cloud_check' every {} seconds in LOOP",&loop_duration);
-      }
-    } else {
-      loop_duration = 0;
-      run_websrvrs = false;
-      run_cache = false;
-      run_check = false;
-      run_monitor = false;
-      websrvrs = Vec::new();
-    }
-  }
-  // println!("content: {}",&config_content);
-  let rt = tokio::runtime::Runtime::new().unwrap_or_else(|e|
-    panic!("Error create tokio runtime {}",e)
-  );
-  if run_websrvrs {
-    websrvrs.iter().enumerate().for_each(|(pos,it)| {
-      rt.block_on(async move {
-        println!("{} -> {}",it.name,pos);
-        tokio::spawn(async move {up_web_server(pos).await});
-        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
-      });
-    });
-  }
-  loop {
-    rt.block_on(async move {
-      if run_monitor {
-        tokio::spawn(async {run_clouds_monitor().await }); // For async task
-      }
-      if run_check {
-        tokio::spawn(async {run_check_clouds().await }); // For async task
-        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
-      }
-      // {
-      // // For blocking task:
-      // let join_handle = tokio::task::spawn_blocking(|| do_my_task());
-      // tokio::spawn(async { do_my_task().await; });
-      // join_handle.await; // TODO: should handle error here
-      // }
-      if run_cache {
-        tokio::spawn(async {run_cache_clouds().await }); // For async task
-      }
-      println!("LOOP: {} __________",chrono::Utc::now().timestamp());
-      tokio::time::sleep(tokio::time::Duration::from_secs(loop_duration)).await;
-    });
-  }
+  let config_content = Config::load_file_content("quiet", &arg_cfg_path);
+  if !config_content.contains("run_mode") {
+    panic!("Error no run_mode found");
+  }
+  let config = Config::new(config_content,"quiet");
+  if config.run_schedtasks {
+    for it in &config.schedtasks {
+      if ! it.on_start {
+        continue;
+      }
+      match it.name.as_str() {
+        "monitor" => tokio::spawn(async {run_clouds_monitor().await}),
+        "check" => tokio::spawn(async {run_check_clouds().await}),
+        "cache" => tokio::spawn(async {run_cache_clouds().await}),
+        _ => {
+          eprintln!("Error task {} not defined",&it.name);
+          continue;
+        },
+      };
+    }
+  }
+  if config.run_websrvrs {
+    for (pos,it) in config.websrvrs.iter().enumerate() {
+      println!("{} -> {}",it.name,pos);
+      tokio::spawn(async move {up_web_server(pos).await});
+      tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
+    }
+  }
+  if config.run_schedtasks {
+    for it in config.schedtasks {
+      if it.schedule.is_empty() {
+        eprintln!("Task {} no schedule defined",&it.name);
+        continue;
+      }
+      let res = match it.name.as_str() {
+        "monitor" =>
+          sched.add(Job::new(&it.schedule.to_owned(), move |uuid, _l| {
+            println!("Schedule {} {}: {}",&it.name,&it.schedule,uuid);
+            tokio::spawn(async {run_clouds_monitor().await});
+          })?),
+        "check" =>
+          sched.add(Job::new(&it.schedule.to_owned(), move |uuid, _l| {
+            println!("Schedule {} {}: {}",&it.name,&it.schedule,uuid);
+            tokio::spawn(async {run_check_clouds().await});
+          })?),
+        "cache" =>
+          sched.add(Job::new(&it.schedule.to_owned(), move |uuid, _l| {
+            println!("Schedule {} {}: {}",&it.name,&it.schedule,uuid);
+            tokio::spawn(async {run_cache_clouds().await});
+          })?),
+        _ => {
+          eprintln!("Error task {} not defined",&it.name);
+          continue;
+        },
+      };
+      match res {
+        Ok(_) => { continue; },
+        Err(e) => {
+          eprintln!("Error scheduling task {}",e);
+          continue;
+        },
+      }
+    };
+    let _= sched.start().await;
+  }
+  Ok(())
 }
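A note on the resulting design, as far as it can be read from this diff: run_cache_clouds, run_check_clouds and run_clouds_monitor now return Result<()>, but each scheduled Job closure is synchronous and hands the work to tokio::spawn, so a task's error never reaches the scheduler itself; it is only observable through the spawned task's JoinHandle. A hedged sketch of how a caller could surface such an error (run_cache_clouds is the function from this commit; the wrapper and the logging are assumptions, not part of it):

    // Assumes an async context and the run_cache_clouds() defined in this commit.
    async fn spawn_cache_task() {
        let handle = tokio::spawn(async { run_cache_clouds().await });
        match handle.await {
            Ok(Ok(())) => println!("cache task finished"),
            Ok(Err(e)) => eprintln!("cache task failed: {}", e), // Result returned by the task
            Err(e) => eprintln!("cache task join error: {}", e), // panic or cancellation
        }
    }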
|
Loading…
Reference in New Issue
Block a user