blogdor/src/main.rs

use std::time::Duration;
use blogdor::{BlogdorTheAggregator, UserRequest};
use tokio::sync::mpsc::{UnboundedReceiver, unbounded_channel};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
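
// How long Blogdor sleeps between feed-checking passes.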
const BLOGDOR_SNOOZE: Duration = Duration::from_hours(1);

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    init_logs();
    let bta = BlogdorTheAggregator::new().await;

    // The HTTP side gets the sender; user requests flow back to the
    // aggregation loop through `rx`.
    let (tx, rx) = unbounded_channel();
    bta.spawn_http(tx).await;

    run_loop(&bta, rx).await;
    bta.close_db().await;
    tracing::info!("db closed, exiting");
}

fn init_logs() {
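    // Respect an explicit RUST_LOG setting; otherwise default to
    // debug-level logs for blogdor and axum.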
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "blogdor=debug,axum=debug".into()),
        )
        .with(tracing_subscriber::fmt::layer())
        .init();
}
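
// The aggregation loop: processes user requests from the HTTP handlers,
// checks feeds and stale entries on timers, and exits once `bta.cancelled()`
// resolves.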
async fn run_loop(bta: &BlogdorTheAggregator, mut user_req_rx: UnboundedReceiver<UserRequest>) {
    let mut check_feeds = tokio::time::interval(BLOGDOR_SNOOZE);
    let mut check_stale = tokio::time::interval(Duration::from_hours(24));

    loop {
        tokio::select! {
            // With `biased`, branches are polled in order, so user requests
            // take priority over the timers.
            biased;

            user_req = user_req_rx.recv() => {
                if let Some(ureq) = user_req {
                    bta.process_user_request(&ureq).await;
                }
            }
            _ = check_feeds.tick() => {
                match bta.check_feeds().await {
                    Ok(results) => {
                        for result in results {
                            if let Some(ref posts) = result.entries {
                                tracing::debug!(
                                    "got {} new posts from {}",
                                    posts.len(),
                                    result.url
                                );
                                bta.post_entries(posts).await;
                            } else {
                                tracing::debug!("no new posts from {}", result.url);
                            }
                        }
                    }
                    // the outer check_feeds call itself failed
                    Err(e) => {
                        tracing::warn!("could not check feeds: {e}");
                    }
                }
            }
            _ = check_stale.tick() => {
                bta.check_stale().await;
            }
            _ = bta.cancelled() => {
                tracing::info!("shutting down the aggregation loop");
                break;
            }
        }
    }
}