Mirror of https://github.com/flibusta-apps/library_updater.git, synced 2025-12-06 15:45:36 +01:00
Add axum server

src/main.rs (525 lines changed)
@@ -4,522 +4,55 @@ extern crate lazy_static;

Removed — the old implementation, which ran the whole download/parse/update cycle inline in main.rs:

pub mod config;
pub mod types;
pub mod utils;

use std::{
    fmt::Debug,
    str::FromStr,
    sync::{Arc, Mutex},
};

use config::Webhook;
use deadpool_postgres::{Config, CreatePoolError, ManagerConfig, Pool, RecyclingMethod, Runtime};
use futures::{io::copy, TryStreamExt};
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
use tokio::fs::{File, remove_file};
use tokio_cron_scheduler::{Job, JobScheduler, JobSchedulerError};
use tokio_postgres::NoTls;

use async_compression::futures::bufread::GzipDecoder;
use sql_parse::{
    parse_statement, InsertReplace, InsertReplaceType, ParseOptions, SQLArguments, SQLDialect,
    Statement,
};
use tokio_util::compat::TokioAsyncReadCompatExt;
use types::{
    Author, AuthorAnnotation, AuthorAnnotationPic, BookAnnotation, BookAnnotationPic, BookAuthor,
    BookGenre, FromVecExpression, Genre, Sequence, SequenceInfo, Translator, Update,
};
use utils::read_lines;

use crate::types::Book;
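
Throughout both versions, config::CONFIG comes from src/config.rs, which is not part of this diff. A hypothetical sketch of its shape: only the field names are certain (each is referenced from main.rs); the env-var names and the Webhook/Method definitions are guesses.

// Hypothetical reconstruction of src/config.rs — fields inferred from usage,
// everything else assumed. `lazy_static!` is available via the crate-level
// `#[macro_use] extern crate lazy_static;` shown in the hunk header.
use serde_json::{Map, Value};

#[derive(Clone)]
pub enum Method { Get, Post }

#[derive(Clone)]
pub struct Webhook {
    pub method: Method,
    pub url: String,
    pub headers: Map<String, Value>, // iterated as (String, Value) pairs in send_webhooks
}

pub struct AppConfig {
    pub api_key: String,     // checked by the new HTTP handler
    pub fl_base_url: String, // base URL for the SQL dumps
    pub postgres_host: String,
    pub postgres_port: u16,
    pub postgres_db_name: String,
    pub postgres_user: String,
    pub postgres_password: String,
    pub webhooks: Vec<Webhook>,
    pub sentry_dsn: String,
}

fn env(key: &str) -> String {
    std::env::var(key).unwrap_or_else(|_| panic!("{key} is not set"))
}

lazy_static! {
    pub static ref CONFIG: AppConfig = AppConfig {
        api_key: env("API_KEY"),
        fl_base_url: env("FL_BASE_URL"),
        postgres_host: env("POSTGRES_HOST"),
        postgres_port: env("POSTGRES_PORT").parse().expect("invalid port"),
        postgres_db_name: env("POSTGRES_DB_NAME"),
        postgres_user: env("POSTGRES_USER"),
        postgres_password: env("POSTGRES_PASSWORD"),
        webhooks: Vec::new(), // presumably parsed from JSON in the real config
        sentry_dsn: env("SENTRY_DSN"),
    };
}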
async fn download_file(filename_str: &str) -> Result<(), Box<dyn std::error::Error + Send>> {
    log::info!("Download {filename_str}...");

    let link = format!("{}/sql/{filename_str}.gz", &config::CONFIG.fl_base_url);

    let response = match reqwest::get(link).await {
        Ok(v) => v,
        Err(err) => return Err(Box::new(err)),
    };

    let response = match response.error_for_status() {
        Ok(v) => v,
        Err(err) => return Err(Box::new(err)),
    };

    match remove_file(filename_str).await {
        Ok(_) => (),
        Err(err) => log::debug!("Can't remove file: {:?}", err),
    };

    let mut file = match File::create(filename_str).await {
        Ok(v) => v.compat(),
        Err(err) => {
            log::error!("Can't create {filename_str}: {:?}", err);
            return Err(Box::new(err))
        },
    };

    let data = response
        .bytes_stream()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
        .into_async_read();

    let decoder = GzipDecoder::new(data);

    match copy(decoder, &mut file).await {
        Ok(_) => (),
        Err(err) => {
            log::error!("Can't write data {filename_str}: {}", err);
            return Err(Box::new(err))
        },
    };

    log::info!("{filename_str} downloaded!");

    Ok(())
}
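
The non-obvious move in download_file is bridging reqwest's tokio-flavoured byte stream into the futures-io world that async_compression's GzipDecoder expects. A condensed standalone sketch of that same bridge, using the same crates (fetch_and_gunzip is a hypothetical name):

use async_compression::futures::bufread::GzipDecoder;
use futures::{io::copy, TryStreamExt};
use tokio_util::compat::TokioAsyncReadCompatExt;

async fn fetch_and_gunzip(url: &str, dest: &str) -> std::io::Result<()> {
    let response = reqwest::get(url)
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;

    // bytes_stream() yields Result<Bytes, reqwest::Error>; mapping the error
    // into io::Error lets into_async_read() adapt it to a futures AsyncBufRead,
    // which is what GzipDecoder consumes.
    let reader = response
        .bytes_stream()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
        .into_async_read();

    // tokio::fs::File is a tokio AsyncWrite; .compat() bridges it to futures-io.
    let mut file = tokio::fs::File::create(dest).await?.compat();
    copy(GzipDecoder::new(reader), &mut file).await?;
    Ok(())
}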

async fn process<T>(
    pool: Pool,
    source_id: i16,
    file_name: &str,
    deps: Vec<Arc<Mutex<Option<UpdateStatus>>>>,
) -> Result<(), Box<dyn std::error::Error + Send>>
where
    T: Debug + FromVecExpression<T> + Update,
{
    if deps.len() != 0 {
        loop {
            let mut some_failed = false;
            let mut some_none = false;

            for dep in deps.iter() {
                let status = dep.lock().unwrap();
                match &*status {
                    Some(status) => match status {
                        UpdateStatus::Success => (),
                        UpdateStatus::Fail => some_failed = true,
                    },
                    None => some_none = true,
                }
            }

            if !some_failed && !some_none {
                break;
            }

            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    }

    match download_file(file_name).await {
        Ok(_) => (),
        Err(err) => return Err(err),
    };

    let parse_options = ParseOptions::new()
        .dialect(SQLDialect::MariaDB)
        .arguments(SQLArguments::QuestionMark)
        .warn_unquoted_identifiers(true);

    let lines = read_lines(file_name);

    let lines = match lines {
        Ok(v) => v,
        Err(err) => return Err(Box::new(err)),
    };

    match T::before_update(&pool.get().await.unwrap()).await {
        Ok(_) => (),
        Err(err) => return Err(err),
    };

    log::info!("Start update {file_name}...");

    for line in lines.into_iter() {
        let line = match line {
            Ok(line) => line,
            Err(err) => return Err(Box::new(err)),
        };

        let mut issues = Vec::new();
        let ast = parse_statement(&line, &mut issues, &parse_options);

        match ast {
            Some(Statement::InsertReplace(
                i @ InsertReplace {
                    type_: InsertReplaceType::Insert(_),
                    ..
                },
            )) => {
                for value in i.values.into_iter() {
                    for t_value in value.1.into_iter() {
                        let value = T::from_vec_expression(&t_value);
                        let client = pool.get().await.unwrap();

                        match value.update(&client, source_id).await {
                            Ok(_) => {
                                // log::info!("{:?}", value);
                            }
                            Err(err) => {
                                log::error!("Update error: {:?} : {:?}", value, err);
                                return Err(err)
                            },
                        }
                    }
                }
            }
            _ => (),
        }
    }

    log::info!("Updated {file_name}...");

    Ok(())
}
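
process walks a MySQL dump line by line and replays every row of every INSERT. A compact sketch of just the parsing step, using the same sql_parse calls and the same iteration over i.values as above (count_insert_rows is a hypothetical helper):

use sql_parse::{
    parse_statement, InsertReplace, InsertReplaceType, ParseOptions, SQLArguments, SQLDialect,
    Statement,
};

// Count the row tuples in a dumped `INSERT INTO ... VALUES (...), (...);` line.
fn count_insert_rows(line: &str) -> usize {
    let options = ParseOptions::new()
        .dialect(SQLDialect::MariaDB)
        .arguments(SQLArguments::QuestionMark);

    let mut issues = Vec::new();
    let mut rows = 0;

    if let Some(Statement::InsertReplace(
        i @ InsertReplace { type_: InsertReplaceType::Insert(_), .. },
    )) = parse_statement(line, &mut issues, &options)
    {
        // Mirrors process::<T>: each entry of `value.1` is one row tuple.
        for value in i.values.into_iter() {
            rows += value.1.len();
        }
    }

    rows
}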
async fn get_postgres_pool() -> Result<Pool, CreatePoolError> {
    let mut config = Config::new();

    config.host = Some(config::CONFIG.postgres_host.clone());
    config.port = Some(config::CONFIG.postgres_port);
    config.dbname = Some(config::CONFIG.postgres_db_name.clone());
    config.user = Some(config::CONFIG.postgres_user.clone());
    config.password = Some(config::CONFIG.postgres_password.clone());
    config.connect_timeout = Some(std::time::Duration::from_secs(5));
    config.manager = Some(ManagerConfig {
        recycling_method: RecyclingMethod::Verified,
    });

    match config.create_pool(Some(Runtime::Tokio1), NoTls) {
        Ok(pool) => Ok(pool),
        Err(err) => Err(err),
    }
}
async fn get_source(pool: Pool) -> Result<i16, Box<dyn std::error::Error>> {
    let client = pool.get().await.unwrap();

    let row = match client
        .query_one("SELECT id FROM sources WHERE name = 'flibusta';", &[])
        .await
    {
        Ok(v) => v,
        Err(err) => return Err(Box::new(err)),
    };

    let id = row.get(0);

    Ok(id)
}
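
get_source assumes a 'flibusta' row already exists in the sources table. A hypothetical one-off seed for it, assuming a unique constraint on name (seed_source and the schema details are not in this diff):

// One-time seed for the `sources` row that get_source queries.
async fn seed_source(pool: &Pool) -> Result<(), tokio_postgres::Error> {
    let client = pool.get().await.unwrap();
    client
        .execute(
            "INSERT INTO sources (name) VALUES ('flibusta') ON CONFLICT DO NOTHING;",
            &[],
        )
        .await?;
    Ok(())
}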
enum UpdateStatus {
    Success,
    Fail,
}
async fn send_webhooks() -> Result<(), Box<reqwest::Error>> {
    for webhook in config::CONFIG.webhooks.clone().into_iter() {
        let Webhook { method, url, headers } = webhook;

        let client = reqwest::Client::new();

        let builder = match method {
            config::Method::Get => client.get(url),
            config::Method::Post => client.post(url),
        };

        let t_headers: Vec<(HeaderName, HeaderValue)> = headers.into_iter().map(|(key, val)| {
            let value = match val {
                serde_json::Value::String(v) => v,
                _ => panic!("Header value not string!"),
            };

            (
                HeaderName::from_str(key.as_ref()).unwrap(),
                HeaderValue::from_str(&value).unwrap(),
            )
        }).collect();

        let headers = HeaderMap::from_iter(t_headers.into_iter());

        let response = builder.headers(headers).send().await;

        let response = match response {
            Ok(v) => v,
            Err(err) => return Err(Box::new(err)),
        };

        match response.error_for_status() {
            Ok(_) => (),
            Err(err) => return Err(Box::new(err)),
        };
    }

    Ok(())
}
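
For illustration, a webhook entry in the form send_webhooks consumes it. All values are hypothetical, and the shape matches the sketch of config.rs given earlier:

fn example_webhook() -> config::Webhook {
    let headers = serde_json::json!({ "Authorization": "Bearer <token>" });

    config::Webhook {
        method: config::Method::Post,
        url: "https://example.com/on-library-updated".to_string(),
        // Non-string header values would hit the panic! branch above.
        headers: headers.as_object().unwrap().clone(),
    }
}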
async fn update() -> Result<(), Box<dyn std::error::Error>> {
    log::info!("Start update...");

    let pool = match get_postgres_pool().await {
        Ok(pool) => pool,
        Err(err) => panic!("{:?}", err),
    };

    let source_id = match get_source(pool.clone()).await {
        Ok(v) => Arc::new(v),
        Err(err) => panic!("{:?}", err),
    };

    let author_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));
    let book_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));
    let sequence_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));
    let book_annotation_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));
    let author_annotation_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));
    let genre_status: Arc<Mutex<Option<UpdateStatus>>> = Arc::new(Mutex::new(None));

    let pool_clone = pool.clone();
    let author_status_clone = author_status.clone();
    let source_id_clone = source_id.clone();
    let author_process = tokio::spawn(async move {
        match process::<Author>(pool_clone, *source_id_clone, "lib.libavtorname.sql", vec![]).await
        {
            Ok(_) => {
                let mut status = author_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = author_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let book_status_clone = book_status.clone();
    let source_id_clone = source_id.clone();
    let book_process = tokio::spawn(async move {
        match process::<Book>(pool_clone, *source_id_clone, "lib.libbook.sql", vec![]).await {
            Ok(_) => {
                let mut status = book_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = book_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let deps = vec![author_status.clone(), book_status.clone()];
    let source_id_clone = source_id.clone();
    let book_author_process = tokio::spawn(async move {
        process::<BookAuthor>(pool_clone, *source_id_clone, "lib.libavtor.sql", deps).await
    });

    let pool_clone = pool.clone();
    let deps = vec![author_status.clone(), book_status.clone()];
    let source_id_clone = source_id.clone();
    let translator_process = tokio::spawn(async move {
        process::<Translator>(pool_clone, *source_id_clone, "lib.libtranslator.sql", deps).await
    });

    let pool_clone = pool.clone();
    let sequence_status_clone = sequence_status.clone();
    let source_id_clone = source_id.clone();
    let sequence_process = tokio::spawn(async move {
        match process::<Sequence>(pool_clone, *source_id_clone, "lib.libseqname.sql", vec![]).await
        {
            Ok(_) => {
                let mut status = sequence_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = sequence_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let deps = vec![book_status.clone(), sequence_status.clone()];
    let source_id_clone = source_id.clone();
    let sequence_info_process = tokio::spawn(async move {
        process::<SequenceInfo>(pool_clone, *source_id_clone, "lib.libseq.sql", deps).await
    });

    let pool_clone = pool.clone();
    let deps = vec![book_status.clone()];
    let book_annotation_status_clone = book_annotation_status.clone();
    let source_id_clone = source_id.clone();
    let book_annotation_process = tokio::spawn(async move {
        match process::<BookAnnotation>(pool_clone, *source_id_clone, "lib.b.annotations.sql", deps)
            .await
        {
            Ok(_) => {
                let mut status = book_annotation_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = book_annotation_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let deps = vec![book_annotation_status.clone()];
    let source_id_clone = source_id.clone();
    let book_annotation_pics_process = tokio::spawn(async move {
        process::<BookAnnotationPic>(
            pool_clone,
            *source_id_clone,
            "lib.b.annotations_pics.sql",
            deps,
        )
        .await
    });

    let pool_clone = pool.clone();
    let deps = vec![author_status.clone()];
    let author_annotation_status_clone = author_annotation_status.clone();
    let source_id_clone = source_id.clone();
    let author_annotation_process = tokio::spawn(async move {
        match process::<AuthorAnnotation>(
            pool_clone,
            *source_id_clone,
            "lib.a.annotations.sql",
            deps,
        )
        .await
        {
            Ok(_) => {
                let mut status = author_annotation_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = author_annotation_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let deps = vec![author_annotation_status.clone()];
    let source_id_clone = source_id.clone();
    let author_annotation_pics_process = tokio::spawn(async move {
        process::<AuthorAnnotationPic>(
            pool_clone,
            *source_id_clone,
            "lib.a.annotations_pics.sql",
            deps,
        )
        .await
    });

    let pool_clone = pool.clone();
    let genre_status_clone = genre_status.clone();
    let source_id_clone = source_id.clone();
    let genre_annotation_process = tokio::spawn(async move {
        match process::<Genre>(pool_clone, *source_id_clone, "lib.libgenrelist.sql", vec![]).await {
            Ok(_) => {
                let mut status = genre_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Success);
                Ok(())
            }
            Err(err) => {
                let mut status = genre_status_clone.lock().unwrap();
                *status = Some(UpdateStatus::Fail);
                Err(err)
            }
        }
    });

    let pool_clone = pool.clone();
    let deps = vec![genre_status.clone(), book_status.clone()];
    let source_id_clone = source_id.clone();
    let book_genre_process = tokio::spawn(async move {
        process::<BookGenre>(pool_clone, *source_id_clone, "lib.libgenre.sql", deps).await
    });

    for process in [
        author_process,
        book_process,
        book_author_process,
        translator_process,
        sequence_process,
        sequence_info_process,
        book_annotation_process,
        book_annotation_pics_process,
        author_annotation_process,
        author_annotation_pics_process,
        genre_annotation_process,
        book_genre_process,
    ] {
        let process_result = match process.await {
            Ok(v) => v,
            Err(err) => return Err(Box::new(err)),
        };

        match process_result {
            Ok(_) => (),
            Err(err) => panic!("{:?}", err),
        }
    }

    match send_webhooks().await {
        Ok(_) => {
            log::info!("Webhooks sent!");
        },
        Err(err) => {
            log::info!("Webhooks send failed: {err}");
            return Err(Box::new(err))
        },
    };

    Ok(())
}
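
The twelve spawn blocks above repeat one pattern: run process::<T>, then record Success or Fail so that dependants polling that status can proceed. A hypothetical helper that would factor it out (spawn_tracked is not in the codebase; the extra Send + 'static bounds are assumed so the future can be spawned):

fn spawn_tracked<T>(
    pool: Pool,
    source_id: i16,
    file_name: &'static str,
    deps: Vec<Arc<Mutex<Option<UpdateStatus>>>>,
    status: Arc<Mutex<Option<UpdateStatus>>>,
) -> tokio::task::JoinHandle<Result<(), Box<dyn std::error::Error + Send>>>
where
    T: Debug + FromVecExpression<T> + Update + Send + 'static,
{
    tokio::spawn(async move {
        let result = process::<T>(pool, source_id, file_name, deps).await;

        // Publish the outcome for any process() call waiting on this table.
        let mut guard = status.lock().unwrap();
        *guard = Some(if result.is_ok() {
            UpdateStatus::Success
        } else {
            UpdateStatus::Fail
        });

        result
    })
}

With it, each block would collapse to a single call, e.g. let author_process = spawn_tracked::<Author>(pool.clone(), *source_id, "lib.libavtorname.sql", vec![], author_status.clone());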
#[tokio::main]
async fn main() -> Result<(), JobSchedulerError> {
    let _guard = sentry::init(config::CONFIG.sentry_dsn.clone());
    env_logger::init();

    let job_scheduler = JobScheduler::new().await.unwrap();

    let update_job = match Job::new_async("* 0 5 * * *", |_uuid, _l| Box::pin(async {
        match update().await {
            Ok(_) => log::info!("Updated"),
            Err(err) => log::info!("Update err: {:?}", err),
        };
    })) {
        Ok(v) => v,
        Err(err) => panic!("{:?}", err),
    };

    job_scheduler.add(update_job).await.unwrap();

    match job_scheduler.start().await {
        Ok(_) => Ok(()),
        Err(err) => Err(err),
    }
}

Added — the new src/main.rs. The update pipeline moves into the updater module (with its scheduling in cron_jobs), and a small axum server exposes it behind a POST /update endpoint guarded by an API key:

pub mod config;
pub mod types;
pub mod utils;
pub mod updater;

use std::net::SocketAddr;

use axum::{Router, routing::post, http::HeaderMap};

use crate::updater::cron_jobs;

async fn update(headers: HeaderMap) -> &'static str {
    let config_api_key = config::CONFIG.api_key.clone();

    let api_key = match headers.get("Authorization") {
        Some(v) => v,
        None => return "No api-key!",
    };

    if config_api_key != api_key.to_str().unwrap() {
        return "Wrong api-key!";
    }

    tokio::spawn(async {
        match updater::update().await {
            Ok(_) => log::info!("Updated!"),
            Err(err) => log::info!("Updater err: {:?}", err),
        }
    });

    "Update started"
}

async fn start_app() {
    let app = Router::new()
        .route("/update", post(update));

    let addr = SocketAddr::from(([0, 0, 0, 0], 8080));

    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        .await
        .unwrap();
}

#[tokio::main]
async fn main() {
    let _guard = sentry::init(config::CONFIG.sentry_dsn.clone());
    env_logger::init();

    tokio::spawn(async {
        cron_jobs().await
    });

    start_app().await;
}
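
With the server in place, an update can be triggered manually. A minimal client sketch, using reqwest as the rest of the crate already does (the URL and key value are placeholders):

// POST to the new endpoint; the handler compares the Authorization header
// against CONFIG.api_key and replies with a plain-text status.
async fn trigger_update() -> Result<(), reqwest::Error> {
    let body = reqwest::Client::new()
        .post("http://localhost:8080/update")
        .header("Authorization", "<api-key>")
        .send()
        .await?
        .text()
        .await?;

    println!("{body}"); // "Update started", "Wrong api-key!", or "No api-key!"
    Ok(())
}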