//! Benchmark: bulk-insert randomly generated pages, then read the large
//! rows back by primary key in random order.

use bench::data::random_entities;
use entity::prelude::*;
use env_logger::{Builder, Env};
use log::info;
use migration::{Migrator, MigratorTrait};
use rand::prelude::*;
use sea_orm::prelude::*;
use sea_orm::sea_query::{BinOper, Expr, Func, SimpleExpr};
use sea_orm::{ConnectOptions, Database, QuerySelect};

async fn run() -> Result<(), anyhow::Error> {
    dotenvy::dotenv()?;
    let db_url = std::env::var("DATABASE_URL")?;

    info!("starting db");
    let opts = ConnectOptions::new(db_url);
    let db = Database::connect(opts).await?;
    Migrator::refresh(&db).await?;
    let db = &db;
    info!("connected to db");

    // Generate 1000 page rows with ~1 MB of random text each.
    info!("starting data load");
    let pages = random_entities(1000, 1_000_000);
    info!("finished data load");

    info!("starting db insert");
    for chunk in pages.chunks(5000) {
        let _ = Page::insert_many(chunk.to_vec()).exec(db).await?;
    }
    info!("finished db insert");

    // CHAR_LENGTH(page.text), used to find rows with large text values.
    let length_expr: SimpleExpr = Func::char_length(Expr::col((
        entity::page::Entity,
        entity::page::Column::Text,
    )))
    .into();

    // Fetch only the ids of rows whose text exceeds 8 KiB. The element
    // type i64 is assumed here; adjust it to match the entity's
    // primary-key type.
    info!("fetching big row ids");
    let mut large_row_ids: Vec<i64> = entity::page::Entity::find()
        .filter(length_expr.binary(BinOper::GreaterThan, Expr::val(8 * 1024)))
        .select_only()
        .column(entity::page::Column::Id)
        .into_tuple()
        .all(db)
        .await?;
    info!("counted {} big rows", large_row_ids.len());

    let num_rows = Page::find().count(db).await?;
    info!("inserted {} rows", num_rows);

    // Read up to 1000 of the large rows back in random order.
    let mut rng = thread_rng();
    large_row_ids.shuffle(&mut rng);

    info!("starting random reads");
    let mut bytes_read = 0;
    for id in large_row_ids.iter().take(1000) {
        let row = Page::find_by_id(*id).one(db).await?.unwrap();
        bytes_read += row.text.len();
    }
    println!("read {} bytes", bytes_read);
    info!("done");

    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    init_logger();
    run().await?;
    Ok(())
}

fn init_logger() {
    let env = Env::default().filter_or("BENCH_LOG_LEVEL", "info,sqlx=error");
    Builder::from_env(env).format_timestamp_millis().init();
}